Beispiel #1
0
    def test_scheduling_atomic(self):
        """An atomic request's pairs are scheduled as one uninterrupted block.

        Queue 0 holds a non-atomic 5-pair request, queue 1 an atomic
        3-pair one.  Once the atomic request is selected, its three
        pairs must come out back-to-back before queue 0 resumes.
        """
        scheduler = WFQRequestScheduler(self.distQueueA, self.qmmA, self.feuA)
        # Shrink MHP timings so cycle arithmetic stays trivial in this test
        scheduler.mhp_cycle_period = 1
        scheduler.mhp_full_cycle = 3 * scheduler.mhp_cycle_period

        # Compare an atomic and a non-atomic request directly
        requests = [
            SchedulerRequest(timeout_cycle=None,
                             sched_cycle=None,
                             num_pairs=5,
                             create_id=0),
            SchedulerRequest(timeout_cycle=None,
                             sched_cycle=None,
                             num_pairs=3,
                             atomic=True,
                             create_id=1)
        ]
        for qid, req in enumerate(requests):
            scheduler._add_to_queue(req, qid)

        sim_run(1000)

        scheduler.inc_cycle()

        # The atomic request's three pairs (create_id 1) are consecutive;
        # the five pairs of request 0 surround them.
        for create_id in [0, 0, 0, 1, 1, 1, 0, 0]:
            aid, request = scheduler.select_queue()
            self.assertEqual(request.create_id, create_id)
            scheduler._post_process_success(aid)

        # All pairs consumed: nothing left to select
        aid, request = scheduler.select_queue()
        self.assertIs(aid, None)
Beispiel #2
0
    def test_scheduling_num_pairs_iterate(self):
        """Non-atomic requests with different num_pairs alternate per pair."""
        scheduler = WFQRequestScheduler(self.distQueueA, self.qmmA, self.feuA)

        # Two non-atomic requests: six pairs vs three pairs
        six_pair_request = SchedulerRequest(timeout_cycle=None,
                                            sched_cycle=None,
                                            num_pairs=6,
                                            create_id=0)
        three_pair_request = SchedulerRequest(timeout_cycle=None,
                                              sched_cycle=None,
                                              num_pairs=3,
                                              create_id=1)
        scheduler._add_to_queue(six_pair_request, 0)
        scheduler._add_to_queue(three_pair_request, 1)

        sim_run(1000)

        scheduler.inc_cycle()

        # Pairs alternate while both requests have pairs left, then the
        # remaining three pairs of request 0 drain out.
        expected_order = [0, 1, 0, 1, 0, 1, 0, 0, 0]
        for expected_create_id in expected_order:
            aid, request = scheduler.select_queue()
            self.assertEqual(request.create_id, expected_create_id)
            scheduler._post_process_success(aid)

        # Both requests fully served: nothing left to schedule
        aid, request = scheduler.select_queue()
        self.assertIs(aid, None)
Beispiel #3
0
    def test_scheduling_fidelity(self):
        """The lower-fidelity request is selected before the higher one."""
        scheduler = WFQRequestScheduler(self.distQueueA, self.qmmA, self.feuA)

        # One high- and one low-fidelity request on separate queues
        for qid, (fidelity, create_id) in enumerate([(0.8, 0), (0.6, 1)]):
            req = SchedulerRequest(timeout_cycle=None,
                                   sched_cycle=None,
                                   min_fidelity=fidelity,
                                   create_id=create_id)
            scheduler._add_to_queue(req, qid)

        sim_run(1000)

        scheduler.inc_cycle()

        # Low fidelity (create_id 1) comes out first, then high fidelity
        for expected_create_id in [1, 0]:
            aid, request = scheduler.select_queue()
            self.assertEqual(request.create_id, expected_create_id)
            scheduler._post_process_success(aid)

        # Queues exhausted
        aid, request = scheduler.select_queue()
        self.assertIs(aid, None)
Beispiel #4
0
    def test_multiple_queues(self):
        """Adds on distinct queue IDs replicate into every peer's queues."""
        sim_reset()
        alice = QuantumNode("alice", nodeID=0)
        bob = QuantumNode("bob", nodeID=1)
        conn = ClassicalFibreConnection(alice, bob, length=.0001)
        aliceDQ = EGPDistributedQueue(alice,
                                      conn,
                                      accept_all=True,
                                      numQueues=2)
        bobDQ = EGPDistributedQueue(bob, conn, accept_all=True, numQueues=2)

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=[(alice, [aliceDQ]), (bob, [bobDQ])],
                              connections=[(conn, "dqp_conn",
                                            [aliceDQ, bobDQ])])
        network.start()

        # Each node submits one fresh request to each of its two queues
        for qid in range(2):
            aliceDQ.add(SchedulerRequest(), qid=qid)
        for qid in range(2):
            bobDQ.add(SchedulerRequest(), qid=qid)
        sim_run(10)

        # Every queue on both peers now holds both nodes' items
        for dq in (aliceDQ, bobDQ):
            for qid in range(2):
                self.assertEqual(len(dq.queueList[qid].queue), 2)
Beispiel #5
0
    def test_scheduling_weights_same_req(self):
        """Scheduling order of identical requests on differently-weighted queues.

        Requests arrive in queue order 2, 1, 0 (one cycle apart each) yet
        are selected in queue order 0, 1, 2, so the weights [0, 15, 5]
        dominate arrival order here.
        NOTE(review): weight 0 on queue 0 appears to give it precedence
        over the weighted queues — confirm WFQRequestScheduler semantics.
        """
        weights = [0, 15, 5]
        scheduler = WFQRequestScheduler(self.distQueueA,
                                        self.qmmA,
                                        self.feuA,
                                        weights=weights)
        # Shrink MHP timings so cycle arithmetic stays trivial in this test
        scheduler.mhp_cycle_period = 1
        scheduler.mhp_full_cycle = 3 * scheduler.mhp_cycle_period

        # Identical requests, added to queues 2, 1, 0 in that order
        scheduler._add_to_queue(
            SchedulerRequest(timeout_cycle=None, sched_cycle=None,
                             create_id=2), 2)
        scheduler.inc_cycle()
        scheduler._add_to_queue(
            SchedulerRequest(timeout_cycle=None, sched_cycle=None,
                             create_id=1), 1)
        scheduler.inc_cycle()
        scheduler._add_to_queue(
            SchedulerRequest(timeout_cycle=None, sched_cycle=None,
                             create_id=0), 0)

        sim_run(1000)

        scheduler.inc_cycle()

        # Selection follows queue order 0, 1, 2 despite reversed arrival
        for create_id in [0, 1, 2]:
            aid, request = scheduler.select_queue()
            self.assertEqual(request.create_id, create_id)
            scheduler._post_process_success(aid)

        # All queues drained
        aid, request = scheduler.select_queue()
        self.assertIs(aid, None)
Beispiel #6
0
    def test_faulty_queue_ID(self):
        """An add with an out-of-range queue ID must be rejected."""
        def add_callback(result):
            # result[0] carries the status code reported back to the caller
            self.assertEqual(result[0], aliceDQ.DQ_REJECT)
            callback_called[0] = True

        sim_reset()

        # Mutable cell so the nested callback can signal that it ran
        callback_called = [False]

        alice = QuantumNode("alice", nodeID=0)
        bob = QuantumNode("bob", nodeID=1)
        conn = ClassicalFibreConnection(alice, bob, length=.0001)
        aliceDQ = EGPDistributedQueue(alice,
                                      conn,
                                      accept_all=True,
                                      numQueues=1)
        bobDQ = EGPDistributedQueue(bob, conn, accept_all=True, numQueues=1)
        aliceDQ.add_callback = add_callback

        nodes = [
            (alice, [aliceDQ]),
            (bob, [bobDQ]),
        ]
        conns = [(conn, "dqp_conn", [aliceDQ, bobDQ])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()
        request = SchedulerRequest()
        # Only one queue exists (qid 0), so qid=1 is invalid
        aliceDQ.add(request, qid=1)
        sim_run(10)
        self.assertTrue(callback_called[0])
Beispiel #7
0
    def test_update_mhp_cycle_number(self):
        """Items become ready at sched_cycle and time out at timeout_cycle.

        Each peer tracks readiness and timeouts independently via its own
        update_mhp_cycle_number calls; the timeout callback fires only
        once that node's cycle reaches the item's timeout_cycle.
        """
        def callback_alice(queue_item):
            callback_called[0] = True

        def callback_bob(queue_item):
            callback_called[1] = True

        sim_reset()
        # [alice_timed_out, bob_timed_out]
        callback_called = [False, False]
        alice = QuantumNode("alice", nodeID=0)
        bob = QuantumNode("bob", nodeID=1)
        conn = ClassicalFibreConnection(alice, bob, length=.0001)
        aliceDQ = EGPDistributedQueue(alice,
                                      conn,
                                      timeout_callback=callback_alice,
                                      accept_all=True)
        bobDQ = EGPDistributedQueue(bob,
                                    conn,
                                    timeout_callback=callback_bob,
                                    accept_all=True)

        nodes = [
            (alice, [aliceDQ]),
            (bob, [bobDQ]),
        ]
        conns = [(conn, "dqp_conn", [aliceDQ, bobDQ])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()
        # Item becomes schedulable at cycle 1 and expires at cycle 2
        request = SchedulerRequest(sched_cycle=1, timeout_cycle=2)
        aliceDQ.add(request, 0)
        sim_run(10)
        queue_item_alice = aliceDQ.local_peek(0)
        queue_item_bob = bobDQ.local_peek(0)
        # No cycle updates yet: item not ready anywhere
        self.assertFalse(queue_item_alice.ready)

        # Alice reaches the sched cycle: her copy is ready, bob's is not
        aliceDQ.update_mhp_cycle_number(1, 10)
        self.assertTrue(queue_item_alice.ready)
        self.assertFalse(queue_item_bob.ready)
        self.assertFalse(callback_called[0])
        self.assertFalse(callback_called[1])

        # Alice reaches the timeout cycle: only her callback fires
        aliceDQ.update_mhp_cycle_number(2, 10)
        self.assertTrue(callback_called[0])
        self.assertFalse(callback_called[1])

        # Bob reaches the sched cycle: his copy becomes ready, no timeout yet
        bobDQ.update_mhp_cycle_number(1, 10)
        self.assertTrue(queue_item_bob.ready)
        self.assertFalse(callback_called[1])

        # Bob reaches the timeout cycle: his callback fires as well
        bobDQ.update_mhp_cycle_number(2, 10)
        self.assertTrue(queue_item_bob.ready)
        self.assertTrue(callback_called[1])
Beispiel #8
0
    def test_slave_add_while_waiting(self):
        """Master adds while a slave add is in flight but not yet acked.

        The simulation is paused mid-flight (3/4 of the one-way delay) so
        the slave's add has reached the master without an ack; the master
        then adds its own request.  Both local queues must agree afterwards.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=25)

        dq = DistributedQueue(node,
                              conn,
                              numQueues=3,
                              throw_local_queue_events=True)
        dq2 = DistributedQueue(node2,
                               conn,
                               numQueues=3,
                               throw_local_queue_events=True)
        dq.connect_to_peer_protocol(dq2, conn)

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        # Add one request for both master and slave
        create_id = 0
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq.add(request=request, qid=0)
        create_id = 1
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq2.add(request=request, qid=0)

        # Wait for the slave's add to arrive but not the ack
        run_time = dq.comm_delay * (3 / 4)
        sim_run(run_time)

        # Add request from master while the slave add is still unacked
        create_id = 2
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq.add(request=request, qid=0)

        # Run long enough for all adds and acks to complete
        run_time = dq.comm_delay * 4
        sim_run(run_time)

        # Both peers' queues must hold the same (ready) items
        self.ready_items(dq.queueList[0])
        self.ready_items(dq2.queueList[0])
        self.check_local_queues(dq.queueList[0], dq2.queueList[0])
Beispiel #9
0
    def test_early_timeout(self):
        """The request timeout fires only after max_time has elapsed."""
        self.test_scheduler.configure_mhp_timings(10, 12, 0, 0)
        max_time = 10 * 12
        request = EGPRequest(max_time=max_time)

        sim_run(1)

        self.test_scheduler.add_request(request)
        # Immediately after adding, the timeout must not have fired
        self.assertFalse(self.timeout_handler_called[0])

        # Run just past max_time; the timeout handler must now have run
        sim_run(max_time + 1)

        self.assertTrue(self.timeout_handler_called[0])
Beispiel #10
0
    def test_scheduling_weights_diff_req(self):
        """Weights can reorder requests that would otherwise sort by cost.

        Queue weights are chosen around the high/low fidelity cycle-cost
        ratio so that the high-fidelity requests end up scheduled before
        the low-fidelity one, in (reverse) queue order 2, 1, 0.
        """
        scheduler = WFQRequestScheduler(self.distQueueA, self.qmmA, self.feuA)

        # First we will find the estimated times for constructing a high and low fidelity pair
        high_fid = self.feuA.achievable_fidelities[-1][1]
        low_fid = self.feuA.achievable_fidelities[-2][1]
        cycles_high = scheduler._estimate_nr_of_cycles_per_pair(
            SchedulerRequest(min_fidelity=high_fid))
        cycles_low = scheduler._estimate_nr_of_cycles_per_pair(
            SchedulerRequest(min_fidelity=low_fid))

        # Weights such that high fid should be scheduled earlier than low fid
        weight_fraction = cycles_high / cycles_low
        weight_fraction_below = weight_fraction * 9 / 10
        weight_fraction_above = weight_fraction * 11 / 10

        # Construct a fresh scheduler with the computed weights
        weights = [weight_fraction_below, 1, weight_fraction_above]
        scheduler = WFQRequestScheduler(self.distQueueA,
                                        self.qmmA,
                                        self.feuA,
                                        weights=weights)

        # Different weights, different requests: high fid on queues 0 and 2,
        # low fid on queue 1
        scheduler._add_to_queue(
            SchedulerRequest(timeout_cycle=None,
                             sched_cycle=None,
                             min_fidelity=high_fid,
                             create_id=0), 0)
        scheduler._add_to_queue(
            SchedulerRequest(timeout_cycle=None,
                             sched_cycle=None,
                             min_fidelity=low_fid,
                             create_id=1), 1)
        scheduler._add_to_queue(
            SchedulerRequest(timeout_cycle=None,
                             sched_cycle=None,
                             min_fidelity=high_fid,
                             create_id=2), 2)

        sim_run(1000)

        scheduler.inc_cycle()

        # Expected selection order given the weights above
        for create_id in [2, 1, 0]:
            aid, request = scheduler.select_queue()
            self.assertEqual(request.create_id, create_id)
            scheduler._post_process_success(aid)

        # All queues drained
        aid, request = scheduler.select_queue()
        self.assertIs(aid, None)
Beispiel #11
0
    def test_full_wraparound(self):
        """Queue sequence numbers wrap around once maxSeq items were used.

        Fills the shared queue to maxSeq, frees a single slot, then adds
        one more item so the sequence number wraps; the final pops must
        succeed on both peers.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=25)

        wSize = 2
        maxSeq = 6
        dq = DistributedQueue(node,
                              conn,
                              numQueues=3,
                              throw_local_queue_events=True,
                              myWsize=wSize,
                              otherWsize=wSize,
                              maxSeq=maxSeq)
        dq2 = DistributedQueue(node2,
                               conn,
                               numQueues=3,
                               throw_local_queue_events=True,
                               myWsize=wSize,
                               otherWsize=wSize,
                               maxSeq=maxSeq)
        dq.connect_to_peer_protocol(dq2, conn)

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        # Fill the queue completely (sequence numbers 0..maxSeq-1)
        for timestep in range(1, maxSeq + 1):
            dq.add(timestep, 0)
            sim_run(timestep * 1000000)
            # Mark items ready on both sides so they can be popped later
            dq.queueList[0].queue[timestep - 1].ready = True
            dq2.queueList[0].queue[timestep - 1].ready = True

        # Free one slot, then add again to force the sequence number to wrap
        dq.queueList[0].pop()
        dq2.queueList[0].pop()
        dq.add(maxSeq, 0)
        sim_run(maxSeq * 100000)

        # After wraparound both peers must still pop consistently
        dq.queueList[0].pop()
        dq2.queueList[0].pop()
        dq.queueList[0].pop()
        dq2.queueList[0].pop()
Beispiel #12
0
    def test_comm_timeout(self):
        """Adds to an unresponsive peer all time out and are rolled back.

        Bob has no distributed queue attached to the connection, so none
        of Alice's adds can ever be acknowledged; every add must report
        DQ_TIMEOUT, leave the local queues empty, and still advance the
        communication sequence number.
        """
        self.callback_storage = []

        def add_callback(result):
            self.callback_storage.append(result)

        sim_reset()
        alice = QuantumNode("Alice", 1)
        bob = QuantumNode("Bob", 2)

        conn = ClassicalFibreConnection(alice, bob, length=0.01)
        aliceDQ = DistributedQueue(alice, conn)

        aliceDQ.add_callback = add_callback

        num_adds = 100
        aliceProto = TestProtocol(alice, aliceDQ, 1, maxNum=num_adds)

        nodes = [alice, bob]

        # Note: only Alice's protocol is attached; Bob never answers
        conns = [(conn, "dqp_conn", [aliceProto])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        sim_run(5000)

        expected_qid = 0
        expected_results = [(aliceDQ.DQ_TIMEOUT, expected_qid,
                             qseq % aliceDQ.myWsize, [alice.name, qseq])
                            for qseq in range(num_adds)]

        # Check that all attempted add's timed out
        self.assertEqual(self.callback_storage, expected_results)

        # Check that alice's distributed queue has no outstanding add acks
        self.assertEqual(aliceDQ.waitAddAcks, {})

        # Check that all of the local queues are empty
        for local_queue in aliceDQ.queueList:
            self.assertEqual(local_queue.queue, [])
            self.assertEqual(local_queue.sequence_to_item, {})

        # Check that we incremented the comms_seq
        self.assertEqual(aliceDQ.comms_seq, num_adds)
Beispiel #13
0
    def test_add_basic(self):
        """Random concurrent adds from both peers yield identical, ordered queues.

        Alice and Bob each add up to maxSeq // 2 items at random times;
        afterwards both local queues must contain the same requests with
        consecutive sequence numbers starting at 0.
        """
        # Set up two nodes and run a simulation in which items
        # are randomly added at specific time intervals
        sim_reset()
        alice = QuantumNode("Alice", 1)
        bob = QuantumNode("Bob", 2)

        conn = ClassicalFibreConnection(alice, bob, length=.0001)
        aliceDQ = DistributedQueue(alice, conn)
        bobDQ = DistributedQueue(bob, conn)

        aliceProto = TestProtocol(alice,
                                  aliceDQ,
                                  1,
                                  maxNum=aliceDQ.maxSeq // 2)
        bobProto = TestProtocol(bob, bobDQ, 1, maxNum=bobDQ.maxSeq // 2)

        nodes = [
            (alice, [aliceProto]),
            (bob, [bobProto]),
        ]
        conns = [(conn, "dqp_conn", [aliceProto, bobProto])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        sim_run(50000)

        # Check the Queue contains ordered elements from Alice and Bob
        qA = aliceDQ.queueList[0].sequence_to_item
        qB = bobDQ.queueList[0].sequence_to_item

        # First they should have the same length
        self.assertGreater(len(qA), 0)
        self.assertEqual(len(qA), len(qB))

        # Check the items are the same and the sequence numbers are ordered.
        # (The original kept a separate `count` variable that always equalled
        # the loop index; the index itself is the expected sequence number.)
        for k in range(len(qA)):
            self.assertEqual(qA[k].request, qB[k].request)
            self.assertEqual(qA[k].seq, qB[k].seq)
            self.assertEqual(qA[k].seq, k)
Beispiel #14
0
    def test_priority(self):
        """Requests come out of the scheduler in strict priority order.

        Requests with priorities 0..num_priorities-1 are added in reverse
        order (highest priority value first); the scheduler must still
        return them lowest priority value first.
        """
        sim_reset()
        num_priorities = 10
        # One local queue per priority level
        dqpA = EGPDistributedQueue(node=self.nodeA,
                                   accept_all=True,
                                   numQueues=num_priorities)
        dqpB = EGPDistributedQueue(node=self.nodeB,
                                   accept_all=True,
                                   numQueues=num_priorities)
        dqpA.connect_to_peer_protocol(dqpB)
        qmmA = QuantumMemoryManagement(node=self.nodeA)
        test_scheduler = StrictPriorityRequestScheduler(distQueue=dqpA,
                                                        qmm=qmmA)
        test_scheduler.configure_mhp_timings(1, 2, 0, 0)

        requests = [
            EGPRequest(other_id=self.nodeB.nodeID,
                       num_pairs=1,
                       min_fidelity=1,
                       max_time=0,
                       purpose_id=0,
                       priority=i) for i in range(num_priorities)
        ]

        conn = dqpA.conn
        self.network = EasyNetwork(name="DQPNetwork",
                                   nodes=[(self.nodeA, [dqpA]),
                                          (self.nodeB, [dqpB])],
                                   connections=[(conn, "dqp_conn",
                                                 [dqpA, dqpB])])
        self.network.start()

        # Add in descending priority, advancing cycles past the scheduler's
        # offset so each item becomes schedulable
        for i, request in enumerate(reversed(requests)):
            test_scheduler.add_request(request)
            sim_run(i * 5)
            for cycle in range(2 * test_scheduler.mhp_cycle_offset):
                test_scheduler.inc_cycle()

        # Items must come back in ascending priority (= queue) order
        for i in range(num_priorities):
            next_aid, next_request = test_scheduler._get_next_request()
            self.assertEqual((i, 0), next_aid)
            self.assertEqual(next_request.priority, i)
            test_scheduler.clear_request(next_aid)
Beispiel #15
0
    def test_scheduling_same_queue_same_req(self):
        """Identical requests in a single queue are served in FIFO order."""
        scheduler = WFQRequestScheduler(self.distQueueA, self.qmmA, self.feuA)

        # Same request repeated num_req times into queue 0
        num_req = 3
        requests = [
            SchedulerRequest(timeout_cycle=None, sched_cycle=None, create_id=i)
            for i in range(num_req)
        ]
        for req in requests:
            scheduler._add_to_queue(req, 0)

        sim_run(1000)

        scheduler.inc_cycle()

        # FIFO: create_ids come out in insertion order.
        # Iterate num_req times (was a hard-coded 3) so the loop stays in
        # sync with the number of requests added above.
        for i in range(num_req):
            aid, request = scheduler.select_queue()
            self.assertEqual(request.create_id, i)
            scheduler._post_process_success(aid)

        # Queue drained
        aid, request = scheduler.select_queue()
        self.assertIs(aid, None)
Beispiel #16
0
    def test_resend_acks(self):
        """A too-short ack timeout forces resends, yet each add counts once.

        The slave's timeout factor is temporarily halved so an ack
        arrives "late" and the add is retransmitted; afterwards all four
        adds must have produced exactly one callback each, with queue
        sequence numbers 0..3 present on both peers.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=25)

        dq = DistributedQueue(node,
                              conn,
                              numQueues=3,
                              throw_local_queue_events=True)
        dq2 = DistributedQueue(node2,
                               conn,
                               numQueues=3,
                               throw_local_queue_events=True)
        dq.connect_to_peer_protocol(dq2, conn)

        storage1 = []
        storage2 = []

        def callback1(result):
            storage1.append(result)

        def callback2(result):
            storage2.append(result)

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()
        dq.add_callback = callback1
        dq2.add_callback = callback2

        # Add one request with a normal timeout
        create_id = 0
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq2.add(request=request, qid=0)
        run_time = dq.comm_delay * dq.timeout_factor
        sim_run(run_time)

        # Set way too short a timeout (to force a resend)
        dq2.timeout_factor = 1 / 2

        # Add one request (slave) under the shortened timeout
        create_id = 1
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq2.add(request=request, qid=0)
        run_time += (dq.comm_delay + 1) * 4
        sim_run(run_time)

        # Set to correct factor again
        dq2.timeout_factor = 2

        # Final pair of adds, one from each side
        create_id = 2
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq.add(request=request, qid=0)
        dq2.add(request=request, qid=0)
        run_time += dq.comm_delay * dq.timeout_factor
        sim_run(run_time)

        # Exactly one callback per add despite the resend
        self.assertEqual(len(storage1), 4)
        self.assertEqual(len(storage2), 4)

        q_seqs1 = [res[2] for res in storage1]
        q_seqs2 = [res[2] for res in storage2]

        # All queue sequence numbers 0..3 present on both sides
        for qseq in range(4):
            for q_seqs in [q_seqs1, q_seqs2]:
                # TODO do we care about the ordering?
                self.assertIn(qseq, q_seqs)
Beispiel #17
0
    def test_unordered_subsequent_acks(self):
        """Concurrent adds after a removal resolve consistently on both sides.

        After removing item 1, its queue sequence number becomes free;
        master and slave then add simultaneously, and both peers must end
        up agreeing on which request got sequence 1 and which got 3.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=25)

        dq = DistributedQueue(node,
                              conn,
                              numQueues=3,
                              throw_local_queue_events=True)
        dq2 = DistributedQueue(node2,
                               conn,
                               numQueues=3,
                               throw_local_queue_events=True)
        dq.connect_to_peer_protocol(dq2, conn)

        storage1 = []
        storage2 = []

        def callback1(result):
            storage1.append(result)

        def callback2(result):
            storage2.append(result)

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        # Add three requests (master)
        dq.add_callback = callback1
        dq2.add_callback = callback2
        for create_id in range(3):
            request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True,
                                       False, False, True)
            dq.add(request=request, qid=0)
        run_time = dq.comm_delay * dq.timeout_factor
        sim_run(run_time)
        self.assertEqual(len(storage1), 3)
        self.assertEqual(len(storage2), 3)
        q_seqs1 = [res[2] for res in storage1]
        q_seqs2 = [res[2] for res in storage2]
        self.assertEqual(q_seqs1, [0, 1, 2])
        self.assertEqual(q_seqs2, [0, 1, 2])

        # Remove one request (such that next queue seq will be 1 again)
        dq.remove_item(0, 1)
        dq2.remove_item(0, 1)
        storage1 = []
        storage2 = []

        # Add requests from master and slave simultaneously
        create_id = 3
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq.add(request=request, qid=0)
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq2.add(request=request, qid=0)

        run_time += dq.comm_delay * dq.timeout_factor
        sim_run(run_time)

        # Two new adds on each side, occupying sequence numbers 1 and 3
        self.assertEqual(len(storage1), 2)
        self.assertEqual(len(storage2), 2)
        q_seqs1 = [res[2] for res in storage1]
        q_seqs2 = [res[2] for res in storage2]
        self.assertIn(1, q_seqs1)
        self.assertIn(3, q_seqs1)
        self.assertIn(1, q_seqs2)
        self.assertIn(3, q_seqs2)
Beispiel #18
0
    def test_lossy_comms_wraparound(self):
        """Queues stay consistent across comms-seq wraparound with one lost message.

        A patched send_msg drops exactly one message (the one whose clock
        matches lost_seq); both peers keep adding until the communication
        sequence number wraps twice, and their queues must match after
        every round.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=0.1)
        dq = DistributedQueue(node,
                              conn,
                              numQueues=3,
                              throw_local_queue_events=True)
        dq2 = DistributedQueue(node2,
                               conn,
                               numQueues=3,
                               throw_local_queue_events=True)
        dq.connect_to_peer_protocol(dq2, conn)

        self.lost_messages = 0
        self.lost_seq = dq.myWsize + 1

        def faulty_send_msg(cmd, data, clock):
            # Drop exactly one message (the first with clock == lost_seq)
            if self.lost_messages == 0 and clock[0] == self.lost_seq:
                self.lost_messages += 1
            else:
                dq.conn.put_from(dq.myID, (cmd, data, clock))

        dq.send_msg = faulty_send_msg

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        num_adds = 0
        r = 1
        curr_time = 0
        # (Removed a leftover debugging `import pdb` that was unused.)
        while num_adds < 2 * dq.maxCommsSeq:
            add_delay = 2
            # One window's worth of adds from the master...
            for i in range(dq.myWsize):
                request = SchedulerRequest(0, 0, 0, 0, i, 0, 0, True, False,
                                           False, True)
                dq.add(request)
                sim_run(curr_time + (i + 1) * add_delay)
                curr_time += add_delay

            # ...then one window's worth from the slave
            for j in range(dq2.myWsize):
                request = SchedulerRequest(0, 0, 0, 0, j, 0, 0, True, False,
                                           False, True)
                dq2.add(request)
                sim_run(curr_time + (j + 1) * add_delay)
                curr_time += add_delay

            num_adds += dq.myWsize
            num_adds += dq2.myWsize
            # Give retransmissions time to settle before checking
            run_time = r * dq.comm_delay * 4
            sim_run(curr_time + run_time)
            curr_time += run_time
            r += 1
            self.ready_items(dq.queueList[0])
            self.ready_items(dq2.queueList[0])
            self.check_local_queues(dq.queueList[0], dq2.queueList[0])

            # Drain the queues so the next round starts empty
            for i in range(dq.myWsize + dq2.myWsize):
                dq.local_pop()
                dq2.local_pop()
Beispiel #19
0
    def test_next(self):
        sim_reset()
        dqpA = EGPDistributedQueue(node=self.nodeA, accept_all=True)
        dqpB = EGPDistributedQueue(node=self.nodeB, accept_all=True)
        dqpA.connect_to_peer_protocol(dqpB)
        qmmA = QuantumMemoryManagement(node=self.nodeA)
        test_scheduler = StrictPriorityRequestScheduler(distQueue=dqpA,
                                                        qmm=qmmA)
        test_scheduler.configure_mhp_timings(1, 2, 0, 0)

        request = EGPRequest(other_id=self.nodeB.nodeID,
                             num_pairs=1,
                             min_fidelity=1,
                             max_time=0,
                             purpose_id=0,
                             priority=0)

        conn = dqpA.conn
        self.network = EasyNetwork(name="DQPNetwork",
                                   nodes=[(self.nodeA, [dqpA]),
                                          (self.nodeB, [dqpB])],
                                   connections=[(conn, "dqp_conn",
                                                 [dqpA, dqpB])])
        self.network.start()

        # Check that an empty queue has a default request
        self.assertEqual(test_scheduler.get_default_gen(),
                         test_scheduler.next())

        # Check that an item not agreed upon also yields a default request
        test_scheduler.add_request(request)
        self.assertEqual(test_scheduler.get_default_gen(),
                         test_scheduler.next())

        for i in range(11):
            sim_run(11)
            test_scheduler.inc_cycle()
            self.assertEqual(test_scheduler.get_default_gen(),
                             test_scheduler.next())
        test_scheduler.inc_cycle()

        # Check that QMM reserve failure yields a default request
        comm_q = qmmA.reserve_communication_qubit()
        storage_q = [
            qmmA.reserve_storage_qubit() for _ in range(request.num_pairs)
        ]
        self.assertEqual(test_scheduler.get_default_gen(),
                         test_scheduler.next())

        # Return the reserved resources
        qmmA.vacate_qubit(comm_q)
        for q in storage_q:
            qmmA.vacate_qubit(q)

        # Check that lack of peer resources causes a default request
        self.assertEqual(test_scheduler.get_default_gen(),
                         test_scheduler.next())

        # Verify that now we can obtain the next request
        test_scheduler.other_mem = (1, request.num_pairs)

        # Verify that the next request is the one we submitted
        gen = test_scheduler.next()
        self.assertEqual(gen, (True, (0, 0), 0, 1, {}))
Beispiel #20
0
    def test_excessive_full_queue(self):
        """Flood a pair of WFQ distributed queues from both peers and verify
        they stay consistent.

        Far more adds are attempted than the small window/maxSeq can hold, so
        many ``add`` calls are expected to raise; those rejections are
        deliberately swallowed.  After each round both peers must agree on
        the queue contents and have empty backlogs, and the items are removed
        to make room for the next round.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=25)

        # Small window and sequence space so the queues overflow quickly.
        wSize = 2
        maxSeq = 6
        dq = WFQDistributedQueue(node,
                                 conn,
                                 numQueues=3,
                                 throw_local_queue_events=True,
                                 accept_all=True,
                                 myWsize=wSize,
                                 otherWsize=wSize,
                                 maxSeq=maxSeq)
        dq2 = WFQDistributedQueue(node2,
                                  conn,
                                  numQueues=3,
                                  throw_local_queue_events=True,
                                  accept_all=True,
                                  myWsize=wSize,
                                  otherWsize=wSize,
                                  maxSeq=maxSeq)
        dq.connect_to_peer_protocol(dq2, conn)

        # Record add/remove events of local queue 0 into a database sequence.
        pm = PM_Controller()
        ds = EGPLocalQueueSequence(name="EGP Local Queue A {}".format(0),
                                   dbFile='test.db')

        pm.addEvent(dq.queueList[0], dq.queueList[0]._EVT_ITEM_ADDED, ds=ds)
        pm.addEvent(dq.queueList[0], dq.queueList[0]._EVT_ITEM_REMOVED, ds=ds)

        storage = []

        def callback(result):
            storage.append(result)

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        dq.add_callback = callback
        dq2.add_callback = callback
        k = 0  # unique create_id for every generated request
        for i in range(1, 3 * dq2.maxSeq):
            for j in range(3 * dq2.maxSeq):
                try:
                    r = WFQSchedulerRequest(0, 0, 0, 0, k, 0, 0, 0, True,
                                            False, False, True)
                    k += 1
                    # Split the adds between the two peers; the mix varies
                    # with i (roughly 1/i of the adds go through dq2).
                    if j % i:
                        dq.add(request=r, qid=0)
                    else:
                        dq2.add(request=r, qid=0)
                except Exception:
                    # Expected: full-queue/window rejections are ignored.
                    pass

            # Advance simulated time far enough for all retransmission
            # timeouts in this round to resolve.
            sim_run(i * dq.comm_delay * dq.timeout_factor + 1)

            # Both peers must have agreed on identical items for every seq.
            # NOTE(review): assumes every seq 0..maxSeq-1 is populated; a
            # missing seq would make qitem None and raise AttributeError.
            for seq in range(dq.maxSeq):
                qitem = dq.queueList[0].sequence_to_item.get(seq)
                q2item = dq2.queueList[0].sequence_to_item.get(seq)
                self.assertEqual(qitem.request, q2item.request)

            # Nothing should be left waiting to be added.
            self.assertEqual(len(dq.backlogAdd), 0)
            self.assertEqual(len(dq2.backlogAdd), 0)

            # Drain both queues so the next round starts from empty.
            for j in range(dq.maxSeq):
                dq.remove_item(0, j)
                dq2.remove_item(0, j)
Beispiel #21
0
    def test_full_queue(self):
        """A distributed queue holding maxSeq items must reject further adds
        with a LinkLayerException, and the slave's add_callback must report
        the error/reject for the overflowing request."""
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=0.0001)

        master_dq = DistributedQueue(node, conn, numQueues=3)
        slave_dq = DistributedQueue(node2, conn, numQueues=3)
        master_dq.connect_to_peer_protocol(slave_dq, conn)

        # Collects every result reported through the slave's add callback.
        received = []

        network = EasyNetwork(
            name="DistQueueNetwork",
            nodes=[(node, [master_dq]), (node2, [slave_dq])],
            connections=[(conn, "dq_conn", [master_dq, slave_dq])])
        network.start()

        # Fill queue 0 completely from the master side.
        for seq in range(1, master_dq.maxSeq + 1):
            master_dq.add(request=seq, qid=0)

        sim_run(1000)

        # One more add must overflow.
        with self.assertRaises(LinkLayerException):
            master_dq.add(request=0, qid=0)

        # Now fill queue 1 completely from the slave side, with callback.
        slave_dq.add_callback = received.append
        for seq in range(1, slave_dq.maxSeq + 1):
            slave_dq.add(request=seq, qid=1)

        sim_run(2000)

        with self.assertRaises(LinkLayerException):
            slave_dq.add(request=slave_dq.maxSeq + 1, qid=1)

        self.assertEqual(len(received), 257)
        self.assertEqual(received[-1],
                         (slave_dq.DQ_ERR, 1, None, slave_dq.maxSeq + 1))
        received.clear()

        # Fill queue 2 alternating between the two peers.
        for j in range(master_dq.maxSeq):
            target = master_dq if j % 2 else slave_dq
            target.add(request=j + 1, qid=2)

        slave_dq.add(request=master_dq.maxSeq + 1, qid=2)
        sim_run(3000)

        with self.assertRaises(LinkLayerException):
            master_dq.add(request=0, qid=2)

        self.assertEqual(len(received), 257)
        self.assertEqual(received[-1],
                         (slave_dq.DQ_REJECT, 2, 0, slave_dq.maxSeq + 1))
Beispiel #22
0
    def test_remove(self):
        """Randomly fill two mirrored distributed queues, then verify local
        removal and in-order popping of the remaining items.

        Fixes applied in review:
        * ``assertIsNotNone(q_item)`` now runs *before* ``q_item.seq`` is
          dereferenced (previously an AttributeError would mask the assert),
        * the remaining sequence numbers are iterated via ``sorted(...)``
          instead of relying on CPython's set iteration order for small ints,
          which is an implementation detail.
        """
        # Set up two nodes and run a simulation in which items
        # are randomly added at specific time intervals
        sim_reset()
        alice = QuantumNode("Alice", 1)
        bob = QuantumNode("Bob", 2)

        conn = ClassicalFibreConnection(alice, bob, length=.0001)
        aliceDQ = DistributedQueue(alice, conn)
        bobDQ = DistributedQueue(bob, conn)

        aliceProto = TestProtocol(alice, aliceDQ, 1)
        bobProto = TestProtocol(bob, bobDQ, 1)

        nodes = [
            (alice, [aliceProto]),
            (bob, [bobProto]),
        ]
        conns = [(conn, "dqp_conn", [aliceProto, bobProto])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        sim_run(1000)

        # Check the Queue contains ordered elements from Alice and Bob
        queueA = aliceDQ.queueList[0]
        queueB = bobDQ.queueList[0]
        qA = queueA.sequence_to_item
        qB = queueB.sequence_to_item

        # Make all the items ready
        for seq in qA:
            queueA.ack(seq)
            queueA.ready(seq)
        for seq in qB:
            queueB.ack(seq)
            queueB.ready(seq)

        # First they should have the same length
        self.assertGreater(len(qA), 0)
        self.assertEqual(len(qA), len(qB))

        # Check the items are the same and the sequence numbers are ordered
        for count in range(len(qA)):
            self.assertEqual(qA[count].request, qB[count].request)
            self.assertEqual(qA[count].seq, qB[count].seq)
            self.assertEqual(qA[count].seq, count)

        # Check that we can remove some items locally.
        # NOTE(review): only Alice's side is exercised here; removal is
        # presumably mirrored at Bob via the protocol — confirm if intended.
        rqid = 0
        rqseqs = {randint(0, len(qA) - 1) for _ in range(10)}
        for qseq in rqseqs:
            q_item = aliceDQ.remove_item(rqid, qseq)
            self.assertIsNotNone(q_item)
            self.assertFalse(aliceDQ.queueList[rqid].contains(qseq))

        # Check that we can pop the remaining items in ascending seq order.
        # Iterate in sorted order explicitly for deterministic expectations.
        remaining = sorted(set(range(len(qB))) - rqseqs)
        for qseq in remaining:
            q_item = aliceDQ.local_pop(rqid)
            # Guard against a failed pop before touching .seq
            self.assertIsNotNone(q_item)
            self.assertEqual(q_item.seq, qseq)
Beispiel #23
0
    def test_lost_add_slave(self):
        """Verify that ADD/ADD-ACK message loss is recovered by retransmission.

        The slave's first transmission of every CMD_ADD and the master's first
        transmission of every CMD_ADD_ACK are deliberately dropped; after the
        retransmission timeouts fire, all items must still end up in both
        queues, in submission order.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=0.0001)

        dq = DistributedQueue(node, conn, numQueues=3)
        dq2 = DistributedQueue(node2, conn, numQueues=3)
        dq.connect_to_peer_protocol(dq2, conn)

        # Tracks how often each message key has been deliberately dropped.
        self.lost_messages = defaultdict(int)

        def faulty_send_add(cmd, data, clock):
            # Drop the first CMD_ADD for each (cseq, qid, qseq); pass
            # retransmissions and every other command through untouched.
            if cmd == dq2.CMD_ADD:
                _, cseq, qid, qseq, request = data
                if self.lost_messages[(cseq, qid, qseq)] >= 1:
                    dq2.conn.put_from(dq2.myID, (cmd, data, clock))
                else:
                    self.lost_messages[(cseq, qid, qseq)] += 1
            else:
                dq2.conn.put_from(dq2.myID, (cmd, data, clock))

        def faulty_send_ack(cmd, data, clock):
            # Likewise drop the first CMD_ADD_ACK for each (ackd_id, qseq).
            if cmd == dq.CMD_ADD_ACK:
                _, ackd_id, qseq = data
                if self.lost_messages[(ackd_id, qseq)] >= 1:
                    dq.conn.put_from(dq.myID, (cmd, data, clock))
                else:
                    self.lost_messages[(ackd_id, qseq)] += 1
            else:
                dq.conn.put_from(dq.myID, (cmd, data, clock))

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        # Install the lossy senders before the network starts.
        dq2.send_msg = faulty_send_add
        dq.send_msg = faulty_send_ack

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        # Submit requests from the slave, slightly staggered in time.
        reqs = []
        num_reqs = 10
        for i in range(num_reqs):
            req = [node2.nodeID, i]
            reqs.append(req)
            dq2.add(req)
            sim_run(0.1 * (i + 1))

        # Run long enough for all retransmission timeouts to fire.
        sim_run(200)

        # Check that all add and add_ack messages were lost once
        for v in self.lost_messages.values():
            self.assertEqual(v, 1)

        # Check that the item successfully got added
        self.assertEqual(len(dq.queueList[0].queue), 10)
        self.assertEqual(len(dq2.queueList[0].queue), 10)

        # Both queues must contain the requests in submission order.
        for i, expected_req in zip(range(num_reqs), reqs):
            item1 = dq.queueList[0].queue[i]
            item2 = dq2.queueList[0].queue[i]
            self.assertEqual(item1.request, expected_req)
            self.assertEqual(item2.request, expected_req)
        # Mark that we are waiting for an ack for this
        self.waitAddAcks[self.comms_seq] = [qid, 0, request]

        # Increment acks we are waiting for
        self.acksWaiting = self.acksWaiting + 1

        # Increment our own sequence number of this request to add
        self.comms_seq = (self.comms_seq + 1) % self.maxSeq


# Smoke-test script: wire up two nodes over a classical connection, run their
# DistributedQueue protocols, and push a single item through the queue.
sim_reset()
logger = setup_logging("GLOBAL", "logFile", level=logging.DEBUG)
alice = QuantumNode("Alice", 1, logger=logger)
bob = QuantumNode("Bob", 2, logger=logger)

conn = ClassicalConnection(1, 2)
aliceProto = DistributedQueue(alice, conn)
bobProto = DistributedQueue(bob, conn)

# Register the queue protocols on both endpoints and start the nodes.
alice.setup_connection(2, conn, classicalProtocol=aliceProto)
bob.setup_connection(1, conn, classicalProtocol=bobProto)
alice.start()
bob.start()

# Handshake between the peers before adding anything.
aliceProto.send_hello()

# Enqueue one item from Alice's side and let the simulation settle.
aliceProto.add("Foo")

sim_run(500)
Beispiel #25
0
    def test_random_add_remove(self):
        """Randomised add/pop churn on two distributed queues, followed by a
        consistency check between the peers' local views.

        Each of 20 rounds adds 0-3 requests on each side, pops up to 6 items
        from the master, and advances simulated time by a random fraction of
        the communication timeout — so messages may still be in flight when
        the next round starts.
        """
        sim_reset()
        node = QuantumNode("TestNode 1", 1)
        node2 = QuantumNode("TestNode 2", 2)
        conn = ClassicalFibreConnection(node, node2, length=25)

        dq = DistributedQueue(node,
                              conn,
                              numQueues=3,
                              throw_local_queue_events=True)
        dq2 = DistributedQueue(node2,
                               conn,
                               numQueues=3,
                               throw_local_queue_events=True)
        dq.connect_to_peer_protocol(dq2, conn)

        nodes = [
            (node, [dq]),
            (node2, [dq2]),
        ]
        conns = [(conn, "dq_conn", [dq, dq2])]

        network = EasyNetwork(name="DistQueueNetwork",
                              nodes=nodes,
                              connections=conns)
        network.start()

        create_id = 0
        for _ in range(20):
            # Add random requests to master
            num_reqs_master = randint(0, 3)
            for _ in range(num_reqs_master):
                request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True,
                                           False, False, True)
                try:
                    dq.add(request=request, qid=0)
                except LinkLayerException:
                    # Full queue
                    pass
                create_id += 1

            # Add random requests to slave
            num_reqs_slave = randint(0, 3)
            for _ in range(num_reqs_slave):
                request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True,
                                           False, False, True)
                try:
                    dq2.add(request=request, qid=0)
                except LinkLayerException:
                    # Full queue
                    pass
                create_id += 1

            # Randomly pop items from the master's local queue.
            # NOTE(review): only dq is popped here, not dq2 — presumably the
            # pop is mirrored at the peer by the protocol; confirm against
            # DistributedQueue.local_pop before relying on this.
            num_pop = randint(0, 6)
            for _ in range(num_pop):
                dq.local_pop(qid=0)

            # Run for random fraction of timeout
            r = randint(1, 20)
            run_time = dq.comm_delay * dq.timeout_factor * (r / 10)
            sim_run(run_time)

        # Make sure things are not in flight
        sim_run()

        # Mark everything ready, then compare the two local views item-by-item.
        self.ready_items(dq.queueList[0])
        self.ready_items(dq2.queueList[0])
        self.check_local_queues(dq.queueList[0], dq2.queueList[0])

        # Add one request for both master and slave
        create_id = 0
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq.add(request=request, qid=0)
        create_id = 1
        request = SchedulerRequest(0, 0, 0, 0, create_id, 0, 0, True, False,
                                   False, True)
        dq2.add(request=request, qid=0)
Beispiel #26
0
    def test_rules(self):
        """Acceptance rules gate remote adds on a FilteredDistributedQueue.

        Without a matching (nodeID, purpose_id) rule on Bob's side a request
        from Alice is rejected; once the rule is installed the same request is
        accepted, and removing the rule makes subsequent adds fail again.
        """
        sim_reset()
        alice = QuantumNode("Alice", 1)
        bob = QuantumNode("Bob", 2)

        self.result = None

        def add_callback(result):
            self.result = result

        conn = ClassicalFibreConnection(alice, bob, length=.0001)
        alice_dq = FilteredDistributedQueue(alice, conn)
        alice_dq.add_callback = add_callback
        bob_dq = FilteredDistributedQueue(bob, conn)
        bob_dq.add_callback = add_callback

        network = EasyNetwork(
            name="DistQueueNetwork",
            nodes=[(alice, [alice_dq]), (bob, [bob_dq])],
            connections=[(conn, "dqp_conn", [alice_dq, bob_dq])])
        network.start()

        request = SchedulerRequest(num_pairs=1,
                                   min_fidelity=0.5,
                                   timeout_cycle=10,
                                   purpose_id=0,
                                   priority=10)

        def check_outcome(status, qid, qseq):
            # The callback reports a tuple (status, qid, qseq, ..., request).
            self.assertIsNotNone(self.result)
            self.assertEqual(self.result[-1], request)
            self.assertEqual(self.result[:3], (status, qid, qseq))

        # Without any accept rule in place the request must be rejected
        alice_dq.add(request)
        sim_run(2)
        check_outcome(alice_dq.DQ_REJECT, 0, 0)
        self.result = None

        # After installing the matching rule the same request is accepted
        bob_dq.add_accept_rule(nodeID=alice.nodeID, purpose_id=0)
        alice_dq.add(request)
        sim_run(4)
        check_outcome(alice_dq.DQ_OK, 0, 0)
        self.result = None

        # Removing the rule rejects again; the queue seq has advanced to 1
        bob_dq.remove_accept_rule(nodeID=alice.nodeID, purpose_id=0)
        alice_dq.add(request)
        sim_run(6)
        check_outcome(alice_dq.DQ_REJECT, 0, 1)
    def test_run_protocol(self):
        """Run a MixedScenario against a stubbed EGP create call and check
        that the generated request mix matches the configured probabilities
        and that each request carries the parameters of its type."""
        paramsA = {
            "num_pairs": 1,
            "tmax_pair": 0,
            "min_fidelity": 0.9,
            "purpose_id": 0,
            "priority": 0,
            "store": True,
            "atomic": False,
            "measure_directly": False
        }
        paramsB = {
            "num_pairs": 2,
            "tmax_pair": 1,
            "min_fidelity": 0.8,
            "purpose_id": 0,
            "priority": 1,
            "store": False,
            "atomic": False,
            "measure_directly": True
        }
        paramsC = {
            "num_pairs": [3, 4],
            "tmax_pair": 2,
            "min_fidelity": 0.7,
            "purpose_id": 0,
            "priority": 2,
            "store": True,
            "atomic": True,
            "measure_directly": False
        }
        request_params = {
            "A": {"prob": 0.1, "params": paramsA},
            "B": {"prob": 0.3, "params": paramsB},
            "C": {"prob": 0.5, "params": paramsC},
        }

        scen = MixedScenario(self.egp, 1, request_params)

        # Capture the EGP requests instead of actually submitting them.
        requests = []

        def fake_create(cqc_request_raw):
            requests.append(NodeCentricEGP._get_egp_request(cqc_request_raw))

        scen._create = fake_create

        scen.start()

        num_cycles = 5000
        sim_run(num_cycles)

        # Each type's request rate should be prob / (average pairs per
        # request), since a multi-pair request spans several cycles.
        priorities = [req.priority for req in requests]
        fractions = [priorities.count(p) / num_cycles for p in range(3)]
        avg_num_pairs = {"A": 1, "B": 2, "C": 3.5}
        ideal_fractions = [
            request_params[name]["prob"] / avg_num_pairs[name]
            for name in ("A", "B", "C")
        ]
        for observed, expected in zip(fractions, ideal_fractions):
            self.assertAlmostEqual(observed, expected, places=1)

        # Every request must carry exactly the parameters of its type.
        Crequests = []
        for req in requests:
            if req.priority in (0, 1):
                params = paramsA if req.priority == 0 else paramsB
                self.assertEqual(req.num_pairs, params["num_pairs"])
            else:
                params = paramsC
                Crequests.append(req)
            self.assertAlmostEqual(req.max_time,
                                   params["tmax_pair"] * req.num_pairs)
            self.assertAlmostEqual(req.min_fidelity, params["min_fidelity"])
            self.assertEqual(req.purpose_id, params["purpose_id"])
            self.assertEqual(req.store, params["store"])
            self.assertEqual(req.atomic, params["atomic"])
            self.assertEqual(req.measure_directly, params["measure_directly"])

        # Type C draws num_pairs from a list, weighted inversely to the
        # pair count; the observed split should match that weighting.
        c_pair_counts = [req.num_pairs for req in Crequests]
        inverse_weights = [1 / n for n in paramsC["num_pairs"]]
        weight_total = sum(inverse_weights)
        frac_num_pairs = [c_pair_counts.count(n) / len(Crequests)
                          for n in paramsC["num_pairs"]]
        ideal_frac_num_pairs = [w / weight_total for w in inverse_weights]
        for observed, expected in zip(frac_num_pairs, ideal_frac_num_pairs):
            self.assertAlmostEqual(observed, expected, places=1)
def run_simulation(tmp_filebasename, final_filebasename, sim_dir, request_paramsA, request_paramsB, name=None,
                   config=None, num_priorities=1, egp_queue_weights=None, request_cycle=0, max_sim_time=float('inf'),
                   max_wall_time=float('inf'), max_mhp_cycle=float('inf'), enable_pdb=False, t0=0, t_cycle=0,
                   alphaA=0.1, alphaB=0.1, wall_time_per_timestep=60, save_additional_data=True,
                   collect_queue_data=False, log_to_file=True, log_level=None, filter_debug_logging=True,
                   log_to_console=False):

    # Setup logging
    if log_to_file:
        log_file = "{}_log.out".format(final_filebasename)
        if log_level is not None:
            setup_logging(log_to_file=log_file, file_log_level=log_level, log_to_console=log_to_console)
        else:
            setup_logging(log_to_file=log_file, log_to_console=log_to_console)
    else:
        if log_level is not None:
            setup_logging(console_log_level=log_level, log_to_console=log_to_console)
        else:
            setup_logging(log_to_console=log_to_console)

    logger.info("Starting simulation using a temporary data storage at {}".format(tmp_filebasename))

    # Set the current datacollection version
    set_datacollection_version(tmp_filebasename)

    # Save additional data
    if save_additional_data:
        additional_data = {}
    else:
        additional_data = None
    # Set up the simulation
    setup_simulation()

    # Get absolute path to config
    abs_config_path = sim_dir + config

    # Create the network
    network = setup_physical_network(abs_config_path)
    # Recompute the timings of the heralded connection, depending on measure_directly
    nodeA = network.get_node_by_id(0)
    nodeB = network.get_node_by_id(1)
    # print(nodeA.qmem._memory_positions[0]._connections[1])
    # print(nodeA.qmem._memory_positions[0].get_gate(CNOTGate(), 1))
    # raise RuntimeError()
    mhp_conn = network.get_connection(nodeA, nodeB, "mhp_conn")
    mhp_conn.set_timings(t_cycle=t_cycle, t0=t0)
    if save_additional_data:
        additional_data["mhp_t_cycle"] = mhp_conn.t_cycle

    # Setup entanglement generation protocols
    egpA, egpB = setup_network_protocols(network, alphaA=alphaA, alphaB=alphaB, num_priorities=num_priorities,
                                         egp_queue_weights=egp_queue_weights, collect_queue_data=collect_queue_data)
    if save_additional_data:
        additional_data["alphaA"] = alphaA
        additional_data["alphaB"] = alphaB

    # Get start time
    start_time = time()

    # Check if any max_times should be infinite
    if max_wall_time == 0:
        max_wall_time = float('inf')
    if max_sim_time == 0:
        max_sim_time = float('inf')
    if max_mhp_cycle == 0:
        max_mhp_cycle = float('inf')
    max_sim_time = min(max_sim_time, mhp_conn.t_cycle * max_mhp_cycle / SECOND)

    # Create scenarios which act as higher layers communicating with the EGPs
    alice_scenario, bob_scenario = create_scenarios(egpA, egpB, request_cycle, request_paramsA, request_paramsB,
                                                    additional_data)

    # Hook up data collectors to the scenarios
    collectors = setup_data_collection(alice_scenario, bob_scenario, max_sim_time, tmp_filebasename,
                                       collect_queue_data=collect_queue_data)

    # Start the simulation
    network.start()

    # Debugging
    if enable_pdb:
        pdb.set_trace()

    # Start with a step size of 1 millisecond
    timestep = min(1e3, max_sim_time * SECOND)

    logger.info("Beginning simulation")

    # Keep track of the number of steps taken
    timesteps_taken = 0

    # Keep track of total wall time (not including preparation)
    wall_time_of_simulation = 0

    try:
        # Run simulation
        while sim_time() < max_sim_time * SECOND:
            # Check wall time during this simulation step
            wall_time_sim_step_start = time()
            if timestep == float('inf') or timestep == -float('inf'):
                raise RuntimeError()
            sim_run(duration=timestep)
            previous_timestep = timestep
            wall_time_sim_step_stop = time()
            wall_time_sim_step_duration = wall_time_sim_step_stop - wall_time_sim_step_start
            wall_time_of_simulation += wall_time_sim_step_duration
            wall_time_s_per_real_time_ns = wall_time_of_simulation / sim_time()
            timesteps_taken += 1

            # Check if wall_time_sim_step_duration is zero
            if wall_time_sim_step_duration == 0:
                # Just make the timestep 10 times bigger since last duration went very quick
                timestep = timestep * 10
            else:
                # Update the timestep such that one timestep takes 'wall_time_per_timestep' seconds
                timestep = wall_time_per_timestep / wall_time_s_per_real_time_ns

            # Don't use a timestep that goes beyong max_sim_time
            if (sim_time() + timestep) > (max_sim_time * SECOND):
                timestep = (max_sim_time * SECOND) - sim_time()

            # Check clock once in a while
            now = time()
            logger.info(
                "Wall clock advanced {} s during the last {} s real time. Will now advance {} s real time.".format(
                    wall_time_sim_step_duration, previous_timestep / SECOND, timestep / SECOND))
            mhp_cycles = math.floor(sim_time() / mhp_conn.t_cycle)
            if name:
                info_message = "Simulation: \"{}\": ".format(name)
                info_message += "Time advanced: {}/{} s real time.  ".format(sim_time() / SECOND, max_sim_time)
                info_message += "{}/{} s wall time. ".format(now - start_time, max_wall_time)
                info_message += "{}/{} MHP cycles".format(mhp_cycles, max_mhp_cycle)
                logger.info(info_message)

            else:
                info_message = "Time advanced: {}/{} s real time.  ".format(sim_time() / SECOND, max_sim_time)
                info_message += "{}/{} s wall time. ".format(now - start_time, max_wall_time)
                info_message += "{}/{} MHP cycles".format(mhp_cycles, max_mhp_cycle)
                logger.info(info_message)

            # Save additional data relevant for the simulation
            if save_additional_data:
                # Collect simulation times
                additional_data["total_real_time"] = sim_time()
                additional_data["total_wall_time"] = time() - start_time

                # Collect probability of success
                midpoint = mhp_conn.midPoint
                if midpoint._nr_of_meas > 0:
                    additional_data["p_succ"] = midpoint._nr_of_succ / midpoint._nr_of_meas
                else:
                    additional_data["p_succ"] = None
                with open(tmp_filebasename + "_additional_data.json", 'w') as json_file:
                    json.dump(additional_data, json_file, indent=4)

            # Commit the data collected in the data-sequences
            for collector in collectors:
                collector.commit_data()

            if now - start_time > max_wall_time:
                logger.info("Max wall time reached, ending simulation.")
                break

            if log_to_file and filter_debug_logging:
                clean_log_files(log_file)

        stop_time = time()
        logger.info("Finished simulation, took {} (s) wall time and {} (s) real time".format(stop_time - start_time,
                                                                                             sim_time() / SECOND))

        if log_to_file and filter_debug_logging:
            clean_log_files(log_file)

    # Allow for Ctrl-C-ing out of a simulation in a manner that commits data to the databases
    except Exception:
        logger.exception("Something went wrong. Ending simulation early!")

    finally:
        # Debugging
        if enable_pdb:
            pdb.set_trace()