def test_irregular_graph(self):
    """Tests graphs that are not necessarily regular: some sources and
    destinations have no edges."""
    generator = graph_util()
    num_experiments = 100
    n_nodes = 256  # network with 8 racks of 32 nodes each
    n_racks = n_nodes / structures.MAX_NODES_PER_RACK

    for i in range(num_experiments):
        # generate admitted traffic
        g_p = generator.generate_random_regular_bipartite(n_nodes, 1)

        # choose a number of edges to remove
        num_edges_to_remove = random.randint(1, 256)

        # remove edges
        for j in range(num_edges_to_remove):
            while True:
                # choose a source node at random and remove one of its edges
                index = random.randint(0, n_nodes - 1)
                edge = g_p.edges(index)
                if edge != []:
                    edge_tuple = edge[0]
                    g_p.remove_edge(edge_tuple[0], edge_tuple[1])
                    break

        admitted = structures.create_admitted_traffic()
        admitted_copy = structures.create_admitted_traffic()
        for edge in g_p.edges_iter():
            structures.insert_admitted_edge(admitted, edge[0],
                                            edge[1] - n_nodes)
            structures.insert_admitted_edge(admitted_copy, edge[0],
                                            edge[1] - n_nodes)

        # select paths
        pathselection.select_paths(admitted, n_racks)

        # check that path assignments are valid
        self.assertTrue(pathselection.paths_are_valid(admitted, n_racks))

        # check that src addrs and lower bits of dst addrs are unchanged
        for e in range(admitted.size):
            edge = structures.get_admitted_edge(admitted, e)
            edge_copy = structures.get_admitted_edge(admitted_copy, e)
            self.assertEqual(edge.src, edge_copy.src)
            self.assertEqual(edge.dst & pathselection.PATH_MASK,
                             edge_copy.dst & pathselection.PATH_MASK)

        # clean up
        structures.destroy_admitted_traffic(admitted)
        structures.destroy_admitted_traffic(admitted_copy)
def test_regular_graph(self):
    """Basic test involving graphs that are already regular."""
    generator = graph_util()
    num_experiments = 10
    n_nodes = 256  # network with 8 racks of 32 nodes each
    n_racks = n_nodes / structures.MAX_NODES_PER_RACK

    for i in range(num_experiments):
        # generate admitted traffic
        g_p = generator.generate_random_regular_bipartite(n_nodes, 1)

        admitted = structures.create_admitted_traffic()
        admitted_copy = structures.create_admitted_traffic()
        for edge in g_p.edges_iter():
            structures.insert_admitted_edge(admitted, edge[0],
                                            edge[1] - n_nodes)
            structures.insert_admitted_edge(admitted_copy, edge[0],
                                            edge[1] - n_nodes)

        # select paths
        pathselection.select_paths(admitted, n_racks)

        # check that path assignments are valid
        self.assertTrue(pathselection.paths_are_valid(admitted, n_racks))

        # check that src addrs and lower bits of dst addrs are unchanged
        for e in range(admitted.size):
            edge = structures.get_admitted_edge(admitted, e)
            edge_copy = structures.get_admitted_edge(admitted_copy, e)
            self.assertEqual(edge.src, edge_copy.src)
            self.assertEqual(edge.dst & pathselection.PATH_MASK,
                             edge_copy.dst & pathselection.PATH_MASK)

        # clean up
        structures.destroy_admitted_traffic(admitted)
        structures.destroy_admitted_traffic(admitted_copy)
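# The two path-selection tests above build the admitted-traffic structures
# from a bipartite graph in exactly the same way. A shared helper along these
# lines could factor that out; this is only a sketch, the method name
# _admitted_from_bipartite is hypothetical, and it uses only the structures
# calls that already appear in the tests above.
def _admitted_from_bipartite(self, g_p, n_nodes):
    """Builds an admitted_traffic struct (plus an untouched copy) from a
    bipartite graph whose destination node ids are offset by n_nodes."""
    admitted = structures.create_admitted_traffic()
    admitted_copy = structures.create_admitted_traffic()
    for edge in g_p.edges_iter():
        structures.insert_admitted_edge(admitted, edge[0], edge[1] - n_nodes)
        structures.insert_admitted_edge(admitted_copy, edge[0],
                                        edge[1] - n_nodes)
    return admitted, admitted_copy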
def test_oversubscribed(self):
    """Tests networks that are oversubscribed on the uplinks from and
    downlinks to racks."""
    # initialization
    q_bin = fpring.fp_ring_create(structures.NUM_BINS_SHIFT)
    q_urgent = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT + 1)
    q_head = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT)
    q_admitted_out = fpring.fp_ring_create(structures.BATCH_SHIFT)
    core = structures.create_admission_core_state()
    structures.alloc_core_init(core, q_bin, q_bin, q_urgent, q_urgent)
    status = structures.create_admissible_status(True, 2, 0, 128, q_head,
                                                 q_admitted_out)
    admitted_batch = structures.create_admitted_batch()
    for i in range(0, structures.NUM_BINS):
        empty_bin = structures.create_bin(structures.LARGE_BIN_SIZE)
        fpring.fp_ring_enqueue(q_bin, empty_bin)
    admissible.enqueue_head_token(q_urgent)

    # Make requests that could overfill the links above the ToRs
    admissible.add_backlog(status, 0, 32, 1)
    admissible.add_backlog(status, 1, 64, 1)
    admissible.add_backlog(status, 2, 96, 1)
    admissible.add_backlog(status, 33, 65, 1)
    admissible.add_backlog(status, 97, 66, 1)

    # Get admissible traffic
    admissible.get_admissible_traffic(core, status, admitted_batch, 0, 1, 0)

    # Check that we admitted at most 2 packets for each of the
    # oversubscribed links
    admitted = admissible.dequeue_admitted_traffic(status)
    rack_0_out = 0
    rack_2_in = 0
    for e in range(admitted.size):
        edge = structures.get_admitted_edge(admitted, e)
        if structures.get_rack_from_id(edge.src) == 0:
            rack_0_out += 1
        if structures.get_rack_from_id(edge.dst) == 2:
            rack_2_in += 1
    self.assertEqual(rack_0_out, 2)
    self.assertEqual(rack_2_in, 2)

    # should clean up memory
def test_out_of_boundary(self):
    """Tests traffic to destinations outside the scheduling boundary."""
    # initialization
    q_bin = fpring.fp_ring_create(structures.NUM_BINS_SHIFT)
    q_urgent = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT + 1)
    q_head = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT)
    q_admitted_out = fpring.fp_ring_create(structures.BATCH_SHIFT)
    core = structures.create_admission_core_state()
    structures.alloc_core_init(core, q_bin, q_bin, q_urgent, q_urgent)
    status = structures.create_admissible_status(False, 0, 2, 6, q_head,
                                                 q_admitted_out)
    admitted_batch = structures.create_admitted_batch()
    for i in range(0, structures.NUM_BINS):
        empty_bin = structures.create_bin(structures.LARGE_BIN_SIZE)
        fpring.fp_ring_enqueue(q_bin, empty_bin)
    admissible.enqueue_head_token(q_urgent)

    # Make requests that could overfill the links out of the scheduling
    # boundary
    dst = structures.OUT_OF_BOUNDARY_NODE_ID
    admissible.add_backlog(status, 0, dst, 1)
    admissible.add_backlog(status, 1, dst, 1)
    admissible.add_backlog(status, 2, dst, 1)
    admissible.add_backlog(status, 3, dst, 1)
    admissible.add_backlog(status, 4, dst, 1)
    admissible.add_backlog(status, 5, dst, 1)

    # Get admissible traffic
    admissible.get_admissible_traffic(core, status, admitted_batch, 0, 1, 0)

    # Check that we admitted at most 2 out-of-boundary packets per timeslot
    # for the first 3 timeslots
    for i in range(0, 3):
        admitted_i = admissible.dequeue_admitted_traffic(status)
        self.assertEqual(admitted_i.size, 2)
        for e in range(admitted_i.size):
            edge = structures.get_admitted_edge(admitted_i, e)
            self.assertEqual(edge.src, 2 * i + e)

    # Check that we admitted none for the remainder of the batch
    for i in range(3, structures.BATCH_SIZE):
        admitted_i = admissible.dequeue_admitted_traffic(status)
        self.assertEqual(admitted_i.size, 0)

    # should clean up memory
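# Every admissible test repeats the same ring/core/status initialization. A
# shared helper along the following lines could factor it out. This is only a
# sketch: the method name _init_admissible is hypothetical, the argument names
# are guesses based on how the tests above call create_admissible_status, and
# only calls that already appear in those tests are assumed to exist.
def _init_admissible(self, oversubscribed, inter_rack_capacity,
                     out_of_boundary_capacity, num_nodes):
    """Creates the rings, admission core state, and admissible status used
    by the admission tests, seeds the bin ring with empty bins, and hands
    the head token to the (single) core."""
    q_bin = fpring.fp_ring_create(structures.NUM_BINS_SHIFT)
    q_urgent = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT + 1)
    q_head = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT)
    q_admitted_out = fpring.fp_ring_create(structures.BATCH_SHIFT)
    core = structures.create_admission_core_state()
    structures.alloc_core_init(core, q_bin, q_bin, q_urgent, q_urgent)
    status = structures.create_admissible_status(
        oversubscribed, inter_rack_capacity, out_of_boundary_capacity,
        num_nodes, q_head, q_admitted_out)
    admitted_batch = structures.create_admitted_batch()
    for i in range(0, structures.NUM_BINS):
        empty_bin = structures.create_bin(structures.LARGE_BIN_SIZE)
        fpring.fp_ring_enqueue(q_bin, empty_bin)
    admissible.enqueue_head_token(q_urgent)
    return core, status, admitted_batch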
def run_round_robin_admissible(self):
    # initialization
    q_bin = fpring.fp_ring_create(structures.NUM_BINS_SHIFT)
    q_urgent = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT + 1)
    q_head = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT)
    q_admitted_out = fpring.fp_ring_create(structures.BATCH_SHIFT)
    core = structures.create_admission_core_state()
    structures.alloc_core_init(core, q_bin, q_bin, q_urgent, q_urgent)
    status = structures.create_admissible_status(False, 0, 0, self.num_nodes,
                                                 q_head, q_admitted_out)
    admitted_batch = structures.create_admitted_batch()
    for i in range(0, structures.NUM_BINS):
        empty_bin = structures.create_bin(structures.LARGE_BIN_SIZE)
        fpring.fp_ring_enqueue(q_bin, empty_bin)
    admissible.enqueue_head_token(q_urgent)

    num_admitted = 0
    num_requested = 0

    # TODO: can we run this so that request arrivals are interleaved with
    # getting admissible traffic? That would be more realistic.
    current_request = 0
    req_tuple = self.requests[current_request]
    req = req_tuple[0]
    req_size = req_tuple[1]
    for t in range(self.duration):
        # Issue new requests
        while int(req.time) == t:
            num_requested += req_size
            admissible.add_backlog(status, req.src, req.dst, req_size)
            self.pending_requests[(req.src, req.dst)].append(
                pending_request(req_size, t))
            current_request += 1
            req_tuple = self.requests[current_request]
            req = req_tuple[0]
            req_size = req_tuple[1]

        if t % structures.BATCH_SIZE != structures.BATCH_SIZE - 1:
            continue

        # Get admissible traffic for this batch
        admissible.get_admissible_traffic(core, status, admitted_batch,
                                          0, 1, 0)

        # Record stats
        for i in range(structures.BATCH_SIZE):
            admitted_i = admissible.dequeue_admitted_traffic(status)
            if t > self.warm_up_duration:
                num_admitted += admitted_i.size
            for e in range(admitted_i.size):
                edge = structures.get_admitted_edge(admitted_i, e)
                req_list = self.pending_requests[(edge.src, edge.dst)]
                if len(req_list) < 1:
                    raise AssertionError
                req_list[0].size -= 1
                if req_list[0].size == 0:
                    if t > self.warm_up_duration:
                        # record flow completion time
                        last_t_slot = t + i
                        fct = last_t_slot - req_list[0].request_time
                        self.flow_completion_times.append(
                            (fct, req_list[0].request_time, last_t_slot))
                    del req_list[0]

    capacity = (self.duration - self.warm_up_duration) * self.num_nodes
    observed_util = float(num_admitted) / capacity

    # should clean up memory

    return observed_util
def test_reset_sender(self):
    """Tests resetting a sender."""
    # initialization
    q_bin = fpring.fp_ring_create(structures.NUM_BINS_SHIFT)
    q_urgent = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT + 1)
    q_head = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT)
    q_admitted_out = fpring.fp_ring_create(structures.BATCH_SHIFT)
    core = structures.create_admission_core_state()
    structures.alloc_core_init(core, q_bin, q_bin, q_urgent, q_urgent)
    status = structures.create_admissible_status(False, 0, 0, 21, q_head,
                                                 q_admitted_out)
    admitted_batch = structures.create_admitted_batch()
    for i in range(0, structures.NUM_BINS):
        empty_bin = structures.create_bin(structures.LARGE_BIN_SIZE)
        fpring.fp_ring_enqueue(q_bin, empty_bin)
    admissible.enqueue_head_token(q_urgent)

    # Make requests
    admissible.add_backlog(status, 0, 10, structures.BATCH_SIZE)
    admissible.add_backlog(status, 1, 10, structures.BATCH_SIZE)
    admissible.add_backlog(status, 0, 20, structures.BATCH_SIZE)

    # Get admissible traffic
    admissible.get_admissible_traffic(core, status, admitted_batch, 0, 1, 0)

    # Check admitted traffic
    for i in range(structures.BATCH_SIZE):
        admitted_i = admissible.dequeue_admitted_traffic(status)
        if i % 2 == 0:
            self.assertEqual(admitted_i.size, 1)
            edge = structures.get_admitted_edge(admitted_i, 0)
            self.assertEqual(edge.src, 0)
            self.assertEqual(edge.dst, 10)
        else:
            self.assertEqual(admitted_i.size, 2)
            edge_0 = structures.get_admitted_edge(admitted_i, 0)
            self.assertEqual(edge_0.src, 1)
            self.assertEqual(edge_0.dst, 10)
            edge_1 = structures.get_admitted_edge(admitted_i, 1)
            self.assertEqual(edge_1.src, 0)
            self.assertEqual(edge_1.dst, 20)

    # Reset src 0
    admissible.reset_sender(status, 0)

    # Get admissible traffic again
    admissible.get_admissible_traffic(core, status, admitted_batch, 0, 1, 0)

    # Check that we admit only one more packet for each of src 0's
    # pending flows
    for i in range(structures.BATCH_SIZE):
        admitted_i = admissible.dequeue_admitted_traffic(status)
        if i == 0:
            self.assertEqual(admitted_i.size, 1)
            edge = structures.get_admitted_edge(admitted_i, 0)
            self.assertEqual(edge.src, 0)
            self.assertEqual(edge.dst, 10)
        elif i == 1:
            self.assertEqual(admitted_i.size, 2)
            edge_0 = structures.get_admitted_edge(admitted_i, 0)
            self.assertEqual(edge_0.src, 1)
            self.assertEqual(edge_0.dst, 10)
            edge_1 = structures.get_admitted_edge(admitted_i, 1)
            self.assertEqual(edge_1.src, 0)
            self.assertEqual(edge_1.dst, 20)
        elif i < structures.BATCH_SIZE / 2 + 1:
            self.assertEqual(admitted_i.size, 1)
            edge = structures.get_admitted_edge(admitted_i, 0)
            self.assertEqual(edge.src, 1)
            self.assertEqual(edge.dst, 10)
        else:
            self.assertEqual(admitted_i.size, 0)

    # should clean up memory
def test_many_requests(self):
    """Tests the admissible algorithm over a long time, including
    oversubscription."""
    n_nodes = 64
    max_r_per_t = 10  # max requests per timeslot
    duration = 100000
    max_size = 20
    rack_capacity = 24

    # Track pending requests - mapping from src/dst to num requested
    pending_requests = {}
    # Track total demands
    cumulative_demands = {}

    # initialization
    q_bin = fpring.fp_ring_create(structures.NUM_BINS_SHIFT)
    q_urgent = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT + 1)
    q_head = fpring.fp_ring_create(2 * structures.FP_NODES_SHIFT)
    q_admitted_out = fpring.fp_ring_create(structures.BATCH_SHIFT)
    core = structures.create_admission_core_state()
    structures.alloc_core_init(core, q_bin, q_bin, q_urgent, q_urgent)
    status = structures.create_admissible_status(True, rack_capacity, 0,
                                                 n_nodes, q_head,
                                                 q_admitted_out)
    admitted_batch = structures.create_admitted_batch()
    for i in range(0, structures.NUM_BINS):
        empty_bin = structures.create_bin(structures.LARGE_BIN_SIZE)
        fpring.fp_ring_enqueue(q_bin, empty_bin)
    admissible.enqueue_head_token(q_urgent)

    num_admitted = 0
    num_requested = 0
    for b in range(duration / structures.BATCH_SIZE):
        # Make some new requests
        for t in range(structures.BATCH_SIZE):
            requests_per_timeslot = random.randint(0, max_r_per_t)
            for r in range(requests_per_timeslot):
                src = random.randint(0, n_nodes - 1)
                dst = random.randint(0, n_nodes - 2)
                if dst >= src:
                    dst += 1  # don't send to self
                size = random.randint(1, max_size)

                demand = cumulative_demands.get((src, dst), 0)
                demand += size
                cumulative_demands[(src, dst)] = demand

                if (src, dst) in pending_requests:
                    pending_requests[(src, dst)] += size
                else:
                    pending_requests[(src, dst)] = size
                admissible.add_backlog(status, src, dst, size)
                num_requested += size

        # Get admissible traffic for this batch
        admissible.get_admissible_traffic(core, status, admitted_batch,
                                          0, 1, 0)

        for i in range(structures.BATCH_SIZE):
            admitted_i = admissible.dequeue_admitted_traffic(status)
            num_admitted += admitted_i.size

            # Check all admitted edges - make sure they were requested
            # and have not yet been fulfilled
            self.assertTrue(admitted_i.size <= n_nodes)
            rack_outputs = [0, 0]
            rack_inputs = [0, 0]
            for e in range(admitted_i.size):
                edge = structures.get_admitted_edge(admitted_i, e)
                pending_count = pending_requests[(edge.src, edge.dst)]
                self.assertTrue(pending_count >= 1)
                if pending_count > 1:
                    pending_requests[(edge.src, edge.dst)] = pending_count - 1
                else:
                    del pending_requests[(edge.src, edge.dst)]
                rack_outputs[structures.get_rack_from_id(edge.src)] += 1
                rack_inputs[structures.get_rack_from_id(edge.dst)] += 1

            for index in range(len(rack_outputs)):
                self.assertTrue(rack_outputs[index] <= rack_capacity)
                self.assertTrue(rack_inputs[index] <= rack_capacity)

    print 'requested %d, admitted %d, capacity %d' % (
        num_requested, num_admitted, duration * n_nodes)

    # should clean up memory