def traffic_per_group_per_tenant_for_overlay(self):
    """Return a pandas Series of per-group overlay traffic.

    Each group contributes 6 units per participating leaf plus
    2 * (host_count - 1) units of intra-leaf replication per leaf,
    where host_count is the popcount of the leaf's host bitmap.
    """
    samples = []
    for t in bar_range(
            self.num_tenants,
            desc='data:traffic_per_group_per_tenant_for_overlay:'):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            leafs_map = groups_map[g]['leafs_map']
            # 2*(hosts-1) replication units inside each leaf
            intra_leaf_traffic = sum(
                2 * (popcount(leafs_map[l]['bitmap']) - 1)
                for l in leafs_map)
            samples.append((6 * len(leafs_map)) + intra_leaf_traffic)
    return pd.Series(samples)
def _run(self):
    """Run the configured optimization algorithm on every group of every
    tenant.

    Stores the resulting header size and default bitmap back on each
    group under '<node_type>_header_size' / '<node_type>_default_bitmap'
    and appends the per-group wall-clock time to
    self.algorithm_elapse_time.
    """
    for t in bar_range(self.num_tenants,
                       desc='optimizer:%s' % self.algorithm):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            # pick leaf-level or pod-level node map depending on config
            if self.node_type == 'leafs':
                nodes_map = group_map['leafs_map']
            else:
                nodes_map = group_map['pods_map']
            start = timer()
            header_size, default_bitmap = algorithms.run(
                algorithm=self.algorithm,
                nodes_map=nodes_map,
                max_bitmaps=self.num_bitmaps,
                max_nodes_per_bitmap=self.num_leafs_per_bitmap,
                redundancy_per_bitmap=self.redundancy_per_bitmap,
                rules_count_map=self.rules_count_map,
                max_rules=self.num_rules,
                probability=self.probability,
                num_ports_per_node=self.num_ports_per_node,
                node_id_width=self.node_id_width)
            group_map['%s_header_size' % self.node_type] = header_size
            group_map['%s_default_bitmap' % self.node_type] = default_bitmap
            end = timer()
            self.algorithm_elapse_time.append(end - start)
def traffic_per_group_per_tenant_for_unicast(self):
    """Return a pandas Series of per-group unicast traffic.

    Each group contributes 6 units for every receiver host (the
    popcount of every leaf's host bitmap, summed).
    """
    samples = []
    for t in bar_range(
            self.num_tenants,
            desc='data:traffic_per_group_per_tenant_for_unicast:'):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            leafs_map = groups_map[g]['leafs_map']
            host_count = sum(
                popcount(leafs_map[l]['bitmap']) for l in leafs_map)
            samples.append(6 * host_count)
    return pd.Series(samples)
def _process(self):
    """Feed every group of every tenant through the dynamic event
    processor (self.event.process)."""
    for t in bar_range(self.num_tenants, desc='dynamic'):
        tenant_maps = self.tenants_maps[t]
        vm_count = tenant_maps['vm_count']
        vm_to_host_map = tenant_maps['vm_to_host_map']
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            self.event.process(vm_count, vm_to_host_map, groups_map[g])
def _get_tenant_groups_to_vms_map(self):
    """Assign every group a random sample of its tenant's VM ids,
    sized by the group's previously drawn 'size'."""
    for t in bar_range(self.num_tenants, desc='tenants:groups->vms'):
        tenant_maps = self.tenants_maps[t]
        vm_count = tenant_maps['vm_count']
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            group_map['vms'] = random.sample(range(vm_count),
                                             group_map['size'])
def _get_tenant_groups_to_vms_map_mproc(tenants_maps, num_tenants):
    """Multiprocessing worker: assign every group a random sample of its
    tenant's VM ids, sized by the group's previously drawn 'size'.

    :param tenants_maps: list of per-tenant state dicts (mutated in place)
    :param num_tenants: number of tenants in tenants_maps to process
    :return: the mutated tenants_maps, so a worker pool can collect it

    Fix: bar_range was called as bar_range(range(num_tenants), '...'),
    unlike every sibling call site which passes the count itself plus a
    desc= keyword; pass the count directly.
    """
    for t in bar_range(num_tenants, desc='tenants:groups->vms'):
        tenant_maps = tenants_maps[t]
        vm_count = tenant_maps['vm_count']
        group_count = tenant_maps['group_count']
        groups_map = tenant_maps['groups_map']
        for g in range(group_count):
            group_map = groups_map[g]
            group_map['vms'] = random.sample(range(vm_count),
                                             group_map['size'])
    return tenants_maps
def _get_tenant_groups_to_sizes_map(self):
    """Draw a receiver-set size for every group of every tenant.

    'uniform': size ~ U[min_group_size, vm_count].
    'wve': mix3 distribution from the dcn-mcast paper — with probability
    0.02 a near-full group, otherwise a small gamma-distributed group.

    :raises ValueError: for any other group_size_dist value
      (ValueError is a subclass of the previously raised Exception, so
      existing callers catching Exception still work).
    """
    if self.group_size_dist == 'uniform':
        for t in bar_range(self.num_tenants, desc='tenants:group sizes'):
            tenant_maps = self.tenants_maps[t]
            vm_count = tenant_maps['vm_count']
            group_count = tenant_maps['group_count']
            groups_map = tenant_maps['groups_map']
            for g in range(group_count):
                groups_map[g]['size'] = random.randint(
                    self.min_group_size, vm_count)
    elif self.group_size_dist == 'wve':
        # ... using mix3 distribution from the dcn-mcast paper.
        for t in bar_range(self.num_tenants, desc='tenants:group sizes'):
            tenant_maps = self.tenants_maps[t]
            vm_count = tenant_maps['vm_count']
            group_count = tenant_maps['group_count']
            groups_map = tenant_maps['groups_map']
            for g in range(group_count):
                sample = random.random()
                if sample < 0.02:
                    # rare near-full group
                    size = vm_count - int(
                        random.gammavariate(2, 0.1) * vm_count /
                        15) % vm_count
                else:
                    size = int(
                        random.gammavariate(2, 0.2) * vm_count / 15 +
                        self.min_group_size - 1) % vm_count + 1
                # Fix: clamp BOTH branches to the configured minimum —
                # the near-full branch could also fall below it.
                groups_map[g]['size'] = max(size, self.min_group_size)
    else:
        raise ValueError(
            "invalid dist parameter for group size allocation")
    if self.debug:
        _group_sizes_for_all_tenants = []
        for t in range(self.num_tenants):
            tenant_maps = self.tenants_maps[t]
            group_count = tenant_maps['group_count']
            groups_map = tenant_maps['groups_map']
            for g in range(group_count):
                _group_sizes_for_all_tenants += [groups_map[g]['size']]
        print(pd.Series(_group_sizes_for_all_tenants).describe())
def _get_tenant_group_vms_to_types_map(self):
    """Tag every VM of every group with a type, 'S' or 'B', chosen
    uniformly at random."""
    for t in bar_range(self.num_tenants, desc='tenants:groups:vms->type'):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            # create the type map on first touch
            vms_types = group_map.setdefault('vms_types', dict())
            for vm in group_map['vms']:
                vms_types[vm] = random.sample(['S', 'B'], 1)[0]
def _process_with_failures(self):
    """Run the normal dynamic pass, then replay every group through the
    failure-aware processor with one randomly failed node (only spine
    failures are drawn; otherwise failed_node stays None)."""
    self._process()
    failed_node = None
    if self.failed_node_type == 'spine':
        failed_node = random.sample(range(self.num_spines), 1)[0]
    for t in bar_range(self.num_tenants, desc='dynamic_with_failures'):
        tenant_maps = self.tenants_maps[t]
        vm_to_host_map = tenant_maps['vm_to_host_map']
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            self.event.process_with_failures(failed_node, vm_to_host_map,
                                             groups_map[g])
def _get_tenant_groups_to_event_count_map(self):
    """Apportion self.num_events across all groups proportionally to
    each group's size (integer-truncated share per group)."""
    sum_of_group_sizes = sum(
        group_map['size']
        for t in range(self.num_tenants)
        for group_map in self.tenants_maps[t]['groups_map'])
    for t in bar_range(self.num_tenants,
                       desc='tenants:groups->event count'):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            group_map['event_count'] = int(
                group_map['size'] / sum_of_group_sizes * self.num_events)
def _get_tenant_groups_pods_and_leafs_to_bitmap_map_mproc(
        tenants_maps, num_tenants, num_hosts_per_leaf, num_leafs_per_pod):
    """Multiprocessing worker: derive per-leaf and per-pod membership
    bitmaps for every group from the group's VM-to-host placement.

    For each member VM, its leaf is host // num_hosts_per_leaf and its
    pod is leaf // num_leafs_per_pod.  Host/leaf sets are accumulated
    first, then collapsed into bitmaps (bit h % num_hosts_per_leaf for a
    host within its leaf, bit l % num_leafs_per_pod for a leaf within
    its pod); the intermediate sets are deleted to save memory.

    :param tenants_maps: list of per-tenant state dicts (mutated in place)
    :param num_tenants: number of tenants to process
    :param num_hosts_per_leaf: hosts attached to each leaf switch
    :param num_leafs_per_pod: leaf switches in each pod
    :return: the mutated tenants_maps, so a worker pool can collect it
    """
    for t in bar_range(num_tenants, desc='placement:leafs->bitmap'):
        tenant_maps = tenants_maps[t]
        group_count = tenant_maps['group_count']
        groups_map = tenant_maps['groups_map']
        vm_to_host_map = tenant_maps['vm_to_host_map']
        for g in range(group_count):
            group_map = groups_map[g]
            vms = group_map['vms']
            leafs_map = group_map['leafs_map']
            pods_map = group_map['pods_map']
            # pass 1: collect the set of hosts per leaf and leafs per pod
            for vm in vms:
                host = vm_to_host_map[vm]
                leaf = int(host / num_hosts_per_leaf)
                pod = int(leaf / num_leafs_per_pod)
                if leaf in leafs_map:
                    leafs_map[leaf]['hosts'] |= {host}
                else:
                    leafs_map[leaf] = dict()
                    leafs_map[leaf]['hosts'] = {host}
                if pod in pods_map:
                    pods_map[pod]['leafs'] |= {leaf}
                else:
                    pods_map[pod] = dict()
                    pods_map[pod]['leafs'] = {leaf}
            # pass 2: collapse host sets into per-leaf bitmaps
            for l in leafs_map:
                leaf_map = leafs_map[l]
                leaf_map['bitmap'] = 0
                for h in leaf_map['hosts']:
                    leaf_map['bitmap'] |= 1 << (h % num_hosts_per_leaf)
                del leaf_map['hosts']
            # pass 3: collapse leaf sets into per-pod bitmaps
            for p in pods_map:
                pod_map = pods_map[p]
                pod_map['bitmap'] = 0
                for l in pod_map['leafs']:
                    pod_map['bitmap'] |= 1 << (l % num_leafs_per_pod)
                del pod_map['leafs']
    return tenants_maps
def traffic_per_group_per_tenant_for_overlay_corrected_params(self):
    """Collect three parallel per-group measurements and return them as
    pandas Series: the pod count, the leaf count, and the inter-leaf
    overlay traffic (4 units per additional leaf within each pod)."""
    pod_counts = []
    leaf_counts = []
    pods_traffic_samples = []
    for t in bar_range(
            self.num_tenants,
            desc=
            'data:traffic_per_group_per_tenant_for_overlay_corrected_params:'
    ):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            leafs_map = group_map['leafs_map']
            pods_map = group_map['pods_map']
            pods_traffic = sum(
                4 * (popcount(pods_map[p]['bitmap']) - 1)
                for p in pods_map)
            pod_counts.append(len(pods_map))
            leaf_counts.append(len(leafs_map))
            pods_traffic_samples.append(pods_traffic)
    return (pd.Series(pod_counts), pd.Series(leaf_counts),
            pd.Series(pods_traffic_samples))
def traffic_per_group_per_tenant_for_baseerat(self):
    """Return a pandas Series of per-group Baseerat traffic:
    3 fixed units + one per pod + one per leaf + one per receiver host,
    plus traffic induced by redundant ('~bitmap') leaves and, when
    present, the group's leafs default bitmap."""
    samples = []
    for t in bar_range(
            self.num_tenants,
            desc='data:traffic_per_group_per_tenant_for_baseerat:'):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            leafs_map = group_map['leafs_map']
            pods_map = group_map['pods_map']
            redundant_leafs = 0
            redundant_leafs_traffic = 0
            for p in pods_map:
                pod_map = pods_map[p]
                if '~bitmap' in pod_map:
                    redundant_leafs += popcount(pod_map['~bitmap'])
                # recomputed with the running total each iteration, so
                # the final value uses the total redundant leaf count
                if 'leafs_default_bitmap' in group_map:
                    redundant_leafs_traffic = redundant_leafs * popcount(
                        group_map['leafs_default_bitmap'])
            leafs_traffic = 0
            for l in leafs_map:
                leaf_map = leafs_map[l]
                leafs_traffic += popcount(leaf_map['bitmap'])
                if '~bitmap' in leaf_map:
                    leafs_traffic += popcount(leaf_map['~bitmap'])
            samples.append(3 + len(pods_map) + len(leafs_map) +
                           leafs_traffic + redundant_leafs +
                           redundant_leafs_traffic)
    return pd.Series(samples)
def group_size_per_group_per_tenant(self):
    """Return a pandas Series of every group's size across all tenants;
    also writes it to CSV when self.log_dir is configured.

    Fix: pass index=False to to_csv, matching every sibling CSV logger
    in this class, so the output does not carry a meaningless
    positional-index column.
    """
    _group_size_per_group_per_tenant = []
    for t in bar_range(self.num_tenants,
                       "data:group_size_per_group_per_tenant:"):
        tenant_maps = self.tenants_maps[t]
        group_count = tenant_maps['group_count']
        groups_map = tenant_maps['groups_map']
        for g in range(group_count):
            _group_size_per_group_per_tenant += [groups_map[g]['size']]
    _group_size_per_group_per_tenant = pd.Series(
        _group_size_per_group_per_tenant)
    if self.log_dir is not None:
        _group_size_per_group_per_tenant.to_csv(
            self.log_dir + "/group_size_per_group_per_tenant.csv",
            index=False)
    return _group_size_per_group_per_tenant
def groups_covered_with_bitmaps_only(self, node_type):
    """Count groups in which no node carries a 'has_rule' flag, and the
    subset of those that additionally have no (or an all-zero) default
    bitmap.  Returns a one-row DataFrame with columns 'bitmaps' and
    'bitmaps_without_default_bitmap', optionally written to CSV."""
    covered = 0
    covered_without_default = 0
    for t in bar_range(self.num_tenants,
                       desc='data:groups_covered_with_bitmaps_only:'):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            if node_type == 'leafs':
                nodes_map = group_map['leafs_map']
            else:
                nodes_map = group_map['pods_map']
            has_rule = reduce(
                lambda x, y: x | y,
                ['has_rule' in nodes_map[n] for n in nodes_map])
            if has_rule:
                continue
            covered += 1
            default_key = '%s_default_bitmap' % node_type
            if default_key not in group_map or group_map[default_key] == 0:
                covered_without_default += 1
    df_groups_covered_with_bitmaps_only = pd.DataFrame()
    df_groups_covered_with_bitmaps_only['bitmaps'] = pd.Series(covered)
    df_groups_covered_with_bitmaps_only['bitmaps_without_default_bitmap'] = \
        pd.Series(covered_without_default)
    if self.log_dir is not None:
        df_groups_covered_with_bitmaps_only.to_csv(
            self.log_dir +
            "/groups_covered_with_bitmaps_only_for_%s.csv" % node_type,
            index=False)
    return df_groups_covered_with_bitmaps_only
def _colocate_pods__uniform_hosts(self):
    """Place every tenant's VMs onto hosts: pods are picked at random
    per tenant (co-location), hosts inside the chosen pod uniformly.

    Maintains, per pod, the list of still-available global host ids and
    a parallel per-host VM counter; a host is retired once it reaches
    self.max_vms_per_host, and a pod is retired once it has no hosts
    left.  Results go into each tenant's 'vm_to_host_map'.
    """
    available_pods = list(range(self.num_pods))
    available_hosts_per_pod = [None] * self.num_pods
    available_hosts_count_per_pod = [None] * self.num_pods
    for p in range(self.num_pods):
        # global host id = ((pod * leafs_per_pod + leaf) * hosts_per_leaf) + host
        available_hosts_per_pod[p] = [
            (((p * self.num_leafs_per_pod) + l) * self.num_hosts_per_leaf)
            + h
            for l in range(self.num_leafs_per_pod)
            for h in range(self.num_hosts_per_leaf)
        ]
        # VM count per host, parallel to available_hosts_per_pod[p]
        available_hosts_count_per_pod[p] = (
            [0] * self.num_leafs_per_pod * self.num_hosts_per_leaf)
    tenants_maps = self.tenants_maps
    for t in bar_range(self.num_tenants, desc='placement:vms->host'):
        tenant_maps = tenants_maps[t]
        vm_to_host_map = tenant_maps['vm_to_host_map']
        vm_index = 0
        vm_count = tenant_maps['vm_count']
        while vm_count > 0:
            # pick a random pod that still has capacity
            selected_pod = random.sample(available_pods, 1)[0]
            selected_pod_index = available_pods.index(selected_pod)
            selected_hosts = available_hosts_per_pod[selected_pod_index]
            selected_hosts_count = (
                available_hosts_count_per_pod[selected_pod_index])
            # place as many of the remaining VMs here as hosts allow
            sampled_hosts = random.sample(
                selected_hosts, min(len(selected_hosts), vm_count))
            for h in sampled_hosts:
                vm_to_host_map[vm_index] = h
                selected_hosts_count[selected_hosts.index(h)] += 1
                vm_index += 1
            vm_count -= len(sampled_hosts)
            # retire hosts that hit the per-host VM cap
            # (delete in reverse so earlier indexes stay valid)
            removed_hosts_indexes = [
                i for i, c in enumerate(selected_hosts_count)
                if c == self.max_vms_per_host
            ]
            for i in sorted(removed_hosts_indexes, reverse=True):
                del selected_hosts[i]
                del selected_hosts_count[i]
            # retire the pod once it has no hosts left
            if len(selected_hosts) == 0:
                del available_pods[selected_pod_index]
                del available_hosts_per_pod[selected_pod_index]
                del available_hosts_count_per_pod[selected_pod_index]
def traffic_overhead_per_group_per_tenant(self, node_type):
    """Return a pandas Series of the per-group ratio of redundant
    ('~bitmap') traffic to actual bitmap traffic for the given node
    type ('leafs' or pods); optionally written to CSV."""
    overheads = []
    for t in bar_range(self.num_tenants,
                       desc='data:traffic_overhead_per_group_per_tenant:'):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            if node_type == 'leafs':
                nodes_map = group_map['leafs_map']
            else:
                nodes_map = group_map['pods_map']
            actual_traffic = 0
            redundant_traffic = 0
            for n in nodes_map:
                node_map = nodes_map[n]
                actual_traffic += popcount(node_map['bitmap'])
                if '~bitmap' in node_map:
                    redundant_traffic += popcount(node_map['~bitmap'])
            overheads.append(redundant_traffic / actual_traffic)
    overheads = pd.Series(overheads)
    if self.log_dir is not None:
        overheads.to_csv(
            self.log_dir +
            "/traffic_overhead_per_group_per_tenant_for_%s.csv" % node_type,
            index=False)
    return overheads
def _get_tenant_to_group_count_map(self):
    """Weighted assignment of self.num_groups across tenants in
    proportion to each tenant's VM count; records the realized total in
    self.tenants['group_count']."""
    total_vms = self.tenants['vm_count']
    assigned_groups = 0
    for t in bar_range(self.num_tenants, desc='tenants:group count'):
        tenant_maps = self.tenants_maps[t]
        # integer-truncated proportional share
        share = int(tenant_maps['vm_count'] / total_vms * self.num_groups)
        tenant_maps['group_count'] = share
        assigned_groups += share
    self.tenants['group_count'] = assigned_groups
    if self.debug:
        counts = pd.Series([
            self.tenants_maps[t]['group_count']
            for t in range(self.num_tenants)
        ])
        print(counts.describe())
        print("Sum: %s" % sum(counts))
def leaf_count_per_group_per_tenant(self):
    """Return a pandas Series of the number of leaves spanned by every
    group across all tenants; optionally written to CSV."""
    counts = []
    for t in bar_range(self.num_tenants,
                       "data:leaf_count_per_group_per_tenant:"):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            counts.append(len(groups_map[g]['leafs_map']))
    counts = pd.Series(counts)
    if self.log_dir is not None:
        counts.to_csv(
            self.log_dir + "/leaf_count_per_group_per_tenant.csv",
            index=False)
    return counts
def _get_tenant_to_vm_count_map(self):
    """Draw a VM count per tenant from an exponential mixture — with
    probability 0.02 a uniform draw over [min_vms, max_vms], otherwise
    a scaled exponential wrapped into the range — and record the grand
    total in self.tenants['vm_count'].  Raises for any vm_dist other
    than 'expon'."""
    if self.vm_dist == 'expon':
        total = 0
        for t in bar_range(self.num_tenants, desc='tenants:vm count'):
            sample = random.random()
            if sample < 0.02:
                vm_count = random.randint(self.min_vms, self.max_vms)
            else:
                span = self.max_vms - self.min_vms
                vm_count = int(
                    (random.expovariate(4.05) / 10) * span) % span \
                    + self.min_vms
            self.tenants_maps[t]['vm_count'] = vm_count
            total += vm_count
        self.tenants['vm_count'] = total
    else:
        raise Exception("invalid dist parameter for vm allocation")
    if self.debug:
        print(
            pd.Series([
                self.tenants_maps[t]['vm_count']
                for t in range(self.num_tenants)
            ]).describe())
        print("VM Count: %s" % self.tenants['vm_count'])
def traffic_per_group_per_tenant_for_overlay_corrected(self):
    """Return a pandas Series of per-group overlay traffic corrected to
    account for pods: 6 units per pod, 4 * (leaf_count - 1) inter-leaf
    units per pod, and 2 * (host_count - 1) intra-leaf units per leaf."""
    samples = []
    for t in bar_range(
            self.num_tenants,
            desc='data:traffic_per_group_per_tenant_for_overlay_corrected:'
    ):
        tenant_maps = self.tenants_maps[t]
        groups_map = tenant_maps['groups_map']
        for g in range(tenant_maps['group_count']):
            group_map = groups_map[g]
            leafs_map = group_map['leafs_map']
            pods_map = group_map['pods_map']
            pods_traffic = sum(
                4 * (popcount(pods_map[p]['bitmap']) - 1)
                for p in pods_map)
            leafs_traffic = sum(
                2 * (popcount(leafs_map[l]['bitmap']) - 1)
                for l in leafs_map)
            samples.append(
                (6 * len(pods_map)) + pods_traffic + leafs_traffic)
    return pd.Series(samples)
def traffic_per_group_per_tenant_for_baseerat_bytes(self):
    """Return a pandas Series of per-group Baseerat header traffic in
    BYTES, accounting for header shrinkage at each hop.

    The full header carries upstream-leaf, upstream-spine, core,
    downstream-spine, and downstream-leaf sections; each hop strips the
    section it consumed, so edges further downstream carry fewer bits.
    Downstream edges are multiplied by their fan-out (pod count for
    core->spine, leaf count plus redundant leaves for spine->leaf).
    The bit total is divided by 8 to yield bytes.  Optionally written
    to CSV when self.log_dir is set.
    """
    _traffic_per_group_per_tenant_for_baseerat_bytes = []
    for t in bar_range(
            self.num_tenants,
            desc='data:traffic_per_group_per_tenant_for_baseerat_bytes:'):
        tenant_maps = self.tenants_maps[t]
        group_count = tenant_maps['group_count']
        groups_map = tenant_maps['groups_map']
        for g in range(group_count):
            group_map = groups_map[g]
            leafs_map = group_map['leafs_map']
            pods_map = group_map['pods_map']
            leafs_traffic = 0
            redundant_leafs = 0
            redundant_leafs_traffic = 0
            # tally redundant leaves advertised by pods' ~bitmap entries;
            # redundant_leafs_traffic is recomputed with the running total
            # each iteration, so its final value uses the full count
            for p in pods_map:
                pod_map = pods_map[p]
                if '~bitmap' in pod_map:
                    redundant_leafs += popcount(pod_map['~bitmap'])
                if 'leafs_default_bitmap' in group_map:
                    redundant_leafs_traffic = redundant_leafs * popcount(
                        group_map['leafs_default_bitmap'])
            # receiver hosts per leaf, including redundant (~bitmap) hosts
            for l in leafs_map:
                leaf_map = leafs_map[l]
                leafs_traffic += popcount(leaf_map['bitmap'])
                if '~bitmap' in leaf_map:
                    leafs_traffic += popcount(leaf_map['~bitmap'])
            # header section widths, in bits
            upstream_leaf_bits = self.num_hosts_per_leaf + self.num_spines_per_pod
            upstream_spine_bits = self.num_leafs_per_pod + self.num_cores
            core_bits = self.num_pods
            downstream_spine_bits = group_map['pods_header_size']
            downstream_leaf_bits = group_map['leafs_header_size']
            header_size_bits = (upstream_leaf_bits + upstream_spine_bits +
                                core_bits + downstream_spine_bits +
                                downstream_leaf_bits)
            # each hop strips its own section from the header
            host_to_leaf_edge_bits = header_size_bits
            leaf_to_spine_edge_bits = host_to_leaf_edge_bits - upstream_leaf_bits
            spine_to_core_edge_bits = leaf_to_spine_edge_bits - upstream_spine_bits
            core_to_spine_edge_bits = spine_to_core_edge_bits - core_bits
            spine_to_leaf_edge_bits = core_to_spine_edge_bits - downstream_spine_bits
            # sum bits over all traversed edges, convert to bytes
            _traffic_per_group_per_tenant_for_baseerat_bytes += \
                [(host_to_leaf_edge_bits +
                  leaf_to_spine_edge_bits +
                  spine_to_core_edge_bits +
                  (len(pods_map) * core_to_spine_edge_bits) +
                  (len(leafs_map) * spine_to_leaf_edge_bits) +
                  (redundant_leafs * spine_to_leaf_edge_bits)) / 8]
    _traffic_per_group_per_tenant_for_baseerat_bytes = \
        pd.Series(_traffic_per_group_per_tenant_for_baseerat_bytes)
    if self.log_dir is not None:
        _traffic_per_group_per_tenant_for_baseerat_bytes.to_csv(
            self.log_dir +
            "/traffic_per_group_per_tenant_for_baseerat_bytes.csv",
            index=False)
    return _traffic_per_group_per_tenant_for_baseerat_bytes