def test_trace_conversion(self):
    traceroutes = [['N0', 'N5', 'N4', 'N6'],
                   ['N5', 'N5', 'N4', 'N5'],
                   ['N5', 'N2', 'N4']]
    traceroutes_check = [[0, 5, 4, 6],
                         [5, 5, 4, 5],
                         [5, 2, 4]]
    trace_id = vft.trace_in_vertex_id(self.sample_graph, traceroutes)
    self.assertEqual(traceroutes_check, trace_id)
    trace_back = vft.trace_in_vertex_name(self.sample_graph, trace_id)
    self.assertEqual(traceroutes, trace_back)
def test_trace_conversion_error(self):
    fake_trace = [
        ['FAKE', 'FAKE1'],
    ]
    fake_trace_id = [
        [2, 4, 55],
    ]
    # unknown node names cannot be mapped to vertex ids
    with self.assertRaises(ValueError):
        _ = vft.trace_in_vertex_id(self.sample_graph, fake_trace)
    # out-of-range vertex ids cannot be mapped back to names
    with self.assertRaises(IndexError):
        _ = vft.trace_in_vertex_name(self.sample_graph, fake_trace_id)
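# For reference, a minimal sketch of what the two vft helpers exercised above
# could look like; the actual vft implementation may differ. Only the behavior
# asserted by the tests is assumed: node names map to igraph vertex ids via the
# 'name' attribute, unknown names raise ValueError, and out-of-range ids raise
# IndexError. The function names with the '_sketch' suffix and the small graph
# built at the end are illustrative, not the tests' self.sample_graph.
import igraph


def trace_in_vertex_id_sketch(graph, traces):
    """Convert traces of node names into traces of igraph vertex ids."""
    name_to_id = {v['name']: v.index for v in graph.vs}
    converted = []
    for trace in traces:
        try:
            converted.append([name_to_id[hop] for hop in trace])
        except KeyError as exc:
            raise ValueError('unknown node in trace: %s' % exc)
    return converted


def trace_in_vertex_name_sketch(graph, traces):
    """Convert traces of vertex ids back into traces of node names."""
    names = graph.vs['name']  # plain list, so an out-of-range id raises IndexError
    return [[names[vertex_id] for vertex_id in trace] for trace in traces]


# Usage on a 7-node graph whose vertices are named N0..N6:
g = igraph.Graph(n=7, edges=[(0, 5), (5, 4), (4, 6), (5, 2)])
g.vs['name'] = ['N%d' % i for i in range(7)]
print(trace_in_vertex_id_sketch(g, [['N0', 'N5', 'N4', 'N6']]))  # [[0, 5, 4, 6]]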
def purify(g, meta, filters):
    results = dict()
    traceroutes = [x[helpers.TRACE] for x in meta]
    if 'cc' in filters:
        # global clustering coefficient
        results['cc'] = g.transitivity_undirected(mode=igraph.TRANSITIVITY_ZERO)
    if 'ad' in filters:
        # average path length
        results['ad'] = g.average_path_length(directed=False, unconn=True)
    if 'nc' in filters:
        # node count
        results['nc'] = g.vcount()
    if 'ec' in filters:
        # edge count
        results['ec'] = g.ecount()
    if 'rc' in filters:
        # rich-club coefficient fi(k) = 2 * E_k / (N_k * (N_k - 1))
        k = 20
        scores = g.degree()
        # vertex indices ordered by degree (not used further below)
        indices = sorted(range(g.vcount()), key=scores.__getitem__)
        # edges whose endpoints both have degree >= k
        e_k = [x for x in g.es
               if g.degree(x.source) >= k and g.degree(x.target) >= k]
        e_k2 = float(2 * len(e_k))
        # nodes with degree >= k
        n_k = float(len([x for x in g.vs if g.degree(x) >= k]))
        fi_k = e_k2 / (n_k * (n_k - 1))
        results['rc'] = fi_k
    # remove traces with unknown nodes
    before_caida = len(traceroutes)
    traceroutes = vft.trace_in_vertex_id(g, traceroutes)
    if 'tc' in filters:
        # trace count
        results['tc'] = len(traceroutes)
    if 'tl' in filters:
        # mean trace length
        results['tl'] = np.mean([len(x) for x in traceroutes])
    if 'tml' in filters:
        # maximum trace length, plus one example trace of that length
        results['tml'] = max([len(x) for x in traceroutes])
        results['tml_sentence'] = vft.trace_in_vertex_name(
            g, [x for x in traceroutes if len(x) == results['tml']])[0]
    if 'tsl' in filters:
        # minimum trace length, plus one example trace of that length
        results['tsl'] = min([len(x) for x in traceroutes])
        results['tsl_sentence'] = vft.trace_in_vertex_name(
            g, [x for x in traceroutes if len(x) == results['tsl']])[0]
    if 'rt' in filters:
        # number of traces removed by the conversion above
        results['rt'] = before_caida - len(traceroutes)
    if 'vf_prelabeled' in filters:
        results['vf_prelabeled'] = len([x for x in meta if x[helpers.IS_VF_PRELABELED] == 1])
    if 'vf_degree' in filters:
        results['vf_degree'] = len([x for x in meta if x[helpers.IS_VF_DEGREE] == 1])
    if 'vf_closeness' in filters:
        results['vf_closeness'] = len([x for x in meta if x[helpers.IS_VF_CLOSENESS] == 1])
    if 'random_walk_vf_closeness' in filters:
        results['random_walk_vf_closeness'] = len(
            [x for x in meta if x[helpers.RANDOM_WALK_VF_CLOSENESS_ROUTE] == 1])
    if 'lp_soft_prelabeled' in filters:
        results['lp_soft_prelabeled'] = len([x for x in meta if x[helpers.IS_LP_SOFT_PRELABELED] == 1])
    if 'lp_hard_prelabeled' in filters:
        results['lp_hard_prelabeled'] = len([x for x in meta if x[helpers.IS_LP_HARD_PRELABELED] == 1])
    if 'lp_soft_degree' in filters:
        results['lp_soft_degree'] = len([x for x in meta if x[helpers.IS_LP_SOFT_DEGREE] == 1])
    if 'lp_hard_degree' in filters:
        results['lp_hard_degree'] = len([x for x in meta if x[helpers.IS_LP_HARD_DEGREE] == 1])
    if 'lp_soft_closeness' in filters:
        results['lp_soft_closeness'] = len([x for x in meta if x[helpers.IS_LP_SOFT_CLOSENESS] == 1])
    if 'lp_hard_closeness' in filters:
        results['lp_hard_closeness'] = len([x for x in meta if x[helpers.IS_LP_HARD_CLOSENESS] == 1])
    if 'pred' in filters:
        # SH prediction: trace length equals the shortest-path length
        sh_pred = len([x for x in meta if x[helpers.SH_LEN] == x[helpers.TRACE_LEN]])
        # only VF with 1 extra hop
        ppvf_pred = len([x for x in meta
                         if x[helpers.TRACE_LEN] <= x[helpers.SH_LEN] + 1
                         and x[helpers.IS_VF_DEGREE] == 1])
        # SH or VF with one extra hop
        smart_pred = len([x for x in meta
                          if x[helpers.SH_LEN] == x[helpers.TRACE_LEN]
                          or (x[helpers.TRACE_LEN] <= x[helpers.SH_LEN] + 1
                              and x[helpers.IS_VF_DEGREE] == 1)])
        # Brute force prediction
        all_pred = len([x for x in meta if x[helpers.TRACE_LEN] <= x[helpers.SH_LEN] + 1])
        results['sh_pred'] = sh_pred
        results['ppvf_pred'] = ppvf_pred
        results['smart_pred'] = smart_pred
        results['all_pred'] = all_pred
    return results
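# For context, a standalone sketch of the rich-club calculation performed by the
# 'rc' branch above: fi(k) = 2 * E_k / (N_k * (N_k - 1)), where E_k counts edges
# whose endpoints both have degree >= k and N_k counts nodes with degree >= k.
# The function name with the '_sketch' suffix and the random-graph example are
# illustrative only, not part of the project.
import igraph


def rich_club_coefficient_sketch(graph, k=20):
    degrees = graph.degree()
    e_k = sum(1 for e in graph.es
              if degrees[e.source] >= k and degrees[e.target] >= k)
    n_k = sum(1 for d in degrees if d >= k)
    if n_k < 2:
        return 0.0  # undefined with fewer than two nodes of degree >= k
    return 2.0 * e_k / (n_k * (n_k - 1))


# Usage on a random graph dense enough to contain nodes of degree >= 20:
g = igraph.Graph.Erdos_Renyi(n=200, m=3000)
print(rich_club_coefficient_sketch(g, k=20))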