def testGenerateCollar(self):
    self.assertEqual(self.sc1.generate_collar(2),
                     S({3: 1, 7: 0, 8: 1, 12: 0}))

    # Input signal should be normalized in the generate_collar function
    self.assertEqual(self.sc2.generate_collar(2),
                     S({3: 1, 7: 0, 8: 1, 12: 0}))

    self.assertEqual(self.sc3.generate_collar(2),
                     S({-2: 1, 2: 0, 8: 1, 12: 0}))

    # Output signal should be normalized
    self.assertEqual(self.sc1.generate_collar(5), S({0: 1, 15: 0}))
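# Illustration of the collar semantics exercised above (a sketch, not part of
# the test suite): for self.sc1 = S({5: 1, 10: 0}), which has transitions at
# 5 and 10, generate_collar(n) marks a band of +/- n around each transition
# of the normalized signal, and overlapping collars merge. Assuming S is this
# repository's sparse-signal class (import path not shown here):
#
#     sig = S({5: 1, 10: 0})
#     sig.generate_collar(2)   # -> S({3: 1, 7: 0, 8: 1, 12: 0})
#     sig.generate_collar(5)   # -> S({0: 1, 15: 0}); the two collars merge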
def testSubtraction(self):
    self.assertEqual(self.s1 - self.se, self.s1)
    self.assertEqual(self.se - self.s1, self.se)
    self.assertEqual(self.s1 - self.si1, S({30: 1, 45: 0}))
    self.assertEqual(self.si1 - self.s1, S({60: 1, 100: 0}))
    self.assertEqual(self.sb1 - self.s1, S({0: 1, 100: 0}))
    self.assertEqual(self.sb1 - self.sb3, S({0: 1, 60: 0}))
    self.assertEqual(self.sb3 - self.sb1, S({60: 1, 130: 0}))
    self.assertEqual(self.sb1 - self.se, self.sb1)
def testUnion(self):
    self.assertEqual(self.se | self.se, self.se)
    self.assertEqual(self.s1 | self.s1, self.s1)
    self.assertEqual(self.s1 | self.s2, S({0: 1, 10: 0, 30: 1, 60: 0}))
    self.assertEqual(self.s2 | self.s1, S({0: 1, 10: 0, 30: 1, 60: 0}))
    self.assertEqual(self.s1 | self.s5 | self.s6, self.s7)
    self.assertEqual(self.s5 | self.s7, self.s7)
    self.assertEqual(self.s7 | self.s5, self.s7)

    self.assertEqual(S({'0': 1, '20941': 0}) | S({'19337': 1, '19352': 0}),
                     S({'0': 1, '20941': 0}))
    self.assertEqual(S({'0': 1, '20941': 0}) | S({'4288': 1, '4636': 0}),
                     S({'0': 1, '20941': 0}))
def testSignalMultiAdd(self):
    self.assertEqual(reduce(add, [self.s1, self.s1, self.s1]),
                     S({30: 3, 60: 0}))
    self.assertEqual(reduce(add, [self.s1, self.s2, self.s3]),
                     S({0: 1, 10: 0, 30: 2, 40: 1, 60: 0}))
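# The tests above rely on reduce and add being available at module scope.
# This module's imports are not shown here, but under Python 3 they would
# typically be (an assumption about the surrounding file):
#
#     from functools import reduce
#     from operator import add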
def testIntersection(self):
    self.assertEqual(self.s1 & self.se, self.se)
    self.assertEqual(self.s1 & self.s1, self.s1)
    self.assertEqual(self.s1 & self.s2, self.se)
    self.assertEqual(self.s1 & self.s3, self.s3)
    self.assertEqual(self.s3 & self.s1, self.s3)
    self.assertEqual(self.s1 & self.sb1, self.s1)
    self.assertEqual(self.s1 & self.si1, S({45: 1, 60: 0}))
def setUp(self):
    super(TestNMIDE, self).setUp()

    self.cost_fn = lambda x: 1 * x

    self.r1 = S({5: 1, 15: 0})
    self.s1 = S({5: 1, 15: 0})
    self.s2 = S({2: 1, 10: 0})
    self.r3 = S({5: 1, 15: 0, 30: 1, 35: 0})
    self.s3 = S({10: 1, 15: 0, 40: 1, 50: 0})
    self.r4 = S({30: 1, 35: 0})

    self.c1 = [(A({"f1": self.r1}), A({"f1": self.s1})),
               (A({"f1": self.r3}), A({"f1": self.s3}))]

    self.cd = [(A({"f1": self.r1, "f2": self.r1}),
                A({"f1": self.s1, "f2": self.s1})),
               (A({"f1": self.r3, "f2": self.r3}),
                A({"f1": self.s3, "f2": self.s3}))]

    self.c3 = [(A({"f1": self.r1}), A({"f2": self.s1})),
               (A({"f1": self.r3}), A({"f2": self.s3}))]

    self.c4 = [(A({"f1": self.r1}), A({"f1": self.s2})),
               (A({"f1": self.r4}), A({"f1": self.s3}))]

    self.filedur_1 = {"f1": 80, "f2": 100}

    # Testing 0 FA denominator
    self.filedur_2 = {"f1": 15}
    self.cost_0 = lambda x: 0 * x
def _sweep(alignment_records):
    c, m, f = partition_alignment(alignment_records)

    if file_framedur_lookup != 0:
        ref_sigs = build_ref_sig([ar.ref for ar in c],
                                 [ar.ref for ar in m],
                                 file_framedur_lookup)
        fa_func = measure_funcs.pop()
        # Running system signal: the per-record signals and their running sum
        sys_sig = [[], S()]

    total_c = len(c)
    num_m = len(m)

    out_points = []
    current_c, current_f = [], []
    # m records don't need to be sorted as they have None confidence scores
    current_m = m + sorted(c, key=conf_key_func)
    remaining_f = sorted(f, key=conf_key_func)
    uniq_confs = sorted(set(map(conf_key_func, c + f)), reverse=True)

    for conf in uniq_confs:
        newsig = []
        newsig_tem = []
        while len(current_m) > 0 and current_m[-1].alignment != "MD" and \
                conf_key_func(current_m[-1]) >= conf:
            if file_framedur_lookup != 0:
                newsig.append(current_m[-1])
            current_c.append(current_m.pop())

        while len(remaining_f) > 0 and conf_key_func(remaining_f[-1]) >= conf:
            if file_framedur_lookup != 0:
                newsig.append(remaining_f[-1])
            current_f.append(remaining_f.pop())

        if file_framedur_lookup != 0:
            if len(newsig) != 0:
                newsig_tem = add_sys_sig(sys_sig[0],
                                         [ar.sys for ar in newsig])
                sys_sig[0] = sys_sig[0] + newsig_tem
                sys_sig[1] = reduce(add, [s[0] for s in newsig_tem],
                                    sys_sig[1])
            out_points.append(
                (conf,
                 reduce(merge_dicts,
                        [m(current_c, current_m, current_f)
                         for m in measure_funcs],
                        fa_func(ref_sigs, sys_sig))))
        else:
            out_points.append(
                (conf,
                 reduce(merge_dicts,
                        [m(current_c, current_m, current_f)
                         for m in measure_funcs],
                        {})))

    return out_points
def build_ref_sig(ref, file_framedur_lookup):
    # TODO: modify join; find a way to avoid doing a full join each time.
    # The reference stays the same and is calculated once; the system signal
    # just needs to include new false alarms.
    ref_temp_add = {}
    not_ref = {}
    nr_area = {}

    if ref == {}:
        return [{}, {}, {}]

    for key, value in ref.items():
        ref_temp = [temporal_single_signal(b) for b in value]
        ref_temp_add[key] = reduce(add, [r[0] for r in ref_temp], S())
        not_ref[key] = ref_temp_add[key].not_sig(file_framedur_lookup[key])
        nr_area[key] = not_ref[key].area()

    return [ref_temp_add, not_ref, nr_area]
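# Minimal usage sketch (an illustration, not part of the scorer): for each
# file key, build_ref_sig returns the summed reference signal, its complement
# over that file's duration, and the complement's area. temporal_single_signal
# is assumed to return a (signal, file_key) pair, as implied by the r[0]
# indexing above; the localization inputs below are hypothetical.
#
#     refs = {"f1": [loc_a, loc_b]}      # per-file reference localizations
#     durations = {"f1": 300}            # per-file frame duration
#     ref_sig, not_ref, nr_area = build_ref_sig(refs, durations)
#     # nr_area["f1"] is the portion of "f1" not covered by any reference,
#     # presumably consumed by fa_func in _sweep for time-based false alarms.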
def build_ref_sig(aligned_pairs, missed_ref, file_framedur_lookup):
    # TODO: modify join; find a way to avoid doing a full join each time.
    # The reference stays the same and is calculated once; the system signal
    # just needs to include new false alarms.
    num_aligned = len(aligned_pairs) + len(missed_ref)
    if num_aligned == 0:
        return [{}, {}, 0]

    ref_temp = ([temporal_single_signal(b) for b in aligned_pairs] +
                [temporal_single_signal(m) for m in missed_ref])
    ref_temp_add = reduce(add, [r[0] for r in ref_temp], S())
    not_ref = ref_temp_add.not_sig(file_framedur_lookup.get(ref_temp[0][1]))
    nr_area = not_ref.area()

    return [ref_temp_add, not_ref, nr_area]
def test2D(self):
    self.assertEqual(self.s2d_1.area(), 250)
    self.assertEqual(self.s2d_2.area(), 250)

    self.assertEqual(
        self.s2d_1.join(
            self.s2d_1.join(self.s2d_1,
                            lambda a, b: a.join(b, min, 0), S()),
            lambda a, b: a.join(b, sub, 0), S()),
        S())

    self.assertEqual(
        self.s2d_1.join(self.s2d_2, lambda a, b: a.join(b, min, 0), S()),
        S({10: S({15: 1, 20: 0}), 15: S(), 30: S({15: 1, 25: 0}), 35: S()}))

    self.assertEqual(
        self.s2d_1.join_nd(self.s2d_2, 2, min),
        S({10: S({15: 1, 20: 0}), 15: S(), 30: S({15: 1, 25: 0}), 35: S()}))

    self.assertEqual(self.s2d_1.join_nd(S(), 2, max), self.s2d_1)

    self.assertEqual(
        self.s2d_1.join_nd(self.s2d_2, 2, lambda a, b: a - min(a, b)),
        S({10: S({10: 1, 15: 0}), 15: S(), 30: S({25: 1, 35: 0}),
           35: S({15: 1, 35: 0}), 40: S()}))
    self.assertEqual(
        self.s2d_1.join_nd(self.s2d_2, 2,
                           lambda a, b: a - min(a, b)).area(), 175)

    self.assertEqual(
        self.s2d_1.join_nd(self.s2d_2, 2, max),
        S({10: S({10: 1, 25: 0}), 15: S({15: 1, 25: 0}),
           30: S({15: 1, 35: 0}), 40: S()}))
    self.assertEqual(self.s2d_1.join_nd(self.s2d_2, 2, max).area(), 425)
def _temporal_signal_accessor(localization, k):
    return S(localization.get(k, {}))
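# Illustration (not part of the module): the accessor falls back to an empty
# signal when the file key is absent, so callers can combine per-file signals
# without key checks. Assuming S is the sparse-signal alias used throughout
# this module:
#
#     _temporal_signal_accessor({"f1": {30: 1, 60: 0}}, "f1")
#     # -> S({30: 1, 60: 0})
#     _temporal_signal_accessor({"f1": {30: 1, 60: 0}}, "f2")
#     # -> S(), the empty signal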
def setUp(self):
    self.ae = A({})
    self.a1 = A({"f1": O(S({10: S({10: 1, 20: 0}), 15: S()}))})
    self.a2 = A({"f1": O(S({10: S({15: 1, 25: 0}), 35: S()}))})
    self.a3 = A({"f1": O(S({30: S({15: 1, 35: 0}), 40: S()}))})
    self.a4 = A({"f1": O(S({10: S({10: 1, 20: 0}), 15: S()})),
                 "f2": O(S({30: S({15: 1, 35: 0}), 40: S()}))})
    self.a5 = A({"f1": O(S({10: S({15: 1, 25: 0}), 35: S()})),
                 "f2": O(S({10: S({15: 1, 25: 0}), 35: S()}))})
def _sweep(alignment_records):
    c, m, f = partition_alignment(alignment_records)

    sys_sig = {}
    sys_sig_add = {}
    ref_all = {}
    if file_framedur_lookup != 0:
        for key in file_framedur_lookup:
            sys_sig[key] = []
            sys_sig_add[key] = S()
            ref_all[key] = []
        for ar in c:
            ref_all[ar.video_file].append(ar.ref)
        for ar in m:
            ref_all[ar.video_file].append(ar.ref)
        ref_sigs = build_ref_sig(ref_all, file_framedur_lookup)
        fa_func = measure_funcs.pop()

    out_points = []
    current_c, current_f = [], []
    # m records don't need to be sorted as they have None confidence scores
    current_m = m + sorted(c, key=conf_key_func)
    remaining_f = sorted(f, key=conf_key_func)
    uniq_confs = sorted(set(map(conf_key_func, c + f)), reverse=True)

    if uniq_conf_limit != 0 and len(uniq_confs) > uniq_conf_limit:
        le = len(uniq_confs)
        indices = np.round(np.linspace(
            0, len(uniq_confs) - 1,
            min(len(uniq_confs), uniq_conf_limit))).astype(int)
        uniq_confs = list(np.array(uniq_confs)[indices])
        print("[Info] Reducing sweep from {} to {} unique confidence "
              "scores, spanning [{}, {}]".format(
                  le, len(uniq_confs), uniq_confs[0], uniq_confs[-1]))

    for conf in uniq_confs:
        newsig = {}
        newsig_tem = {}
        while len(current_m) > 0 and current_m[-1].alignment != "MD" and \
                conf_key_func(current_m[-1]) >= conf:
            if file_framedur_lookup != 0:
                if current_m[-1].video_file not in newsig:
                    newsig[current_m[-1].video_file] = []
                newsig[current_m[-1].video_file].append(current_m[-1])
            current_c.append(current_m.pop())

        while len(remaining_f) > 0 and \
                conf_key_func(remaining_f[-1]) >= conf:
            if file_framedur_lookup != 0:
                if remaining_f[-1].video_file not in newsig:
                    newsig[remaining_f[-1].video_file] = []
                newsig[remaining_f[-1].video_file].append(remaining_f[-1])
            current_f.append(remaining_f.pop())

        if file_framedur_lookup != 0:
            if newsig != {}:
                for key, value in newsig.items():
                    newsig_tem[key] = add_sys_sig(
                        sys_sig[key], [ar.sys for ar in newsig[key]])
                    sys_sig[key] = sys_sig[key] + newsig_tem[key]
                    sys_sig_add[key] = reduce(
                        add, [s[0] for s in newsig_tem[key]],
                        sys_sig_add[key])
            out_points.append(
                (conf,
                 reduce(merge_dicts,
                        [m(current_c, current_m, current_f)
                         for m in measure_funcs],
                        fa_func(ref_sigs, sys_sig, sys_sig_add))))
        else:
            out_points.append(
                (conf,
                 reduce(merge_dicts,
                        [m(current_c, current_m, current_f)
                         for m in measure_funcs],
                        {})))

    return out_points
def _bounding_box_to_signal(bounding_box):
    x, y, w, h = map(lambda e: bounding_box[e], ("x", "y", "w", "h"))
    return S({x: S({y: 1, y + h: 0}), x + w: S()})
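# Illustration (a sketch with made-up box values, not part of the module):
# the returned 2-D sparse signal holds the value 1 over [x, x + w) x
# [y, y + h), so its area() equals w * h, consistent with the 2-D signals
# exercised in test2D.
#
#     box = {"x": 5, "y": 10, "w": 20, "h": 4}
#     sig = _bounding_box_to_signal(box)
#     # sig == S({5: S({10: 1, 14: 0}), 25: S()})
#     # sig.area() == 80, i.e. w * h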
def testSignalAdditionPass3(self):
    self.assertEqual(self.s1 + self.s5, S({0: 1, 60: 0}))
    self.assertEqual(self.s1 + self.s1, S({30: 2, 60: 0}))
    self.assertEqual(self.s1 + self.s6, S({30: 1, 100: 0}))
    self.assertEqual(self.s1 + self.s7, S({0: 1, 30: 2, 60: 1, 100: 0}))
def testSignalAdditionPass2(self):
    self.assertEqual(self.s1 + self.s2, S({0: 1, 10: 0, 30: 1, 60: 0}))
    self.assertEqual(self.s1 + self.s3, S({30: 2, 40: 1, 60: 0}))
    self.assertEqual(self.s1 + self.s4, S({30: 1, 80: 0}))
def testSignalEquivalence(self):
    self.assertEqual(self.se, self.se)
    self.assertEqual(self.s1, self.s1)
    self.assertEqual(S({30: 1, 60: 0}), self.s1)
def setUp(self):
    super(TestSparseSignal, self).setUp()

    self.se = S()
    self.s1 = S({30: 1, 60: 0})
    self.s2 = S({0: 1, 10: 0})
    self.s3 = S({30: 1, 40: 0})
    self.s4 = S({60: 1, 80: 0})
    self.s5 = S({0: 1, 30: 0})
    self.s6 = S({60: 1, 100: 0})
    self.s7 = S({0: 1, 100: 0})

    self.sb1 = S({0: 1, 30: 2, 60: 1, 100: 0})
    self.sb2 = S({30: 2, 40: 1, 60: 0})
    self.sb3 = S({30: 1, 60: 2, 100: 1, 130: 0})

    self.ss1 = S({0: 1, 30: 0, 60: 1, 100: 0})

    self.si1 = S({45: 1, 100: 0})

    self.sd1 = S({10: 1, 20: 0})
    self.sd2 = S({10.45: 1, 20: 0})
    self.sd3 = S({10.00: 1, 20.85: 0})
    self.sd4 = S({10.90: 1, 20.35: 0})
    self.sd5 = S({0.12: 1, 30.32: 0, 30.7: 1, 100.2: 0})
    self.sd6 = S({0.12: 1, 30.32: 2, 60.7: 1, 100.2: 0})

    self.sn1 = S({0: 1, 10: 1, 20: 0})

    self.sc1 = S({5: 1, 10: 0})
    self.sc2 = S({5: 1, 7: 1, 10: 0})
    self.sc3 = S({0: 1, 10: 0})

    self.s2d_1 = S({10: S({10: 1, 20: 0}), 15: S(),
                    30: S({15: 1, 35: 0}), 40: S()})
    self.s2d_2 = S({10: S({15: 1, 25: 0}), 35: S()})

    self.s3d_1 = S({1: S({10: S({10: 1, 20: 0}), 20: S()}),
                    2: S({10: S({10: 1, 30: 0}), 30: S()}),
                    4: S()})
    self.s3d_2 = S({1: S({10: S({10: 1, 20: 0}), 20: S()}), 4: S()})

    self.s_iter1 = S({3: 1, 5: 2, 10: 0})
    self.s_iter2 = S({3: 1, 5: 0, 7: 1, 10: 0})
def __init__(self, bounding_box, conf, obj_type, obj_id):
    self.spatial_signal = (_bounding_box_to_signal(bounding_box)
                           if bounding_box else S())
    self.presenceConf = conf
    self.objectType = obj_type
    self.objectID = obj_id
def _spatial_signal_accessor(localization, k):
    if k in localization:
        return localization.get(k).spatial_signal
    else:
        return S()
def _object_congruence(r, s, obj_kernel_builder, ref_filter, sys_filter,
                       cmiss, cfa, target_rfas):
    ro, so = r.objects, s.objects

    # For N_MODE computation, localizations spanning multiple files are
    # treated independently
    total_c, total_m, total_f, total_r = [], [], [], []
    frame_alignment_records = []

    # We need to report out the ref filter localization for aggregate
    # PMiss@RFA measurements
    ref_filter_localization = {}
    for r, s, k in temporal_signal_pairs(r, s):
        local_so_localizations = map(lambda o: o.localization.get(k, S()), so)
        local_ro_localizations = map(lambda o: o.localization.get(k, S()), ro)

        sos_lookup = _object_signals_to_lookup(sys_filter(r, s),
                                               local_so_localizations)
        ros_lookup = _object_signals_to_lookup(ref_filter(r, s),
                                               local_ro_localizations)

        ref_filter_localization[k] = ref_filter(r, s)

        for frame in sos_lookup.viewkeys() | ros_lookup.viewkeys():
            sys = sos_lookup.get(frame, [])
            ref = ros_lookup.get(frame, [])

            c, m, f = perform_alignment(ref, sys, obj_kernel_builder(sys))

            total_c.extend(c)
            total_m.extend(m)
            total_f.extend(f)
            total_r.extend(ref)

            for ar in c + m + f:
                frame_alignment_records.append((frame, ar))

    num_miss = len(total_m)
    num_correct = len(total_c)
    ref_filter_area = sum([v.area()
                           for v in ref_filter_localization.values()])

    def _conf_sweep_reducer(init, conf):
        mode_scores, det_points = init
        num_filtered_c = len(
            filter(lambda ar: ar.sys.presenceConf >= conf, total_c))
        num_filtered_fa = len(
            filter(lambda ar: ar.sys.presenceConf >= conf, total_f))
        num_miss_w_filtered_c = num_miss + num_correct - num_filtered_c

        mode_scores.append((conf, mode(num_filtered_c, num_miss_w_filtered_c,
                                       num_filtered_fa, cmiss, cfa)))
        det_points.append((conf,
                           r_fa(num_filtered_c, num_miss_w_filtered_c,
                                num_filtered_fa, ref_filter_area),
                           p_miss(num_filtered_c, num_miss_w_filtered_c,
                                  num_filtered_fa)))

        return init

    mode_scores, det_points = reduce(
        _conf_sweep_reducer,
        sorted(list({ar.sys.presenceConf for ar in total_c + total_f})),
        ([], []))

    min_mode = (min(map(lambda x: x[1], mode_scores))
                if len(mode_scores) > 0 else None)

    pmiss_at_rfa_measures = {
        "object-p_miss@{}rfa".format(target_rfa):
        p_miss_at_r_fa(det_points, target_rfa)
        for target_rfa in target_rfas}

    out_components = {
        "object_congruence": 1 - min_mode if min_mode is not None else None,
        "minMODE": min_mode,
        "MODE_records": mode_scores,
        "alignment_records": frame_alignment_records,
        "det_points": det_points,
        "ref_filter_localization": ref_filter_localization
    }

    return merge_dicts(out_components, pmiss_at_rfa_measures)
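# Note on the confidence sweep above (an explanatory sketch; the exact
# definitions of the p_miss and r_fa helpers are assumptions, not taken from
# this module): at each threshold conf, correct alignments whose system
# confidence falls below conf are recounted as misses
# (num_miss_w_filtered_c), and only false alarms at or above conf are kept.
# With the commonly used definitions
#
#     p_miss = num_miss / (num_miss + num_correct)
#     r_fa   = num_fa / ref_filter_area
#
# det_points then traces a miss-rate vs. false-alarm-rate curve over the
# sorted presence confidences, which p_miss_at_r_fa samples at each target
# RFA value.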
def _object_tracking_congruence(r, s, obj_kernel_builder, ref_filter,
                                sys_filter, object_types, cmiss, cfa, cid,
                                target_rfas):
    # If object_types is provided as a non-empty array, only consider objects
    # included in object_types
    ro = r.objects if len(object_types) == 0 else filter(
        lambda o: o.objectType in object_types, r.objects)
    so = s.objects if len(object_types) == 0 else filter(
        lambda o: o.objectType in object_types, s.objects)

    # For N_MODE computation, localizations spanning multiple files are
    # treated independently
    total_c, total_m, total_f, total_r = [], [], [], []
    frame_alignment_records = []
    correct_frame_alignment_records = []

    # We need to report out the ref filter localization for aggregate
    # PMiss@RFA measurements
    ref_filter_localization = {}

    for r, s, k in temporal_signal_pairs(r, s):
        local_so_localizations = map(lambda o: o.localization.get(k, S()), so)
        local_ro_localizations = map(lambda o: o.localization.get(k, S()), ro)

        sos_lookup = _object_signals_to_lookup(sys_filter(r, s),
                                               local_so_localizations)
        ros_lookup = _object_signals_to_lookup(ref_filter(r, s),
                                               local_ro_localizations)

        ref_filter_localization[k] = ref_filter(r, s)
        FirstRun = True
        obj_align = {}
        for frame in sos_lookup.viewkeys() | ros_lookup.viewkeys():
            sys = sos_lookup.get(frame, [])
            ref = ros_lookup.get(frame, [])

            c, m, f = perform_alignment(ref, sys, obj_kernel_builder(sys))

            total_c.extend(c)
            total_m.extend(m)
            total_f.extend(f)
            total_r.extend(ref)

            for ar in c:
                correct_frame_alignment_records.append((frame, ar))
            for ar in c + m + f:
                frame_alignment_records.append((frame, ar))

    ref_filter_area = sum([v.area()
                           for v in ref_filter_localization.values()])

    sweeper = build_sweeper(lambda r: r.sys.presenceConf, [
        build_rfa_metric(ref_filter_area),
        build_pmiss_metric(),
        build_mode_metric(cmiss, cfa),
        build_mote_metric(correct_frame_alignment_records,
                          lambda r: r.sys.presenceConf, cmiss, cfa, cid)
    ])

    sweep_recs = sweeper(total_c + total_m + total_f)

    # Filter out None mode scores (in the case of zero reference objects)
    mode_scores = filter(lambda r: r[1] is not None,
                         flatten_sweeper_records(sweep_recs, ["mode"]))
    mote_scores = filter(lambda r: r[1] is not None,
                         flatten_sweeper_records(sweep_recs, ["mote"]))

    det_points = flatten_sweeper_records(sweep_recs, ["rfa", "p_miss"])

    min_mode = (min(map(lambda x: x[1], mode_scores))
                if len(mode_scores) > 0 else None)
    min_mote = (min(map(lambda x: x[1], mote_scores))
                if len(mote_scores) > 0 else None)

    pmiss_at_rfa_measures = get_points_along_confidence_curve(
        sweep_recs, "rfa", lambda r: r["rfa"],
        "object-p_miss", lambda r: r["p_miss"], target_rfas)

    out_components = {
        "object_congruence": 1 - min_mode if min_mode is not None else None,
        "minMODE": min_mode,
        "MODE_records": mode_scores,
        "object_tracking_congruence":
            min_mote if min_mote is not None else None,
        "minMOTE": min_mote,
        "MOTE_records": mote_scores,
        "alignment_records": frame_alignment_records,
        "det_points": det_points,
        "ref_filter_localization": ref_filter_localization
    }

    return merge_dicts(out_components, pmiss_at_rfa_measures)
def testNormalize(self):
    self.assertEqual(self.sn1.normalize(), S({0: 1, 20: 0}))