def test_compareTrackers_1(): ''' functionality test for: basic functionality roiSelectWindow bMarkedFrame success calling multiPlot multiplot_params ''' # build listTrackers listTrackers = [] for _algoenum in [0, 1]: _tracker = TrackFactory(on=True) _tracker.setAlgoEnum(_algoenum) _tracker.setInit(ballColor="orange") listTrackers.append(_tracker) # build listGS test_data = "compareTrackers_orange.db" test_dir = os.path.join(TEST_PARENT_DIR, 'compareTrackers') testDB = DBInterface(os.path.join(test_dir, test_data)) listGS = [pickle.loads(record[1]) for record in testDB.selectAll()] # run method with test_mock flag data_dict = compareTrackers(listGS, listTrackers, test_stub=True) # checks print data_dict['row_titles'] assert data_dict['row_titles'] == [ 'marked_frame', 'img_t', 'img_mask', 'img_repair', 'img_terminal' ] assert data_dict['col_titles'] == ['AlgoEnum=0', 'AlgoEnum=1'] assert data_dict['plot_data'][0][1].shape == (480, 640, 3 ) # since roiSelectFunc=None # run 1. without markedFrame at the top, 2. with selectRoiFunc data_dict = compareTrackers(listGS, listTrackers, roiSelectFunc=True, bMarkedFrame=False, test_stub=True) assert data_dict['row_titles'] == [ 'img_t', 'img_mask', 'img_repair', 'img_terminal' ] assert data_dict['plot_data'][0][1].shape != (480, 640, 3) # don't run with test_stub; thus sending data into multiPlot() to see # if any exceptions are thrown. use bSupressDisplay to prevent matplotlib output # from popping-up during the tests compareTrackers(listGS, listTrackers, test_stub=False, multiplot_params={ 'figsize': (20, 20), 'bSupressDisplay': True })
def test_compareTrackers_3(): ''' test functionality: aligning algo's with different num of diagnostic-plots ''' # build listTrackers listTrackers = [] for _algoenum in [2, 3]: _tracker = TrackFactory(on=True) _tracker.setAlgoEnum(_algoenum) _tracker.setInit(ballColor="orange") listTrackers.append(_tracker) # build listGS test_data = "compareTrackers_orange.db" test_dir = os.path.join(TEST_PARENT_DIR, 'compareTrackers') testDB = DBInterface(os.path.join(test_dir, test_data)) listGS = [pickle.loads(record[1]) for record in testDB.selectAll()] # run two separate functions with different params, compare their output data_dict_1 = compareTrackers(listGS, listTrackers, roiSelectFunc=True, test_stub=True) data_dict_2 = compareTrackers(listGS, listTrackers, roiSelectFunc=True, expand_factor=0.5, blend_rowtitles=True, test_stub=True) # checks assert data_dict_1['row_titles'] == [ 'marked_frame', 'img_t', 'img_mask', 'img_repair', 'img_dummy', 'img_dummy_2', 'img_terminal', 'img_terminal_2' ] print data_dict_2['row_titles'] assert data_dict_2['row_titles'] == [ 'marked_frame\nmarked_frame', 'img_t\nimg_t', 'img_mask\nimg_mask', 'img_repair\nimg_repair', 'img_terminal\nimg_dummy', 'n/a\nimg_dummy_2', 'n/a\nimg_terminal', 'n/a\nimg_terminal_2' ] assert sum(sum(sum(data_dict_1['plot_dict']['img_t'][0]))) > 0 assert data_dict_1['plot_dict']['img_dummy'][0] is None assert sum(sum(sum(data_dict_1['plot_dict']['img_dummy'][1]))) > 0 assert data_dict_2['plot_dict']['img_dummy'][0] is None assert sum(sum(sum(data_dict_2['plot_dict']['img_dummy'][1]))) > 0 assert data_dict_1['plot_data'][0][7] is None
def debug_reveresed_plots(): # build listGS foi = [202, 206, 210, 244, 305] vid_fn = 'data/proc/tmp/dec14/output5.proc1.proc1.avi' listGS = subprocBatchOutput(vid_fn, batch_list=foi) #build trackers listTrackers = [] for _algoenum in [0, 2]: _tracker = TrackFactory(on=True) _tracker.setAlgoEnum(_algoenum) _tracker.setInit(ballColor="orange") listTrackers.append(_tracker) print 'algo_enums=', str( [_tracker.tp_trackAlgoEnum for _tracker in listTrackers]) listTrackers.sort(reverse=True) print 'algo_enums=', str( [_tracker.tp_trackAlgoEnum for _tracker in listTrackers]) #run method ret = compareTrackers(listGS, listTrackers, roiSelectFunc=True, bMarkedFrame=True, bTrackScore=True, bFirstTrackerRoi=True, expand_factor=5.0, test_stub=True)
def test_compareTrackers_5(): ''' test functionality: blend_rowtitles ''' # build listTrackers listTrackers = [] for _algoenum in [2, 3]: _tracker = TrackFactory(on=True) _tracker.setAlgoEnum(_algoenum) _tracker.setInit(ballColor="orange") listTrackers.append(_tracker) # build listGS test_data = "compareTrackers_orange.db" test_dir = os.path.join(TEST_PARENT_DIR, 'compareTrackers') testDB = DBInterface(os.path.join(test_dir, test_data)) listGS = [pickle.loads(record[1]) for record in testDB.selectAll()] # run with blend_rowtitles data_dict_2 = compareTrackers(listGS, listTrackers, roiSelectFunc=True, expand_factor=0.5, blend_rowtitles=True, test_stub=True) # checks print data_dict_2['row_titles'] assert data_dict_2['row_titles'] == [ 'marked_frame\nmarked_frame', 'img_t\nimg_t', 'img_mask\nimg_mask', 'img_repair\nimg_repair', 'img_terminal\nimg_dummy', 'n/a\nimg_dummy_2', 'n/a\nimg_terminal', 'n/a\nimg_terminal_2' ] # verify plots in blend_rowtitles is in correct order data_dict_2['plot_data'][0][4] is not None try: data_dict_2['plot_data'][0][5] is None assert False # this should be out-of-index except: pass data_dict_2['plot_data'][1][4] is not None data_dict_2['plot_data'][1][5] is not None
def test_compareTrackers_4():
    ''' test functionality:
            bFirstTrackerRoi
    '''
    # build listTrackers: algo_enum=2 deliberately placed in first position
    listTrackers = []
    for algo in (2, 0):
        trk = TrackFactory(on=True)
        trk.setAlgoEnum(algo)
        trk.setInit(ballColor="orange")
        listTrackers.append(trk)

    # build listGS - these foi's are specifically chosen as the roi from
    # track_score from algo_enum=0 and algo_enum=2 are very different
    test_dir = os.path.join(TEST_PARENT_DIR, 'compareTrackers')
    testDB = DBInterface(
        os.path.join(test_dir, "compareTrackers_disparateRoi.db"))
    listGS = [pickle.loads(rec[1]) for rec in testDB.selectAll()]

    # two runs identical except for bFirstTrackerRoi (var-of-interest)
    shared_kwargs = dict(roiSelectFunc=True,
                         bTrackScore=True,
                         expand_factor=2.0,
                         test_stub=True)
    data_indv_roi = compareTrackers(listGS,
                                    listTrackers,
                                    bFirstTrackerRoi=False,
                                    **shared_kwargs)
    data_first_roi = compareTrackers(listGS,
                                     listTrackers,
                                     bFirstTrackerRoi=True,
                                     **shared_kwargs)

    # compare col_titles - extract window-roi from string
    assert data_indv_roi['col_titles'] == [
        'AlgoEnum=2\n(373, 277, 10, 10)', 'AlgoEnum=0\n(395, -7, 16, 16)'
    ]
    assert data_first_roi['col_titles'] == [
        'AlgoEnum=2\n(373, 277, 10, 10)', 'AlgoEnum=0\n(373, 277, 10, 10)'
    ]

    # make sure empty plot is in correct position - the second position
    for result in (data_indv_roi, data_first_roi):
        term_row = result['plot_dict']['img_terminal']
        assert sum(sum(term_row[0])) > 0
        assert term_row[1] is None

    # use shape to verify the same roi is being applied across all trackers
    assert (data_indv_roi['plot_dict']['img_t'][0].shape !=
            data_indv_roi['plot_dict']['img_t'][1].shape)
    assert (data_first_roi['plot_dict']['img_t'][0].shape ==
            data_first_roi['plot_dict']['img_t'][1].shape)
def test_compareTrackers_2():
    ''' test functionality:
            col_titles
            expand_factor
            blend_rowtitles
            pixel-comparison
    '''
    # assemble the two trackers being compared
    listTrackers = []
    for algo in (0, 1):
        trk = TrackFactory(on=True)
        trk.setAlgoEnum(algo)
        trk.setInit(ballColor="orange")
        listTrackers.append(trk)

    # load GS fixtures from the test db
    test_dir = os.path.join(TEST_PARENT_DIR, 'compareTrackers')
    testDB = DBInterface(os.path.join(test_dir, "compareTrackers_orange.db"))
    listGS = [pickle.loads(rec[1]) for rec in testDB.selectAll()]

    # run two separate functions with different params, compare their output
    data_dict_1 = compareTrackers(listGS,
                                  listTrackers,
                                  roiSelectFunc=True,
                                  col_titles=['my_col_1', 'my_col_2'],
                                  test_stub=True)
    data_dict_2 = compareTrackers(listGS,
                                  listTrackers,
                                  roiSelectFunc=True,
                                  expand_factor=0.5,
                                  blend_rowtitles=True,
                                  test_stub=True)

    # checks: default vs blended row titles
    assert data_dict_1['row_titles'] == [
        'marked_frame', 'img_t', 'img_mask', 'img_repair', 'img_terminal'
    ]
    assert data_dict_2['row_titles'] == [
        'marked_frame\nmarked_frame', 'img_t\nimg_t', 'img_mask\nimg_mask',
        'img_repair\nimg_repair', 'n/a\nimg_terminal'
    ]
    # explicit col_titles override the AlgoEnum defaults
    assert data_dict_1['col_titles'] == ['my_col_1', 'my_col_2']
    assert data_dict_2['col_titles'] == ['AlgoEnum=0', 'AlgoEnum=1']
    # expand_factor changes the roi-plot size between the two runs
    assert (data_dict_1['plot_dict']['img_t'][0].shape[0] <
            data_dict_2['plot_dict']['img_t'][0].shape[0])

    # pixel-wise comparison against stored benchmark marked-frames
    DIFF_LOG_DIR = "../data/test/guiview/displayclass/log/"
    diff = ImgDiff(log_path=DIFF_LOG_DIR)
    loaded_mf1 = cv2.imread(
        os.path.join(test_dir, 'benchmark_markedframe_1.png'))
    loaded_mf2 = cv2.imread(
        os.path.join(test_dir, 'benchmark_markedframe_2.png'))
    assert diff.diffImgs(data_dict_1['plot_dict']['marked_frame'][0],
                         loaded_mf1)
    assert diff.diffImgs(data_dict_2['plot_dict']['marked_frame'][0],
                         loaded_mf2)