def __init__(self, config):
    """Store the run configuration and assemble the task runner.

    White/black lists supplied directly in *config* are combined with the
    white list derived from ``config.dataset`` (when a dataset is given),
    and the result is handed to a ``TaskRunner``.
    """
    self.reviewed_eval_result_file = 'reviewed-result.csv'
    self.visualize_result_file = 'result.csv'
    self.config = config

    whitelist = list(config.white_list) if 'white_list' in config else []
    blacklist = list(config.black_list) if 'black_list' in config else []

    if 'dataset' in config:
        # NOTE(review): join() receives a single argument here, which leaves
        # the path unchanged — presumably intentional; confirm upstream.
        whitelist.extend(
            get_white_list(join(self.DATASETS_FILE_PATH), config.dataset))

    self.runner = TaskRunner(Benchmark.DATA_PATH, whitelist, blacklist)
def test_get_white_list_returns_empty_list_by_default():
    """With an empty path and no dataset, the white list is empty."""
    result = get_white_list('', None)
    assert_equals([], result)