import os
from os.path import join as pjoin

from texttable import Texttable

# buildArgsParser(), load_dict_from_json_file() and sort_nicely() are
# defined elsewhere in this script; a sketch of the last two follows
# after this function.


def main():
    parser = buildArgsParser()
    args = parser.parse_args()
    sort_by = args.sort

    names = []
    results_files = []
    hyperparams_files = []
    status_files = []
    for f in args.results:
        exp_folder = f
        if os.path.isfile(f):
            exp_folder = os.path.dirname(f)

        result_file = pjoin(exp_folder, "result.json")
        hyperparams_file = pjoin(exp_folder, "hyperparams.json")
        status_file = pjoin(exp_folder, "status.json")

        if not os.path.isfile(result_file):
            print('Skip: {0} is not a file!'.format(result_file))
            continue

        if not os.path.isfile(hyperparams_file):
            print('Skip: {0} is not a file!'.format(hyperparams_file))
            continue

        if not os.path.isfile(status_file):
            print('Skip: {0} is not a file!'.format(status_file))
            continue

        # If the parent folder is itself an experiment (it contains a
        # hyperparams.json), use the parent's name instead.
        name = os.path.basename(exp_folder)
        parent = os.path.abspath(pjoin(exp_folder, os.path.pardir))
        if 'hyperparams.json' in os.listdir(parent):
            name = os.path.basename(parent)

        names.append(name)
        results_files.append(result_file)
        hyperparams_files.append(hyperparams_file)
        status_files.append(status_file)

    if 0 in sort_by:
        parser.error('Column IDs start at 1!')

    # Retrieve headers from hyperparams, status and results.
    headers_hyperparams = set()
    headers_results = set()
    headers_status = set()
    for hyperparams_file, status_file, results_file in zip(
            hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)
        headers_hyperparams |= set(hyperparams.keys())
        headers_results |= set(results.keys())
        headers_status |= set(status.keys())

    headers_hyperparams = sorted(headers_hyperparams)
    headers_status = sorted(headers_status)
    # TODO: when generating result.json, split the 'trainset' scores into two
    #       keys: 'trainset' and 'trainset_std' (same goes for validset and
    #       testset).
    headers_results |= set(["trainset_std", "validset_std", "testset_std"])
    headers_results = sorted(headers_results)
    headers = headers_hyperparams + headers_status + ["name"] + headers_results

    # Build the results table.
    table = Texttable(max_width=0)
    table.set_deco(Texttable.HEADER)
    table.set_precision(8)
    table.set_cols_dtype(['a'] * len(headers))
    table.set_cols_align(['c'] * len(headers))

    # Headers
    table.header([str(i) + "\n" + h for i, h in enumerate(headers, start=1)])
    if args.only_header:
        print(table.draw())
        return

    # Results
    for name, hyperparams_file, status_file, results_file in zip(
            names, hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)

        # Build the row: hyperparams columns, then status, name and results.
        row = []
        for h in headers_hyperparams:
            row.append(hyperparams.get(h, ''))

        for h in headers_status:
            row.append(status.get(h, ''))

        row.append(name)

        for h in headers_results:
            if h in ["trainset", "validset", "testset"]:
                # Dataset scores are stored as (mean, std) pairs; keep the
                # mean here and the std in the matching *_std column.
                value = results.get(h, ('', ''))[0]
            elif h in ["trainset_std", "validset_std", "testset_std"]:
                value = results.get(h[:-4], ('', ''))[1]
            else:
                value = results.get(h, '')

            row.append(value)

        table.add_row(row)

    # Sort rows, last sort column first so the first one takes precedence.
    for col in reversed(sort_by):
        table._rows = sorted(table._rows, key=sort_nicely(abs(col) - 1),
                             reverse=col < 0)

    if args.out is not None:
        import csv
        results = [headers]
        results.extend(table._rows)
        with open(args.out, 'w', newline='') as csvfile:
            w = csv.writer(csvfile)
            w.writerows(results)
    else:
        print(table.draw())
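# The scripts in this section call two helpers that are not shown here.
# Below is a minimal sketch, assuming load_dict_from_json_file() simply
# deserializes one JSON file and sort_nicely(col) returns a natural-sort
# key over column `col` (so 'epoch10' sorts after 'epoch2'); the actual
# implementations may differ.
import json
import re


def load_dict_from_json_file(path):
    # Read one JSON file and return its content as a dict.
    with open(path) as f:
        return json.load(f)


def sort_nicely(col):
    # Build a key function that natural-sorts table rows on column `col`:
    # digit runs compare numerically, everything else case-insensitively.
    def alphanum_key(row):
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in re.split(r'(\d+)', str(row[col]))]
    return alphanum_key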
import json
import os

from texttable import Texttable

# buildArgsParser(), sort_nicely() and the METRICS whitelist are defined
# elsewhere in this script.


def main():
    parser = buildArgsParser()
    args = parser.parse_args()
    scoring_files = args.scores
    sort_by = args.sort_by

    if len(scoring_files) == 1 and os.path.isdir(scoring_files[0]):
        scoring_files = [os.path.join(scoring_files[0], f)
                         for f in os.listdir(scoring_files[0])]

    scoring_files = [f for f in scoring_files if f.endswith('.json')]

    if len(scoring_files) == 0:
        parser.error('Need at least one scoring file/folder!')

    if 0 in sort_by:
        parser.error('Column IDs start at 1.')

    headers_params = set()
    headers_scores = set()
    params = []
    scores = []

    # Retrieve the scores.
    for scoring_file in scoring_files:
        # Strip the directory and the '.json' extension.
        infos = os.path.splitext(os.path.basename(scoring_file))[0]
        if args.is_split:
            # Parameters are encoded in the filename as 'key-value' pairs
            # separated by underscores; skip the leading prefix if present.
            ind = 0 if infos.find('-') < infos.find('_') else 1
            param = dict(tuple(p.split('-')) for p in infos.split('_')[ind:])
        else:
            param = {"filename": infos}

        with open(scoring_file) as f:
            score = json.load(f)

        # Compute the VCCR metric, CSR = 1 - NC.
        # In VCCR, VCWP are counted as IC [Girard et al., NeuroImage, 2014].
        if score['VC'] > 0:
            score['VCCR'] = score['VC'] / (score['VC'] + score['IC']
                                           + score['VCWP'])
        else:
            score['VCCR'] = 0

        # Keep only scalar metrics. Copy the keys first: deleting while
        # iterating over the dict would raise a RuntimeError in Python 3.
        for k in list(score.keys()):
            if k not in METRICS:
                del score[k]

        headers_params |= set(param.keys())
        headers_scores |= set(score.keys())
        scores.append(score)
        params.append(param)

    nbr_cols = len(headers_params) + len(headers_scores)
    if any(abs(no) > nbr_cols for no in sort_by):
        parser.error('The maximum column ID is {0}.'.format(nbr_cols))

    table = Texttable(max_width=0)
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['a'] * nbr_cols)
    table.set_cols_align(['l'] + ['c'] * (nbr_cols - 1))

    # Headers: params first (sorted), then the scores in METRICS order.
    headers_params = sorted(headers_params)
    headers_scores = [e for e in METRICS if e in headers_scores]
    headers = headers_params + headers_scores
    table.header([str(i) + "\n" + h for i, h in enumerate(headers, start=1)])

    # Data
    for param, score in zip(params, scores):
        data = []
        for header in headers_params:
            data.append(param.get(header, '-'))

        for header in headers_scores:
            data.append(score.get(header, '-'))

        table.add_row(data)

    # Sort rows, last sort column first so the first one takes precedence.
    for col in reversed(sort_by):
        table._rows = sorted(table._rows, key=sort_nicely(abs(col) - 1),
                             reverse=col < 0)

    print(table.draw())
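# Worked example of the VCCR formula used above, with made-up connection
# counts (illustrative only; the keys mirror the scoring-file fields):
score = {'VC': 120, 'IC': 30, 'VCWP': 10}
vccr = score['VC'] / (score['VC'] + score['IC'] + score['VCWP'])
print(vccr)  # 120 / 160 = 0.75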