def _save_performance_summary(config, train_iter=None):
    """Save a summary of the test results achieved so far in an easy-to-parse
    file (for humans and subsequent programs).

    Args:
        config: Command-line arguments.
        train_iter: (optional) The current training iteration. Note, the
            results written to the file have their own training iteration
            assigned.
    """
    if train_iter is None:
        train_iter = config.n_iter

    tp = dict()

    if config.upper_bound or (config.infer_task_id and config.cl_scenario == 1):
        config.num_weights_rp_net = 0
        config.num_weights_rp_hyper_net = 0
        config.compression_ratio_rp = 0

    tp["acc_after_list"] = misc.list_to_str(config.overall_acc_list)
    tp["acc_during_list"] = misc.list_to_str(config.during_accs_final)
    tp["acc_after_mean"] = config.acc_mean
    tp["acc_during_mean"] = sum(config.during_accs_final) / config.num_tasks
    tp["num_weights_class_net"] = config.num_weights_class_net
    tp["num_weights_rp_net"] = config.num_weights_rp_net
    tp["num_weights_rp_hyper_net"] = config.num_weights_rp_hyper_net
    tp["num_weights_class_hyper_net"] = config.num_weights_class_hyper_net
    tp["compression_ratio_rp"] = config.compression_ratio_rp
    tp["compression_ratio_class"] = config.compression_ratio_class
    tp["overall_task_infer_accuracy_list"] = \
        misc.list_to_str(config.overall_task_infer_accuracy_list)
    tp["acc_task_infer_mean"] = config.acc_task_infer_mean

    # Note, the keywords of this dictionary are defined by the array:
    # hpsearch._SUMMARY_KEYWORDS
    with open(os.path.join(config.out_dir, hpsearch._SUMMARY_FILENAME),
              'w') as f:
        assert 'num_train_iter' in hpsearch._SUMMARY_KEYWORDS
        for kw in hpsearch._SUMMARY_KEYWORDS:
            if kw == 'num_train_iter':
                f.write('%s %d\n' % ('num_train_iter', train_iter))
                continue
            if kw == 'finished':
                continue
            # Numeric values are written via '%f'; values that are already
            # strings (e.g., stringified lists) fall back to '%s'.
            try:
                f.write('%s %f\n' % (kw, tp[kw]))
            except (TypeError, ValueError):
                f.write('%s %s\n' % (kw, tp[kw]))

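# Hypothetical usage sketch (illustrative only, not part of the pipeline):
# it assumes `config` already carries the accuracy and network-size attributes
# read above, e.g., after the final test evaluation of a run. The order of the
# written lines is dictated by `hpsearch._SUMMARY_KEYWORDS`, so the resulting
# file might look roughly like:
#
#     acc_after_mean 95.320000
#     acc_during_mean 96.110000
#     ...
#     num_train_iter 5000
#
#     _save_performance_summary(config, train_iter=5000)
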
def save_summary_dict(config, shared, experiment):
    """Write a text file in the result folder that gives a quick overview
    of the results achieved so far.

    Args:
        (....): See docstring of function :func:`setup_summary_dict`.
    """
    # "setup_summary_dict" must be called first.
    assert hasattr(shared, 'summary')

    summary_fn = 'performance_summary.txt'
    #summary_fn = hpperm._SUMMARY_FILENAME
    with open(os.path.join(config.out_dir, summary_fn), 'w') as f:
        for k, v in shared.summary.items():
            if isinstance(v, list):
                f.write('%s %s\n' % (k, misc.list_to_str(v)))
            elif isinstance(v, float):
                f.write('%s %f\n' % (k, v))
            else:
                f.write('%s %d\n' % (k, v))

    # Additionally dump the per-task accuracies, rescaled from percent to
    # fractions, into simple one-number-per-line files.
    with open(os.path.join(config.out_dir, 'forward'), 'w') as f:
        accs = [acc * 0.01 for acc in shared.summary['acc_during']]
        print('accs: ', accs)
        for acc in accs:
            f.write(str(acc) + '\n')

    with open(os.path.join(config.out_dir, 'backward'), 'w') as f:
        accs = [acc * 0.01 for acc in shared.summary['acc_final']]
        print('accs: ', accs)
        for acc in accs:
            f.write(str(acc) + '\n')

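# Minimal sketch of the `shared.summary` layout this function expects,
# assuming it was populated by `setup_summary_dict`. The scalar entries below
# are illustrative placeholders; only 'acc_during' and 'acc_final' are
# actually required by the 'forward'/'backward' dumps above.
#
#     shared.summary = {
#         'acc_during': [98.2, 97.5, 96.9],  # list -> written via list_to_str
#         'acc_final': [97.8, 97.1, 96.4],   # list -> written via list_to_str
#         'acc_avg_final': 97.1,             # float -> written with '%f'
#         'num_train_iter': 5000,            # int -> written with '%d'
#     }
#     save_summary_dict(config, shared, experiment)
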
def _get_performance_summary(out_dir, cmd_ident):
    """Parse the performance summary file of a simulation.

    This is a very primitive parser that expects each line of the result file
    :code:`os.path.join(out_dir, _SUMMARY_FILENAME)` to be a keyword-value
    pair. The keywords are taken from the :code:`_SUMMARY_KEYWORDS` list.
    **They must appear in the correct order.** The value can either be a
    single number or a list of numbers. A list of numbers will be converted
    into a string, such that it appears in a single cell under the given
    keyword when opening the result CSV file with a spreadsheet.

    Args:
        out_dir: The output directory of the simulation.
        cmd_ident (int): Identifier of this command (needed for informative
            error messages).

    Raises:
        IOError: If the performance summary file does not exist.
        ValueError: If a summary key is not at the expected position in the
            result file.

    Returns:
        A dictionary containing strings as keywords. Note, the values may not
        be lists, and strings need to be wrapped into an extra layer of double
        quotes such that the spreadsheet interprets them as a single entity.
    """
    # Get training results.
    result_summary_fn = os.path.join(out_dir, _SUMMARY_FILENAME)
    if not os.path.exists(result_summary_fn):
        raise IOError('Training run %d did not finish. No results!'
                      % (cmd_ident+1))

    with open(result_summary_fn, 'r') as f:
        result_summary = f.readlines()

    # Ensure downwards compatibility!
    summary_keys = _SUMMARY_KEYWORDS

    performance_dict = dict()
    for line, key in zip(result_summary, summary_keys):
        if not line.startswith(key):
            raise ValueError('Key %s does not appear in performance ' % key +
                             'summary where it is expected.')

        # Parse the lines of the result file.
        # Crop keyword to retrieve only the value.
        _, line = line.split(' ', maxsplit=1)
        # https://stackoverflow.com/questions/4703390/how-to-extract-a-floating-number-from-a-string
        line_nums = re.findall(r"[-+]?\d*\.\d+|\d+", line)

        if len(line_nums) == 1:  # Single number.
            performance_dict[key] = [line_nums[0]]
        else:  # List of numbers.
            # Convert list to a string for the resulting CSV file. Note, the
            # quotes are needed such that the list will be written into a
            # single cell when opening the CSV file (note, every key can have
            # exactly one value).
            performance_dict[key] = \
                ['"' + misc.list_to_str(line_nums, delim=',') + '"']

    return performance_dict

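# Illustrative parsing example. The concrete keywords and file layout are
# fixed by `_SUMMARY_KEYWORDS`; the names below are placeholders, and the
# exact joined string depends on how `misc.list_to_str` formats the list.
#
#     Contents of os.path.join(out_dir, _SUMMARY_FILENAME):
#         acc_after_mean 95.32
#         acc_after_list 95.1,94.8,96.1
#
#     performance_dict = _get_performance_summary(out_dir, cmd_ident=0)
#     # -> roughly {'acc_after_mean': ['95.32'],
#     #             'acc_after_list': ['"95.1,94.8,96.1"']}
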
def save_summary_dict(config, shared):
    """Write a text file in the result folder that gives a quick overview
    of the results achieved so far.

    Args:
        (....): See docstring of function :func:`setup_summary_dict`.
    """
    # "setup_summary_dict" must be called first.
    assert hasattr(shared, 'summary')

    summary_fn = 'performance_overview.txt'
    #summary_fn = hpbbb._SUMMARY_FILENAME
    with open(os.path.join(config.out_dir, summary_fn), 'w') as f:
        for k, v in shared.summary.items():
            if isinstance(v, list):
                f.write('%s %s\n' % (k, utils.list_to_str(v)))
            elif isinstance(v, float):
                f.write('%s %f\n' % (k, v))
            else:
                f.write('%s %d\n' % (k, v))