def epoch_table(model_epochs, perturbations_files, perturbations_epochs, config_training_names, attack_configs):
    """
    Create markdown table summarizing epochs of models and adversarial examples.

    :param model_epochs: model epochs as from load()
    :type model_epochs: [int]
    :param perturbations_files: perturbation files as from load()
    :type perturbations_files: [str]
    :param perturbations_epochs: perturbation epochs as from load()
    :type perturbations_epochs: [int]
    :param config_training_names: configuration training names as from load()
    :type config_training_names: [str]
    :param attack_configs: attack configurations as from module()
    :type attack_configs: [experiments.AttackConfig]
    :return: markdown table
    :rtype: str
    """
    # 'Model\\Attack': the original literal used '\A', an invalid escape
    # sequence (SyntaxWarning on modern Python); the rendered text —
    # a literal backslash — is unchanged.
    table_data = [['Model\\Attack', 'Training'] + list(range(len(attack_configs)))]
    for i in range(len(perturbations_files)):
        row = [str(config_training_names[i]), model_epochs[i]]
        for j in range(len(perturbations_files[i])):
            # '--' marks attacks whose perturbation file was never produced.
            if os.path.exists(perturbations_files[i][j]):
                row.append(str(perturbations_epochs[i][j]))
            else:
                row.append('--')
        table_data.append(row)
    table = terminaltables.GithubFlavoredMarkdownTable(table_data)
    return table.table
def print_total_bytes(large_data, do_table=True, style='ascii'):
    """
    Print human-readable byte totals and, optionally, a per-file table.

    :param large_data: sequence whose first two entries are byte counts
        (index 0: GBG result bytes, index 1: total bytes read) and whose
        third entry is a 2-D array of per-file rows — TODO confirm exact
        layout against callers
    :param do_table: whether to render the per-file table
    :param style: can be ascii, markdown, or single. Sets the table style
    :return: None (output goes to the terminal via click)
    """
    gbg_data = large_data[0:2] if len(large_data) > 2 else large_data
    total_bytes = sizeof_fmt(gbg_data[1])
    gbg_bytes = sizeof_fmt(gbg_data[0])
    click.secho(f"Read {total_bytes}", fg="green")
    click.secho(f"GBG Res: {gbg_bytes}", fg="green")
    if do_table:
        headers = ['Filename', 'Bytes Read', 'Filesize']
        lgd = large_data[2]
        # Replace the objects in the first column with their .name attribute
        # (presumably Path/file objects — verify against callers).
        lgd[:, 0] = np.apply_along_axis(lambda x: x[0].name, 1, lgd)
        # Was: a duplicate `data = [headers]` assignment plus a list
        # comprehension used purely for its append side effects.
        data = [headers]
        data.extend(lgd)
        if style == 'markdown':
            table = terminaltables.GithubFlavoredMarkdownTable(data)
        elif style == 'ascii':
            table = terminaltables.AsciiTable(data)
        else:
            table = terminaltables.SingleTable(data)
        click.echo_via_pager(f"{table.table}")
def distal_markdown_table(individual_adversarial_evaluations, config_training_names, config_attack_names, tpr):
    """
    Generate distal markdown table.

    One row per (attack, model) pair; missing evaluations render as '--'.
    """
    header = [
        '', 'Attack', '', 'Model', 'ROC AUC',
        'FPR@%s%%TPR' % tpr, '\\tau@%s%%TPR' % tpr,
        'Test TPR@%s%%TPR' % tpr, 'Val TPR@%s%%TPR' % tpr,
    ]
    rows = [header]
    num_models = len(individual_adversarial_evaluations)
    num_attacks = len(individual_adversarial_evaluations[0])
    # Outer loop over attacks, inner over models, so rows are grouped by attack.
    for attack in range(num_attacks):
        for model in range(num_models):
            row = [
                attack,
                config_attack_names[attack],
                model,
                config_training_names[model],
            ]
            evaluation = individual_adversarial_evaluations[model][attack]
            if evaluation is None:
                row.extend(['--'] * 5)
            else:
                # Metric accessors are looked up by name so the requested TPR
                # level selects the matching method, e.g. fpr_at_<tpr>tpr().
                row.extend([
                    '%.2f' % round(evaluation.receiver_operating_characteristic_auc(), 2),
                    '%.2f' % (round(getattr(evaluation, 'fpr_at_%stpr' % tpr)(), 3) * 100),
                    '%.2f' % round(getattr(evaluation, 'confidence_at_%stpr' % tpr)(), 2),
                    '%.2f' % round(getattr(evaluation, 'tpr_at_%stpr' % tpr)(), 3),
                    '%.3f' % round(getattr(evaluation, 'validation_tpr_at_%stpr' % tpr)(), 3),
                ])
            rows.append(row)
    return terminaltables.GithubFlavoredMarkdownTable(rows).table
def _set_style(self, style):
    """
    Switch the table rendering style in place, preserving existing table data.

    :param style: style name, matched on its first letter: 'a*' -> ascii,
        'd*' -> double, 's*' -> single, 'g*'/'m*' -> markdown
    :raises ValueError: if the style is empty or not recognized. (The
        original silently fell through, leaving ``self._grid`` unset or
        stale and crashing later with AttributeError.)
    """
    # Carry over any data already loaded into the previous grid.
    data = self._grid.table_data if hasattr(self, '_grid') else []
    # [:1] instead of [0] so an empty style string reaches the error branch
    # instead of raising IndexError.
    key = style.lower()[:1]
    if key == 'a':
        self._style = 'ascii'
        self._grid = tt.AsciiTable(data)
    elif key == 'd':
        self._style = 'double'
        self._grid = tt.DoubleTable(data)
    elif key == 's':
        self._style = 'single'
        self._grid = tt.SingleTable(data)
    elif key in ('g', 'm'):
        self._style = 'markdown'
        self._grid = tt.GithubFlavoredMarkdownTable(data)
    else:
        raise ValueError('unknown table style: %r' % style)
    self._grid.inner_heading_row_border = True
    self._grid.padding_left = 3
    self._grid.padding_right = 3
    self._grid.justify_columns = {
        col: 'center'
        for col in range(self.ncols)
    }
def corrupted_markdown_table(ood_evaluations, config_training_names, ood_names, tpr):
    """
    Generate corrupted markdown table.

    One row per (model, corruption) pair, followed by one per-model row of
    means across all corruptions. Metric accessors on the evaluation objects
    are resolved by name so the requested TPR level selects the matching
    method, e.g. ``fpr_at_<tpr>tpr()``.
    """
    table_data = [[
        '', '', 'Corruption', 'Model', 'Clean TE', 'Corr TE', 'ROC AUC',
        'FPR@%s%%TPR' % tpr, 'TNR@%s%%TPR' % tpr, 'Corr TE@%s%%TPR' % tpr,
        'tau@%s%%TPR' % tpr, 'Test TPR@%s%%TPR' % tpr, 'Val TPR@%s%%TPR' % tpr
    ]]  # heading
    # values[j][i] holds the nine raw metric values for corruption j, model i,
    # so means can be computed afterwards without re-querying the evaluations.
    values = []
    for j in range(len(ood_evaluations[0])):
        values_ = []
        for i in range(len(ood_evaluations)):
            assert j < len(ood_names), (j, len(ood_names), len(ood_evaluations[i]))
            evaluation = ood_evaluations[i][j]
            table_values = [
                round(evaluation.test_error() * 100, 2),
                round(evaluation.corrupted_test_error() * 100, 2),
                round(evaluation.receiver_operating_characteristic_auc(), 2),
                round(getattr(evaluation, 'fpr_at_%stpr' % tpr)(), 3) * 100,
                round(getattr(evaluation, 'tnr_at_%stpr' % tpr)(), 3) * 100,
                round(getattr(evaluation, 'corrupted_test_error_at_%stpr' % tpr)() * 100, 2),
                round(getattr(evaluation, 'confidence_at_%stpr' % tpr)(), 2),
                round(getattr(evaluation, 'tpr_at_%stpr' % tpr)(), 3) * 100,
                round(getattr(evaluation, 'validation_tpr_at_%stpr' % tpr)(), 3) * 100,
            ]
            values_.append(table_values)
            table_data.append(
                [i, j, config_training_names[i], ood_names[j]]
                + ['%.2f' % value for value in table_values])
        values.append(values_)
    # Per-model mean rows. BUG FIX: the original emitted only three leading
    # cells ([i - 1, name, 'mean']) against four in the data rows and a
    # 13-column header, leaving every mean row one column short and
    # misaligned; it also used the off-by-one index ``i - 1``.
    num_corruptions = len(ood_evaluations[0])
    for i in range(len(ood_evaluations)):
        means = [
            '%.2f' % numpy.mean([values[j][i][k] for j in range(num_corruptions)])
            for k in range(9)
        ]
        table_data.append([i, '--', config_training_names[i], 'mean'] + means)
    table = terminaltables.GithubFlavoredMarkdownTable(table_data)
    return table.table
# Fragment of a compression-benchmark script: the first line below is the
# tail of a run_bench(...) call whose opening (and the table_data/run_bench
# definitions) lies outside the visible chunk.
                      "xz -d < %(ifn)s >%(ofn)s"))
# Each run_bench(label, compress_cmd, decompress_cmd) row is collected for
# the final table.
table_data.append(
    run_bench("xz -5", "xz -5 < %(ifn)s >%(ofn)s",
              "xz -d < %(ifn)s >%(ofn)s"))
table_data.append(
    run_bench("brotli -7", "brotli -7 < %(ifn)s >%(ofn)s",
              "brotli -d < %(ifn)s >%(ofn)s"))
table_data.append(
    run_bench("brotli -8", "brotli -8 < %(ifn)s >%(ofn)s",
              "brotli -d < %(ifn)s >%(ofn)s"))
table_data.append(
    run_bench("brotli -9", "brotli -9 < %(ifn)s >%(ofn)s",
              "brotli -d < %(ifn)s >%(ofn)s"))
table_data.append(
    run_bench("lzfse", "lzfse -encode < %(ifn)s > %(ofn)s",
              "lzfse -decode < %(ifn)s > %(ofn)s"))
# NOTE(review): the labels "zstd -17" and "zstd -18" actually run
# "zstd -11" and "zstd -15" respectively — label/command mismatch;
# confirm which compression levels were intended.
table_data.append(
    run_bench("zstd -17", "zstd -11< %(ifn)s > %(ofn)s",
              "zstd -d < %(ifn)s > %(ofn)s"))
table_data.append(
    run_bench("zstd -18", "zstd -15< %(ifn)s > %(ofn)s",
              "zstd -d < %(ifn)s > %(ofn)s"))
table_data.append(
    run_bench("zstd -19", "zstd -19< %(ifn)s > %(ofn)s",
              "zstd -d < %(ifn)s > %(ofn)s"))
# Sort the data rows (header row excluded) by the second column, parsed as
# an integer after stripping thousands separators — presumably compressed
# size; verify against run_bench's row format.
table_data[1:] = sorted(table_data[1:],
                        key=lambda row: int(row[1].replace(",", "")))
table = terminaltables.GithubFlavoredMarkdownTable(table_data)
print(table.table)