def generate_md_table(data, headers):
    """Render *data* as a Markdown table string using *headers* as columns.

    Every column is centered with bold styling. Returns the generated
    markup instead of writing it to a stream.
    """
    table = MarkdownTableWriter()
    table.headers = headers
    bold_center = Style(align="center", font_weight="bold")
    table.column_styles = [bold_center] * len(headers)
    table.value_matrix = data
    return table.dumps()
def debug(self):
    """Print a Markdown table pairing each source sequence with its
    aligned target sequence; multi-line cells become one table row per line.
    """
    writer = MarkdownTableWriter()
    writer.table_name = "debug"
    writer.headers = [
        "Src slice", "Index src", "Text src", "",
        "Text tgt", "Index tgt", "Tgt slice",
    ]
    writer.column_styles = [Style(align='center') for _ in range(7)]

    rows = []
    for src_seq, tgt_seq in self._parallelize():
        src_seq.context = self._source.context
        tgt_seq.context = self._target.context

        # Multi-line cell contents: one entry per index / per element.
        src_indices = "\n".join(str(i) for i in src_seq.iter_index())
        src_texts = "\n".join(s.context.get_sequence_text(s) for s in src_seq)
        tgt_texts = "\n".join(s.context.get_sequence_text(s) for s in tgt_seq)
        tgt_indices = "\n".join(str(i) for i in tgt_seq.iter_index())

        # Single-valued cells appear only on the first row of a match;
        # zip_longest pads the remaining rows with None.
        aligned = zip_longest(
            [src_seq.slice_representation()],
            src_indices.split('\n'),
            src_texts.split('\n'),
            ['--->'],
            tgt_texts.split('\n'),
            tgt_indices.split('\n'),
            [tgt_seq.slice_representation()],
        )
        rows.extend(list(cells) for cells in aligned)
        rows.append([""] * 7)  # blank separator line between matches

    rows.pop()  # drop the trailing separator
    writer.value_matrix = rows
    writer.write_table()
def args_to_md(model, args_dict):
    """Print *args_dict* to stdout as a one-row Markdown table titled *model*.

    The dict keys become the column headers and the values form the single
    data row; every column is center-aligned.
    """
    writer = MarkdownTableWriter()
    writer.table_name = model
    writer.headers = list(args_dict.keys())
    writer.value_matrix = [list(args_dict.values())]
    writer.column_styles = [Style(align="center") for _ in range(len(writer.headers))]
    # Bug fix: write_table() streams to stdout and returns None, so the
    # original ``print(writer.write_table())`` emitted the table followed
    # by a stray "None" line. Calling it directly prints only the table.
    writer.write_table()
def generate_report(self, report_path: str):
    """
    Generate a markdown report of enumeration data for the remote host.
    This report is generated from all facts which pwncat is capable of
    enumerating. It does not need nor honor the type or provider options.
    """

    # Dictionary mapping type names to facts. Each type name is mapped
    # to a dictionary which maps sources to a list of facts. This makes
    # organizing the output report easier.
    report_data: Dict[str, Dict[str, List[pwncat.db.Fact]]] = {}
    # Rows of the [property, value] system-overview table written at the
    # top of the report.
    system_details = []

    try:
        # Grab hostname
        hostname = pwncat.victim.enumerate.first("system.hostname").data
        system_details.append(["Hostname", util.escape_markdown(hostname)])
    except ValueError:
        # Fact unavailable; placeholder is still used in the report title.
        hostname = "[unknown-hostname]"

    # Not provided by enumerate, but natively known due to our connection
    system_details.append(
        ["Primary Address", util.escape_markdown(pwncat.victim.host.ip)])
    system_details.append(
        ["Derived Hash", util.escape_markdown(pwncat.victim.host.hash)])

    try:
        # Grab distribution
        distro = pwncat.victim.enumerate.first("system.distro").data
        system_details.append([
            "Distribution",
            util.escape_markdown(
                f"{distro.name} ({distro.ident}) {distro.version}"),
        ])
    except ValueError:
        pass

    try:
        # Grab the architecture
        arch = pwncat.victim.enumerate.first("system.arch").data
        system_details.append(
            ["Architecture", util.escape_markdown(arch.arch)])
    except ValueError:
        pass

    try:
        # Grab kernel version
        kernel = pwncat.victim.enumerate.first(
            "system.kernel.version").data
        system_details.append([
            "Kernel",
            util.escape_markdown(
                f"Linux Kernel {kernel.major}.{kernel.minor}.{kernel.patch}-{kernel.abi}"
            ),
        ])
    except ValueError:
        pass

    try:
        # Grab SELinux State
        selinux = pwncat.victim.enumerate.first("system.selinux").data
        system_details.append(
            ["SELinux", util.escape_markdown(selinux.state)])
    except ValueError:
        pass

    try:
        # Grab ASLR State (0 means disabled)
        aslr = pwncat.victim.enumerate.first("system.aslr").data
        system_details.append(
            ["ASLR", "disabled" if aslr.state == 0 else "enabled"])
    except ValueError:
        pass

    try:
        # Grab init system
        init = pwncat.victim.enumerate.first("system.init").data
        system_details.append(
            ["Init", util.escape_markdown(str(init.init))])
    except ValueError:
        pass

    try:
        # Check if we are in a container
        container = pwncat.victim.enumerate.first("system.container").data
        system_details.append(
            ["Container", util.escape_markdown(container.type)])
    except ValueError:
        pass

    # Build the table writer for the main section
    table_writer = MarkdownTableWriter()
    table_writer.headers = ["Property", "Value"]
    table_writer.column_styles = [
        pytablewriter.style.Style(align="right"),
        pytablewriter.style.Style(align="center"),
    ]
    table_writer.value_matrix = system_details
    table_writer.margin = 1

    # Note enumeration data we don't need anymore. These are handled above
    # in the system_details table which is output with the table_writer.
    ignore_types = [
        "system.hostname",
        "system.kernel.version",
        "system.distro",
        "system.init",
        "system.arch",
        "system.aslr",
        "system.container",
    ]

    # This is the list of known enumeration types that we want to
    # happen first in this order. Other types will still be output
    # but will be output in an arbitrary order following this list
    ordered_types = [
        # Sudo privileges
        "sudo",
        # Possible kernel exploits - very important
        "system.kernel.exploit",
        # Enumerated user passwords - very important
        "system.user.password",
        # Enumerated possible user private keys - very important
        "system.user.private_key",
        # Directories in our path that are writable
        "writable_path",
    ]

    # These types are very noisy. They are important for full enumeration,
    # but are better suited for the end of the list. These are output last
    # no matter what in this order.
    noisy_types = [
        # System services. There's normally a lot of these
        "system.service",
        # Installed packages. There's *always* a lot of these
        "system.package",
    ]

    # Run the full enumeration with a transient progress display, grouping
    # each fact first by its type and then by its source.
    with Progress(
        "enumerating report data",
        "•",
        "[cyan]{task.fields[status]}",
        transient=True,
        console=console,
    ) as progress:
        task = progress.add_task("", status="initializing")
        for fact in pwncat.victim.enumerate():
            progress.update(task, status=str(fact.data))
            if fact.type in ignore_types:
                continue
            if fact.type not in report_data:
                report_data[fact.type] = {}
            if fact.source not in report_data[fact.type]:
                report_data[fact.type][fact.source] = []
            report_data[fact.type][fact.source].append(fact)

    try:
        with open(report_path, "w") as filp:
            filp.write(f"# {hostname} - {pwncat.victim.host.ip}\n\n")

            # Write the system info table
            table_writer.dump(filp, close_after_write=False)
            filp.write("\n")

            # output ordered types first
            for typ in ordered_types:
                if typ not in report_data:
                    continue
                self.render_section(filp, typ, report_data[typ])

            # output everything that's not a ordered or noisy type
            for typ, sources in report_data.items():
                if typ in ordered_types or typ in noisy_types:
                    continue
                self.render_section(filp, typ, sources)

            # Output the noisy types
            for typ in noisy_types:
                if typ not in report_data:
                    continue
                self.render_section(filp, typ, report_data[typ])

        console.log(
            f"enumeration report written to [cyan]{report_path}[/cyan]")
    except OSError as exc:
        # Report file could not be created/written; log and continue.
        console.log(f"[red]error[/red]: [cyan]{report_path}[/cyan]: {exc}")
def generate_markdown(to, topics, questions):
    """Write the LeetCode markdown reports into directory *to*.

    Produces ``questions.md`` (every question), ``topics.md`` (topic index),
    and one ``<slug>.md`` per topic listing its exclusive questions plus a
    section for each similar-topic group.

    Assumes each question dict carries ``title``, ``title_slug``, ``level``,
    ``accepted``, ``submissions``, ``acceptance`` and ``topics`` keys, and
    each topic carries ``name``, ``slug``, ``questions``, ``difficulty`` and
    ``similarities`` — TODO confirm against the caller that builds them.
    """
    timestamp = 'latest updated at {}'.format(date.today().strftime("%Y/%m/%d"))

    # generate to questions.md
    question_json = {
        'Number': [], 'Title': [], 'Level': [],
        'Accepted': [], 'Submissions': [], 'Acceptance': []
    }
    # Reusable cell format strings: thousands separator, whole percent,
    # and a markdown link to the problem page.
    number = '{:,}'
    acceptance = '{:.0f}%'
    title = '[{}](https://leetcode.com/problems/{})'
    easy = 0
    medium = 0
    hard = 0
    for qid in sorted(questions.keys()):
        q = questions[qid]
        question_json['Number'].append(qid)
        question_json['Title'].append(title.format(q['title'], q['title_slug']))
        question_json['Level'].append(LEVELS[q['level']])
        question_json['Accepted'].append(number.format(q['accepted']))
        question_json['Submissions'].append(number.format(q['submissions']))
        question_json['Acceptance'].append(acceptance.format(q['acceptance']))
        # Levels: 1 = Easy, 2 = Medium, 3 = Hard (per the LEVELS mapping).
        if q['level'] == 1:
            easy += 1
        if q['level'] == 2:
            medium += 1
        if q['level'] == 3:
            hard += 1
    questions_md = os.path.join(to, 'questions.md')
    with open(questions_md, 'w') as md:
        total = len(questions.keys())
        md.write('# List of All Questions\n\n')
        md.write('**Total Questions: {}, Easy: {}, Medium: {}, Hard: {}, {}.**\n\n'.format(
            total, easy, medium, hard, timestamp))
        table = pandas.DataFrame.from_dict(question_json)
        writer = MarkdownTableWriter()
        writer.from_dataframe(table)
        writer.column_styles = [
            Style(align="right"),
            Style(align="left"),
            Style(align="center"),
            Style(align="right"),
            Style(align="right"),
            Style(align="right")
        ]
        md.write(writer.dumps())
        md.write('\n\n')
    print('Created {}.'.format(questions_md))

    # generate to topics.md
    topic_slug_to_name = {}
    name = '[{}]({}.md)'  # markdown link to a per-topic file
    topic_json = {
        'Name': [], 'Total': [], 'Easy': [], 'Medium': [], 'Hard': []
    }
    for t in topics:
        topic_slug_to_name[t['slug']] = t['name']
        topic_json['Name'].append(name.format(t['name'], t['slug']))
        topic_json['Total'].append(number.format(len(t['questions'])))
        topic_json['Easy'].append(number.format(len(t['difficulty']['easy'])))
        topic_json['Medium'].append(number.format(len(t['difficulty']['medium'])))
        topic_json['Hard'].append(number.format(len(t['difficulty']['hard'])))
    topics_md = os.path.join(to, 'topics.md')
    with open(topics_md, 'w') as md:
        md.write('# List of All Topics\n\n')
        # Re-uses total/easy/medium/hard computed over ALL questions above.
        md.write('**Total Questions: {}, Easy: {}, Medium: {}, Hard: {}, {}.**\n\n'.format(
            total, easy, medium, hard, timestamp))
        table = pandas.DataFrame.from_dict(topic_json)
        writer = MarkdownTableWriter()
        writer.from_dataframe(table)
        writer.column_styles = [
            Style(align="left"),
            Style(align="right"),
            Style(align="right"),
            Style(align="right"),
            Style(align="right")
        ]
        md.write(writer.dumps())
        md.write('\n\n')
    print('Created {}.'.format(topics_md))

    # generate to each of topics
    for t in topics:
        topic_name_md = os.path.join(to, t['slug'] + '.md')
        with open(topic_name_md, 'w') as md:
            # Per-topic difficulty counters shadow the global ones above.
            total = len(t['questions'])
            easy = 0
            medium = 0
            hard = 0
            for qid in t['questions']:
                if questions[qid]['level'] == 1:
                    easy += 1
                if questions[qid]['level'] == 2:
                    medium += 1
                if questions[qid]['level'] == 3:
                    hard += 1
            md.write('# List of All Questions in {}\n\n'.format(t['name']))
            md.write('**Total Questions: {}, Easy: {}, Medium: {}, Hard: {}, {}.**\n\n'.format(
                total, easy, medium, hard, timestamp))
            # In-page table of contents: the topic itself, then one anchor
            # per similar-topic group.
            md.write('- [{}](#{})\n'.format(t['name'], t['name'].replace(' ', '-')))
            for k in sorted(t['similarities'].keys()):
                similarity_topics = map(lambda _t: topic_slug_to_name[_t], t['similarities'][k]['topics'])
                similarity_topics = sorted(similarity_topics)
                md.write('- [{}](#{})\n'.format(
                    ', '.join(similarity_topics),
                    '-'.join(similarity_topics).replace(' ', '-')))
            md.write('\n')
            question_json = {
                'Number': [], 'Title': [], 'Level': [],
                'Accepted': [], 'Submissions': [], 'Acceptance': []
            }
            for qid in sorted(t['questions']):
                # Only questions belonging exclusively to this topic; shared
                # ones are listed under the similarity sections below.
                if len(questions[qid]['topics']) == 1:
                    q = questions[qid]
                    question_json['Number'].append(qid)
                    question_json['Title'].append(title.format(q['title'], q['title_slug']))
                    question_json['Level'].append(LEVELS[q['level']])
                    question_json['Accepted'].append(number.format(q['accepted']))
                    question_json['Submissions'].append(number.format(q['submissions']))
                    question_json['Acceptance'].append(acceptance.format(q['acceptance']))
            md.write('## {}\n\n'.format(t['name']))
            table = pandas.DataFrame.from_dict(question_json)
            writer = MarkdownTableWriter()
            writer.from_dataframe(table)
            writer.column_styles = [
                Style(align="right"),
                Style(align="left"),
                Style(align="center"),
                Style(align="right"),
                Style(align="right"),
                Style(align="right")
            ]
            md.write(writer.dumps())
            md.write('\n\n')
            # One section per similarity group, with its own question table.
            for k in sorted(t['similarities'].keys()):
                question_json = {
                    'Number': [], 'Title': [], 'Level': [],
                    'Accepted': [], 'Submissions': [], 'Acceptance': []
                }
                similarity_topics = map(lambda _t: topic_slug_to_name[_t], t['similarities'][k]['topics'])
                similarity_topics = sorted(similarity_topics)
                similarity_questions = sorted(t['similarities'][k]['questions'])
                for qid in similarity_questions:
                    q = questions[qid]
                    question_json['Number'].append(qid)
                    question_json['Title'].append(title.format(q['title'], q['title_slug']))
                    question_json['Level'].append(LEVELS[q['level']])
                    question_json['Accepted'].append(number.format(q['accepted']))
                    question_json['Submissions'].append(number.format(q['submissions']))
                    question_json['Acceptance'].append(acceptance.format(q['acceptance']))
                md.write('## {}\n\n'.format(', '.join(similarity_topics)))
                table = pandas.DataFrame.from_dict(question_json)
                writer = MarkdownTableWriter()
                writer.from_dataframe(table)
                writer.column_styles = [
                    Style(align="right"),
                    Style(align="left"),
                    Style(align="center"),
                    Style(align="right"),
                    Style(align="right"),
                    Style(align="right")
                ]
                md.write(writer.dumps())
                md.write('\n\n')
        print('Created {}.'.format(topic_name_md))
float(args_dict["WikiTestPPL"])) # args_dict["min_ppl"]= "{:4.2f}".format(float(args_dict["WikiTestPPL"])) values = [ "**" + str(args_dict.get(key, '-')) + "**" for key in headers ] else: values = [str(args_dict.get(key, '-')) for key in headers] # if float(args_dict["tau"])==0.1 and int(args_dict["pretrained"])==1 and int(args_dict["nlayers"])==1: if True: all_values.append(values) writer.table_name = "SemiSupChain" writer.headers = headers writer.value_matrix = all_values writer.column_styles = [ Style(align="center") for _ in range(len(writer.headers)) ] writer.write_table() # args_to_excel("chain",headers,all_values) # args_to_latex("chain",headers,all_values) args_to_csv("chain", headers, all_values) def args_to_md(model, args_dict): writer = MarkdownTableWriter() writer.table_name = model writer.headers = list(args_dict.keys()) # print('headers: ',writer.headers) writer.value_matrix = [list(args_dict.values())] # print('value_matrix: ',writer.value_matrix) writer.column_styles = [