예제 #1
0
def convert(csv_file_path, html_file_path,table_name):
    print("this python script read table in ",csv_file_path, " and then write it into html file ",html_file_path)
    print("converting...")
    #writer = HtmlTableWriter()
    writer = MarkdownTableWriter()
    #writer.is_write_header=False
    writer.from_csv(csv_file_path)
    writer.table_name = table_name


    #get the column for link
    link_col=0
    for head in writer.headers:
        #print(head)
        if ( head == 'website' ):
            break
        else:
            link_col +=1
    #debug
    link_col_flag = True
    if ( link_col >= len(writer.headers) ):
        link_col_flag = False
        #raise ValueError('no column for link')
    
    print('get link_col = ',link_col)

    #get the column for wuzi
    wuzi_col=0
    for head in writer.headers:
        #print(head)
        if ( head == 'repo' ):
            break
        else:
            wuzi_col +=1
    #debug
    wuzi_col_flag = True
    if ( wuzi_col >= len(writer.headers) ):
        wuzi_col_flag = False
        #raise ValueError('no column for wuzi')
    
    print('get wuzi_col = ',wuzi_col)

    
    #limit width of some column to be
    col_fixed_width=wuzi_col
    col_width=24
    #modify the link in the fourth row to be an html link
    
    for row in writer.value_matrix:
        if wuzi_col_flag and ( row[wuzi_col] != '' ):
            row[wuzi_col]='[repo]('+row[wuzi_col]+')'

        if link_col_flag and ( row[link_col] != '' ):
            row[link_col]='[website]('+row[link_col]+')'
    writer.dump(html_file_path)
    print("done with ",table_name)
예제 #2
0
def test(file_path, markdown_file_path):
    """Load an Excel table from *file_path* and dump it, header-less, as Markdown.

    Fix: the previous version unconditionally overwrote *file_path* with
    'temp2.csv' (debug leftover), so the caller's argument was ignored.
    """
    loader = ExcelTableFileLoader(file_path)
    writer = MarkdownTableWriter()
    writer.is_write_header = False
    tabledata = loader.load()
    # NOTE(review): replaces element 1 with the first cell of element 2 --
    # looks like a data-specific fixup; confirm loader.load() returns an
    # indexable, assignable structure here (it may be a generator).
    tabledata[1] = tabledata[2][0]
    writer.from_tabledata(tabledata, False)

    writer.dump(markdown_file_path)
예제 #3
0
def main():
    """Read data.csv and write it back out as a Markdown table (data.md)."""
    source_csv = "data.csv"
    target_md = "data.md"
    print("this python script read table in ", source_csv,
          " and then write it into markdown file ", target_md)
    print("converting...")
    md_writer = MarkdownTableWriter()
    md_writer.from_csv(source_csv)
    # One space of padding on each side of every cell.
    md_writer.margin = 1
    md_writer.dump(target_md)
    print("done")
예제 #4
0
def main():
    """Evaluate pretrained model(s) on the ImageNet-X datasets and write results.

    Dumps a pickle of raw results and a Markdown summary table into
    args.pretrained_model_dir (rank 0 only).
    """
    global has_imagenet_reassessed

    args = parse_args()

    torch.backends.cudnn.benchmark = True
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        dist_params = dict(backend='nccl', init_method='env://')
        init_dist(args.launcher, **dist_params)

    results = []

    # pretrained_model_dir: a single model
    result = test(args, args.pretrained_model_dir, distributed)
    if result is not None:
        results.append(result)

    # pretrained_model_dir: a parent folder
    # NOTE(review): both the single-model and parent-folder branches always
    # run -- presumably test() returns None for a non-model directory;
    # confirm to avoid double evaluation.
    for model_dir in os.scandir(args.pretrained_model_dir):
        if not model_dir.is_dir():
            continue
        result = test(args, model_dir, distributed)
        if result is None:
            continue

        results.append(result)

    # Only rank 0 writes output files to avoid concurrent writers.
    # NOTE(review): dist.get_rank() raises unless a process group is
    # initialized -- confirm behaviour for launcher == 'none'.
    if dist.get_rank() == 0:
        # save
        mmcv.dump(
            results,
            os.path.join(args.pretrained_model_dir, 'ImageNet-X-Eval.pkl'))

        # output markdown table
        writer = MarkdownTableWriter()
        writer.table_name = "Top-1 and Top-5 Error Rates"
        writer.headers = ["Model", "Params (M)"]

        for dataset in imagenet_x:
            writer.headers.append(dataset)
            if dataset == 'imagenet-1k' and has_imagenet_reassessed:
                writer.headers.append('imagenet-1k-reassessed')

        writer.value_matrix = results
        writer.margin = 1  # add a whitespace for both sides of each cell
        writer.dump(
            os.path.join(args.pretrained_model_dir,
                         'ImageNet-X-Eval-Table.txt'))
예제 #5
0
def write_key_description():
    """Write a Markdown table comparing section lengths across the data files."""
    # Keys are taken from the .dat files found in the last configured directory.
    last_dir = files[-1]
    keys = [
        entry.split(".dat")[0]
        for entry in os.listdir(last_dir)
        if entry.endswith(".dat")
    ]
    names = [path.split("/")[-1] for path in files]
    m = describe_keys_for_files(keys, names)

    data = m.as_array("section")

    writer = MarkdownTableWriter()
    writer.table_name = "Comparison of lengths of sections"
    writer.headers = data[0]
    writer.value_matrix = data[1:]

    with open("file_inspection/section_length.md", 'w') as f:
        writer.dump(f)
예제 #6
0
def main():
    """Render cached benchmark tables into per-directory README markdown files."""
    args = parse_args()
    table_cache = mmcv.load(args.table_cache)
    output_dir = args.out

    writer = MarkdownTableWriter()
    writer.headers = [
        'Method', 'Backbone', 'Crop Size', 'Lr schd', 'Mem (GB)',
        'Inf time (fps)', 'mIoU', 'mIoU(ms+flip)', 'download'
    ]
    writer.margin = 1
    writer.align_list = [Align.CENTER] * len(writer.headers)
    dataset_maps = {
        'cityscapes': 'Cityscapes',
        'ade20k': 'ADE20K',
        'voc12aug': 'Pascal VOC 2012 + Aug'
    }
    for directory in table_cache:
        for dataset in table_cache[directory]:
            table = table_cache[directory][dataset][0]
            writer.table_name = dataset_maps[dataset]
            # value_matrix keeps a reference to `table`, so the in-place
            # formatting below is still reflected in the dumped output.
            writer.value_matrix = table
            for i in range(len(table)):
                # NOTE(review): assumes the mIoU cell (index -4) is numeric
                # when not '-' -- f'{...:.2f}' raises on str; confirm the
                # cache contents.
                if table[i][-4] != '-':
                    table[i][-4] = f'{table[i][-4]:.2f}'
            mmcv.mkdir_or_exist(osp.join(output_dir, directory))
            writer.dump(osp.join(output_dir, directory,
                                 f'README_{dataset}.md'))
        # Concatenate the per-dataset files into one README, removing them
        # afterwards.
        # NOTE(review): iterates over all of dataset_maps -- this raises
        # FileNotFoundError if a dataset was absent from
        # table_cache[directory]; confirm caches always contain all three.
        with open(osp.join(output_dir, directory, 'README.md'), 'w') as dst_f:
            for dataset in dataset_maps:
                dataset_md_file = osp.join(output_dir, directory,
                                           f'README_{dataset}.md')
                with open(dataset_md_file) as src_f:
                    for line in src_f:
                        dst_f.write(line)
                    dst_f.write('\n')
                os.remove(dataset_md_file)
예제 #7
0
def write_markdown_table(stream: TextIO, results: ResultsDict, method: str):
    """Write one Markdown table for *method* to *stream*, one row per result key.

    Column headers come from the first entry's keys; each row looks up
    ``entry[h][method]``, formatting present values to four decimal places
    and leaving missing values blank.  The stream is left open.
    """
    # Derive the column set from the first entry.  With an empty *results*
    # the previous for/break idiom left `headers` unbound (NameError at the
    # `["items"] + headers` line); now an empty table is written instead.
    first_entry = next(iter(results.values()), {})
    headers = list(first_entry.keys())

    value_matrix = []
    for key, entry in results.items():
        row = [str(key)]
        for h in headers:
            value = entry[h].get(method)
            row.append("" if value is None else format(value, ".04f"))
        value_matrix.append(row)

    headers = ["items"] + headers

    writer = MarkdownTableWriter(table_name=method,
                                 headers=headers,
                                 value_matrix=value_matrix)
    writer.dump(stream, close_after_write=False)
예제 #8
0
from pytablewriter import MarkdownTableWriter
import json

writer = MarkdownTableWriter()
writer.table_name = "Intent Cross-Validation Results (5 folds)"

# Load the per-class cross-validation report.
with open("results/intent_report.json", "r") as f:
    data = json.load(f)

cols = ["support", "f1-score", "confused_with"]
writer.headers = ["class"] + cols

# Order classes by descending support count.
classes = sorted(data, key=lambda name: data[name]["support"], reverse=True)


def format_cell(data, c, k):
    """Render one table cell for class *c*, column *k*.

    Missing, None, or empty values render as "N/A"; a confused_with mapping
    is flattened to "label(count), ...".  Numeric zeros are kept as real
    values -- the previous truthiness test displayed e.g. support == 0 or
    f1-score == 0.0 as "N/A".
    """
    value = data[c].get(k)
    if value is None or value == "" or value == {} or value == []:
        return "N/A"
    if k == "confused_with":
        return ", ".join(f"{label}({count})" for label, count in value.items())
    return value


# One row per class: the class name followed by each formatted column value.
table_rows = []
for cls in classes:
    table_rows.append([cls] + [format_cell(data, cls, col) for col in cols])
writer.value_matrix = table_rows

writer.dump("results.md")
예제 #9
0
    def generate_report(self, report_path: str):
        """ Generate a markdown report of enumeration data for the remote host. This
        report is generated from all facts which pwncat is capable of enumerating.
        It does not need nor honor the type or provider options.

        :param report_path: filesystem path the markdown report is written to.
        """

        # Dictionary mapping type names to facts. Each type name is mapped
        # to a dictionary which maps sources to a list of facts. This makes
        # organizing the output report easier.
        report_data: Dict[str, Dict[str, List[pwncat.db.Fact]]] = {}
        # Rows of [property, value] pairs for the system-overview table.
        system_details = []

        try:
            # Grab hostname
            hostname = pwncat.victim.enumerate.first("system.hostname").data
            system_details.append(["Hostname", util.escape_markdown(hostname)])
        except ValueError:
            # Fall back to a placeholder used in the report title below.
            hostname = "[unknown-hostname]"

        # Not provided by enumerate, but natively known due to our connection
        system_details.append(
            ["Primary Address",
             util.escape_markdown(pwncat.victim.host.ip)])
        system_details.append(
            ["Derived Hash",
             util.escape_markdown(pwncat.victim.host.hash)])

        # Each of the following facts is optional: enumerate.first raises
        # ValueError when a fact is unavailable, and the row is simply omitted.
        try:
            # Grab distribution
            distro = pwncat.victim.enumerate.first("system.distro").data
            system_details.append([
                "Distribution",
                util.escape_markdown(
                    f"{distro.name} ({distro.ident}) {distro.version}"),
            ])
        except ValueError:
            pass

        try:
            # Grab the architecture
            arch = pwncat.victim.enumerate.first("system.arch").data
            system_details.append(
                ["Architecture",
                 util.escape_markdown(arch.arch)])
        except ValueError:
            pass

        try:
            # Grab kernel version
            kernel = pwncat.victim.enumerate.first(
                "system.kernel.version").data
            system_details.append([
                "Kernel",
                util.escape_markdown(
                    f"Linux Kernel {kernel.major}.{kernel.minor}.{kernel.patch}-{kernel.abi}"
                ),
            ])
        except ValueError:
            pass

        try:
            # Grab SELinux State
            selinux = pwncat.victim.enumerate.first("system.selinux").data
            system_details.append(
                ["SELinux", util.escape_markdown(selinux.state)])
        except ValueError:
            pass

        try:
            # Grab ASLR State
            aslr = pwncat.victim.enumerate.first("system.aslr").data
            system_details.append(
                ["ASLR", "disabled" if aslr.state == 0 else "enabled"])
        except ValueError:
            pass

        try:
            # Grab init system
            init = pwncat.victim.enumerate.first("system.init").data
            system_details.append(
                ["Init", util.escape_markdown(str(init.init))])
        except ValueError:
            pass

        try:
            # Check if we are in a container
            container = pwncat.victim.enumerate.first("system.container").data
            system_details.append(
                ["Container",
                 util.escape_markdown(container.type)])
        except ValueError:
            pass

        # Build the table writer for the main section
        table_writer = MarkdownTableWriter()
        table_writer.headers = ["Property", "Value"]
        table_writer.column_styles = [
            pytablewriter.style.Style(align="right"),
            pytablewriter.style.Style(align="center"),
        ]
        table_writer.value_matrix = system_details
        table_writer.margin = 1

        # Note enumeration data we don't need anymore. These are handled above
        # in the system_details table which is output with the table_writer.
        # NOTE(review): "system.selinux" is shown in the table above but is
        # absent from this list, so SELinux facts are also emitted again in
        # the per-type sections below -- confirm whether that is intended.
        ignore_types = [
            "system.hostname",
            "system.kernel.version",
            "system.distro",
            "system.init",
            "system.arch",
            "system.aslr",
            "system.container",
        ]

        # This is the list of known enumeration types that we want to
        # happen first in this order. Other types will still be output
        # but will be output in an arbitrary order following this list
        ordered_types = [
            # Sudo privileges
            "sudo",
            # Possible kernel exploits - very important
            "system.kernel.exploit",
            # Enumerated user passwords - very important
            "system.user.password",
            # Enumerated possible user private keys - very important
            "system.user.private_key",
            # Directories in our path that are writable
            "writable_path",
        ]

        # These types are very noisy. They are important for full enumeration,
        # but are better suited for the end of the list. These are output last
        # no matter what in this order.
        noisy_types = [
            # System services. There's normally a lot of these
            "system.service",
            # Installed packages. There's *always* a lot of these
            "system.package",
        ]

        # Collect every remaining fact, grouped by type and then by source,
        # while showing a live progress line.
        with Progress(
                "enumerating report data",
                "•",
                "[cyan]{task.fields[status]}",
                transient=True,
                console=console,
        ) as progress:
            task = progress.add_task("", status="initializing")
            for fact in pwncat.victim.enumerate():
                progress.update(task, status=str(fact.data))
                if fact.type in ignore_types:
                    continue
                if fact.type not in report_data:
                    report_data[fact.type] = {}
                if fact.source not in report_data[fact.type]:
                    report_data[fact.type][fact.source] = []
                report_data[fact.type][fact.source].append(fact)

        try:
            with open(report_path, "w") as filp:
                filp.write(f"# {hostname} - {pwncat.victim.host.ip}\n\n")

                # Write the system info table
                table_writer.dump(filp, close_after_write=False)
                filp.write("\n")

                # output ordered types first
                for typ in ordered_types:
                    if typ not in report_data:
                        continue
                    self.render_section(filp, typ, report_data[typ])

                # output everything that's not a ordered or noisy type
                for typ, sources in report_data.items():
                    if typ in ordered_types or typ in noisy_types:
                        continue
                    self.render_section(filp, typ, sources)

                # Output the noisy types
                for typ in noisy_types:
                    if typ not in report_data:
                        continue
                    self.render_section(filp, typ, report_data[typ])

            console.log(
                f"enumeration report written to [cyan]{report_path}[/cyan]")
        except OSError as exc:
            # Report (but do not re-raise) filesystem failures.
            console.log(f"[red]error[/red]: [cyan]{report_path}[/cyan]: {exc}")
예제 #10
0
#!/usr/bin/env python3
"""Regenerate the R/Python package version lists and render them as Markdown."""

from pytablewriter import MarkdownTableWriter
import subprocess
import sys


def _dump_package_table(txt_path, md_path, title, separator=None):
    """Read "package<sep>version" lines from *txt_path* and write a Markdown table.

    *separator* is passed straight to str.split(); the default (None) splits
    on any whitespace, matching the R package list format, while "==" matches
    pip freeze output.
    """
    with open(txt_path, "r") as f:
        rows = [line.strip().split(separator) for line in f]

    writer = MarkdownTableWriter()
    writer.table_name = title
    writer.headers = ["Package", "Version"]
    writer.value_matrix = rows
    writer.dump(md_path)


# Regenerate the package lists first.  Previously the exit code was captured
# but never checked, so a failing script silently left stale lists in use.
exit_code = subprocess.call("./get_versions.sh")
if exit_code != 0:
    sys.exit(exit_code)

_dump_package_table("docs/r_packages.txt", "docs/r_table.md",
                    "Available R Packages")
_dump_package_table("docs/python_packages.txt", "docs/python_table.md",
                    "Available Python Packages", separator="==")
예제 #11
0
    writer.headers = ["ISO3 Code", "Country", "Boundary type"]
    writer.value_matrix = []

    for country in config.countries:
        if desired_output["countryConfigKey"] in country:
            admin_level_info = country[desired_output["countryConfigKey"]]
            writer.value_matrix.append([
                country['countryIsoAlpha3Code'], country['countryName'],
                admin_level_info['localName']
            ])
        else:
            writer.value_matrix.append([
                country['countryIsoAlpha3Code'], country['countryName'], 'N/A'
            ])

    writer.dump(os.path.join(out_dir, "README.md"))

    htmlWrite.from_tabledata(writer.tabledata)
    htmlWrite.dump(os.path.join(out_dir, "README.html"))

    writer2 = MarkdownTableWriter()
    writer2.table_name = f"{desired_output['friendlyName']} Values"
    writer2.headers = [
        f"{desired_output['shortName']}_NAME",
        f"{desired_output['shortName']}_CODE", 'ISO3 Code',
        'Local Boundary Name', 'Source Geometry Dataset'
    ]
    writer2.value_matrix = []

    source = fiona.open(
        os.path.join(data_root, desired_output["outputName"],
예제 #12
0
from pytablewriter import MarkdownTableWriter
import json

writer = MarkdownTableWriter()
writer.table_name = "Intent Cross-Validation Results (5 folds)"

# Load the per-class cross-validation report.
with open('results/intent_report.json', 'r') as f:
    data = json.loads(f.read())

# Fix: use one fixed column order taken from the 'micro avg' entry for every
# row.  The previous code iterated each class's own .keys(), which silently
# misaligned columns whenever a class dict had a different key order or an
# extra/missing key; missing keys now render as an empty cell instead of
# raising KeyError.
cols = list(data['micro avg'].keys())
writer.headers = ["class"] + cols

# Order classes by descending support count.
classes = list(data.keys())
classes.sort(key=lambda x: data[x]['support'], reverse=True)

writer.value_matrix = [[c] + [data[c].get(k, "") for k in cols]
                       for c in classes]

writer.dump('results.md')