Esempio n. 1
0
    def option(self):
        """
        Print the options required by the module

        Usage: options [-h]

        Options:
            -h, --help  print this help menu
        """
        # Header row followed by one row per configurable option.
        rows = [["Name", "Current Settings", "Required", "Description"]]
        rows.extend(
            [key, opts["Current Settings"], opts["Require"], opts["Description"]]
            for key, opts in self.options.items()
        )

        # Render as a minimal, borderless table with a heading separator only.
        table = AsciiTable(rows)
        table.inner_column_border = False
        table.inner_footing_row_border = False
        table.inner_heading_row_border = True
        table.inner_row_border = False
        table.outer_border = False

        print(f'\nModule Options ({self.name}):\n\n{table.table}\n')
Esempio n. 2
0
def main() :
    """Add a client host to a Nagios configuration via MncmMotor.

    Reads settings from conf/config.ini, parses the CLI arguments
    (--address, --hosttype, --hostgroups), delegates the change to
    MncmMotor and prints a summary table on success.
    """
    # Parameters
    time_start = datetime.datetime.now()
    file_config = os.path.join(os.path.dirname(__file__), 'conf/config.ini')
    Config = ConfigParser.ConfigParser()
    Config.read(file_config)

    # Logging setup: INFO to the console, DEBUG records to a per-application log file.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(Config.get('GLOBAL','application'))
    handler = logging.FileHandler(os.path.join(os.path.dirname(__file__), 'log/'+Config.get('GLOBAL','application')+'.log'))
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # Options (all three are mandatory)
    parser = argparse.ArgumentParser(description='Simple tool to add client into Nagios config files')
    parser.add_argument('--address', action='store', dest='address', help='FQDN of the asset', required=True)
    parser.add_argument('--hosttype', action='store', dest='hosttype', choices=['linux-server','generic-switch'], required=True)
    parser.add_argument('--hostgroups', action='store', dest='hostgroups', help='List of hostgroup(s) comma separated', required=True)
    args = parser.parse_args()

    try :
        # MNCM objects instance
        inst_mncm = MncmMotor(Config.get('GLOBAL','application'), Config.get('NAGIOS','myhostgroups'), Config.get('NAGIOS','myassetsdir'))
        # Check if the client doesn't exist already
        logger.info('Check if client "' + str(args.address) + '" is not already part of this configuration')
        if inst_mncm.host_exist(str(args.address)) :
            raise RuntimeError('The client "' + str(args.address) + '" is already part of this configuration')
        # Check if the hostgroups exist
        logger.info('Check if hostgroup(s) "' + str(args.hostgroups.split(',')) + '" is part of this configuration')
        inst_mncm.hostgroups_exist(str(args.hostgroups).split(','))
        # We can add the client :)
        logger.info('Adding client "' + str(args.address) + '" in the Nagios configuration with hostgroup(s) "' + str(args.hostgroups.split(',')) + '"')
        inst_mncm.add_host(str(args.address),str(args.hostgroups).split(','),str(args.hosttype))
        # Ascii table summarising what was done
        myAsciiTable = [['Client address','Type','Hostgroup(s)','Action']]
        tmpdata = list()
        tmpdata.append(str(args.address)) # Address
        tmpdata.append(str(args.hosttype)) # Hosttype
        tmpdata.append(str(args.hostgroups)) # Hostgroup(s)
        tmpdata.append('Added') # Action
        # Add tmpdata list to myAsciiTable
        myAsciiTable.append(tmpdata)
        myTable = AsciiTable(myAsciiTable)
        myTable.inner_footing_row_border = True
        # End script
        time_stop = datetime.datetime.now()
        time_delta = time_stop - time_start
        # Output data (Python 2 print statements; this snippet targets Python 2,
        # as does the ConfigParser module name above)
        print "######### DATE : %s - APP : %s #########" % (time_start.strftime("%Y-%m-%d"),Config.get('GLOBAL','application'))
        print "- Start time : %s" % (time_start.strftime("%Y-%m-%d %H:%M:%S"))
        print "- Finish time : %s" % (time_stop.strftime("%Y-%m-%d %H:%M:%S"))
        print "- Delta time : %d second(s)" % (time_delta.total_seconds())
        print myTable.table

    except Exception as e :
        # NOTE(review): the message says "during instance creation" but this
        # handler covers the whole workflow; any failure is logged the same way.
        logger.error('RunTimeError during instance creation : %s', str(e))
def paths(ctx):
    # Fetches per-repo "top paths" traffic stats from the GitHub API in
    # parallel, then prints them as JSON or an ASCII table depending on
    # ctx.obj["output_format"].  (No docstring: click uses docstrings as
    # CLI help text and the original command had none.)
    repos = ctx.obj.get("repos")
    output_format = ctx.obj.get("output_format")
    order = ctx.obj.get("order")
    parallel_workers = ctx.obj.get("parallel_workers")

    prog = progressbar(length=len(repos),
                       show_eta=False,
                       label="Fetching paths stats",
                       item_show_func=lambda r: r and r.name)
    with prog, ThreadPoolExecutor(max_workers=parallel_workers) as executor:

        def get(repo):
            # Fetch top paths for one repo, retrying once when the API
            # tells us how long to back off.
            try:
                resp = repo.get_top_paths()
            except github.RateLimitExceededException as e:
                retry_after = int(e.headers.get("retry-after", 0))
                if retry_after:
                    time.sleep(retry_after)
                    resp = repo.get_top_paths()
                else:
                    # Bare `raise` (not `raise e`) preserves the original
                    # traceback.
                    raise

            return resp, repo

        futures = (executor.submit(get, repo) for repo in repos)

        paths = []
        for f in concurrent.futures.as_completed(futures):
            top_paths, repo = f.result()
            prog.update(1, repo)
            for p in top_paths:
                paths.append({
                    "repo": repo.name,
                    "path": p.path,
                    "title": p.title,
                    "count": p.count,
                    "uniques": p.uniques
                })

    paths = sorted(paths,
                   key=lambda x: (x["uniques"], x["count"], x["repo"]),
                   reverse=order == "desc")

    if output_format == "json":
        click.echo(json.dumps(paths, indent=4, sort_keys=True))
    elif output_format == "table":
        labels = [["Repo", "Path", "Uniques", "Count"]]

        rows = [[path["repo"], path["path"], path["uniques"], path["count"]]
                for path in paths]

        # Labels are repeated as a footer row; inner_footing_row_border
        # draws the separator above it.
        table_rows = labels + rows + labels

        table = AsciiTable(table_rows, "Paths")
        table.inner_footing_row_border = True

        click.secho(table.table)
Esempio n. 4
0
def test_single_line():
    """Test single-lined cells."""
    rows = [
        ['Name', 'Color', 'Type'],
        ['Avocado', 'green', 'nut'],
        ['Tomato', 'red', 'fruit'],
        ['Lettuce', 'green', 'vegetable'],
        ['Watermelon', 'green'],
        [],
    ]
    table = AsciiTable(rows, 'Example')
    table.inner_footing_row_border = True
    # Column 0 left-, column 1 center-, column 2 right-justified.
    for index, justification in enumerate(('left', 'center', 'right')):
        table.justify_columns[index] = justification

    expected = (
        '+Example-----+-------+-----------+\n'
        '| Name       | Color |      Type |\n'
        '+------------+-------+-----------+\n'
        '| Avocado    | green |       nut |\n'
        '| Tomato     |  red  |     fruit |\n'
        '| Lettuce    | green | vegetable |\n'
        '| Watermelon | green |           |\n'
        '+------------+-------+-----------+\n'
        '|            |       |           |\n'
        '+------------+-------+-----------+'
    )
    assert table.table == expected
Esempio n. 5
0
def render_matched_rules_as_table(state, rules):
    """
    Renders the provided, flattened backup rules (matched) as a table.

    :param state: rules state (included or excluded)
    :param rules: rules to render
    :return: rendered table string
    """
    # Guard clause: nothing to render.
    if not rules:
        return 'No data'

    rows = [['State', 'Entity', 'Explanation']]
    rows += [[rule['state'], rule['entity'], rule['explanation']]
             for rule in rules]
    # Footer row carries the total; separated by inner_footing_row_border.
    rows.append([state, 'Total: {}'.format(len(rules)), '-'])

    table = AsciiTable(rows)
    table.inner_footing_row_border = True
    return table.table
Esempio n. 6
0
 def print(self):
     """Render the cart contents as an ASCII-table string and return it."""
     rows = [['Item', '            ', 'Price'],
             ["----", "", "-----"]]
     for entry in self.contents():
         rows.append([entry.product_code, "", "{0:.2f}".format(entry.price)])
         # Discounted items get an extra row showing the coupon applied.
         if entry.discount:
             rows.append(["", entry.coupon, f"-{entry.discount}"])
     rows.append(["", "", "{0:.2f}".format(self.total())])

     table = AsciiTable(table_data=rows)
     table.inner_column_border = False
     table.outer_border = False
     table.inner_heading_row_border = False
     table.inner_footing_row_border = True
     table.justify_columns[1] = 'center'  # Justify coupons center
     table.justify_columns[2] = 'right'   # Justify prices right
     return table.table
Esempio n. 7
0
def main_help():
    """Build and return the "Core commands" help text.

    Returns:
        str: A formatted section containing an ASCII table with one row
        per command and its description.
    """
    commands = ['database', 'sniffing', 'exploit', 'modelling', 'exit']
    description = [
        'Use database mode.', 'Use sniffing mode.', 'Use exploit mode.',
        'Use modelling mode.', 'Quit this program'
    ]

    table_data = [['Commands', 'Description']]
    # Pair every command with its description.  The original
    # `range(len(commands) - 1)` dropped the final 'exit' entry.
    table_data.extend([cmd, desc] for cmd, desc in zip(commands, description))

    # Build the table once, after all rows are collected (the original
    # rebuilt table and msg on every loop iteration).
    table = AsciiTable(table_data)
    table.inner_column_border = False
    table.inner_footing_row_border = False
    table.inner_heading_row_border = True
    table.inner_row_border = False
    table.outer_border = False

    msg = f"""
Core commands
=============

{table.table}\n\n"""
    return msg
Esempio n. 8
0
def print_map_summary(mAP, APs, dataset=None):
    """Print mAP and the per-class APs as an ASCII table.

    Args:
        mAP(float): calculated from `eval_map`
        APs(ndarray): calculated from `eval_map`
        dataset(None or str or list): dataset name or dataset classes.
    """
    num_classes = APs.shape[0]

    # Resolve human-readable class names.
    if dataset is None:
        label_names = [str(i) for i in range(1, num_classes + 1)]
    elif mmcv.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = list(dataset)

    rows = [['class', 'ap']]
    for j, ap in enumerate(APs):
        rows.append([label_names[j], '{:.4f}'.format(ap)])
    # Footer row holds the overall mAP.
    rows.append(['mAP', '{:.4f}'.format(mAP)])

    summary = AsciiTable(rows)
    summary.inner_footing_row_border = True
    print(summary.table)
Esempio n. 9
0
def print_map_summary(mean_ap,
                      results,
                      dataset=None,
                      ranges=None,
                      work_dir=None):
    """Print mAP and results of each class.

    Args:
        mean_ap(float): calculated from `eval_map`
        results(list): calculated from `eval_map`
        dataset(None or str or list): dataset name or dataset classes.
        ranges(list or Tuple): ranges of areas
        work_dir(None or str): if given, each table is also appended to
            this file.
    """
    # One scale per AP entry when APs were computed over several area
    # ranges; otherwise a single scale.
    num_scales = len(results[0]['ap']) if isinstance(results[0]['ap'],
                                                     np.ndarray) else 1
    if ranges is not None:
        assert len(ranges) == num_scales

    num_classes = len(results)

    # Per-scale / per-class metric matrices.
    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    precisions = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            # Take the value at the final operating point for each scale.
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
            precisions[:, i] = np.array(cls_result['precision'], ndmin=2)[:,
                                                                          -1]
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']

    # Resolve human-readable class names.
    if dataset is None:
        label_names = [str(i) for i in range(1, num_classes + 1)]
    elif mmcv.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = dataset

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]
    header = ['class', 'gts', 'dets', 'recall', 'precision', 'ap']
    # One table per scale, with a footer row holding the mAP.
    for i in range(num_scales):
        if ranges is not None:
            print("Area range ", ranges[i])
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                '{:.3f}'.format(recalls[i, j]),
                '{:.3f}'.format(precisions[i, j]), '{:.3f}'.format(aps[i, j])
            ]
            table_data.append(row_data)
        table_data.append(['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print(table.table)
        if work_dir is not None and mmcv.is_str(work_dir):
            # Also append the rendered table to the given file.
            with open(work_dir, 'a+') as f:
                print(table.table, file=f)
Esempio n. 10
0
def render_basic(data, title=None):
    """Print *data* as an ASCII table with wide padding.

    :param data: list of rows (first row is the heading).
    :param title: optional table title.
    """
    basic_table = AsciiTable(data, title=title)
    basic_table.inner_row_border = True
    basic_table.inner_footing_row_border = True
    # Extra horizontal breathing room on both sides of every cell.
    basic_table.padding_left = basic_table.padding_right = 5
    print(basic_table.table)
    print("\n")
Esempio n. 11
0
def print_map_summary(mean_ap,
                      results,
                      dataset=None,
                      scale_ranges=None):
    """Print mAP and results of each class.

    A table will be printed to show the gts/dets/recall/AP of each class and
    the mAP.

    Args:
        mean_ap (float): Calculated from `eval_map()`.
        results (list[dict]): Calculated from `eval_map()`.
        dataset (list[str] | str | None): Dataset name or dataset classes.
        scale_ranges (list[tuple] | None): Range of scales to be evaluated.
    """
    # One scale per AP entry when evaluated over several scale ranges.
    if isinstance(results[0]['ap'], np.ndarray):
        num_scales = len(results[0]['ap'])
    else:
        num_scales = 1

    if scale_ranges is not None:
        assert len(scale_ranges) == num_scales

    num_classes = len(results)

    # Per-scale / per-class metric matrices.
    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            # Take the recall at the final operating point for each scale.
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']

    # Resolve human-readable class names.
    if dataset is None:
        label_names = [str(i) for i in range(num_classes)]
    else:
        label_names = get_classes(dataset)

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]

    header = ['class', 'gts', 'dets', 'recall', 'ap']
    # One table per scale range, with a footer row holding the mAP.
    for i in range(num_scales):
        if scale_ranges is not None:
            print(f'Scale range {scale_ranges[i]}')
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
            ]
            table_data.append(row_data)
        table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print('\n' + table.table)
Esempio n. 12
0
def print_summary_table(out_prefix,
                        project,
                        toolchain,
                        board,
                        build_type,
                        build_nr=None):
    """Prints a summary table of the outcome of each test.

    Args:
        out_prefix: directory (relative to root_dir) holding the builds.
        project, toolchain, board: optional filters; a build is kept only
            when its corresponding column is contained in the filter.
        build_type: required build-type filter.
        build_nr: optional build-number filter.

    Returns:
        bool: True when no build that passed the filters failed.
    """
    builds = get_builds(out_prefix)
    # 'Result' column added so the header matches the data rows (each row
    # gets a passed/failed cell appended below; the footer also has 9 cells).
    table_data = [[
        'Project', 'Toolchain', 'Family', 'Part', 'Board', 'Build Type',
        'Build N.', 'Options', 'Result'
    ]]
    passed = failed = 0
    build_count = 0
    # Build directory names look like:
    # oneblink_vpr_xc7_a35tcsg326-1_arty_generic-build_0_options
    # i.e. 7 underscore-separated fields followed by a free-form tail.
    pattern = '([^_]*)_' * 7 + '(.*)'
    for build in sorted(builds):
        # Split directory name into columns
        row = list(re.match(pattern, build).groups())

        if build_type != row[5] or (build_nr and int(build_nr) != int(row[6])):
            continue
        if project and row[0] not in project:
            continue
        if toolchain and row[1] not in toolchain:
            continue
        if board and row[4] not in board:
            continue

        # Check if metadata was generated
        # It is created for successful builds only
        if os.path.exists(
                os.path.join(root_dir, out_prefix, build, 'meta.json')):
            row.append(Color('{autogreen}passed{/autogreen}'))
            passed += 1
        else:
            row.append(Color('{autored}failed{/autored}'))
            failed += 1
        table_data.append(row)
        build_count += 1

    # Guard against ZeroDivisionError when no build matched the filters.
    pass_rate = int(passed / build_count * 100) if build_count else 0
    table_data.append([
        Color('{autogreen}Passed:{/autogreen}'), passed,
        Color('{autored}Failed:{/autored}'), failed, '', '', '', '',
        '{}%'.format(pass_rate)
    ])
    table = AsciiTable(table_data)
    table.inner_footing_row_border = True
    print(table.table)

    return failed == 0
Esempio n. 13
0
def tables(matriz, ult_borda=False, title="", separar_linhas=False):
    """Receive a table as a matrix and print it to the terminal.

    :param matriz: list of rows to render.
    :param ult_borda: draw a border above the final (footer) row.
    :param title: table title.
    :param separar_linhas: draw a border between every row.
    """
    terminal_table = AsciiTable(matriz)
    terminal_table.inner_footing_row_border = ult_borda
    terminal_table.inner_row_border = separar_linhas
    terminal_table.title = title
    # `.ok` is False when the table is wider than the terminal.
    if not terminal_table.ok:
        print(
            "Tabela Muito grande, Recue o zoom\nou aumente a janela do terminal!"
        )
    else:
        print(terminal_table.table)
Esempio n. 14
0
def get_snapshots(args):
    """Fetch and display a cluster's snapshots from the public API.

    Renders the result as an ASCII table when `terminaltables` is
    available, otherwise pretty-prints the raw rows.

    Args:
        args: parsed CLI namespace with host, group, clusterId, username
            and apikey (verbosity is consumed by vprint).
    """
    try:
        from terminaltables import AsciiTable
    except ImportError:
        # Fall back to pprint output below.
        AsciiTable = False
    response = requests.get(args.host + "/api/public/v1.0/groups/" +
                            args.group + "/clusters/" + args.clusterId +
                            "/snapshots",
                            auth=HTTPDigestAuth(args.username, args.apikey))
    response.raise_for_status()
    vprint("============= response ==============", args)
    vprint(vars(response), args)
    vprint("============= end response ==============", args)

    hosts_json = response.json()
    vprint(hosts_json, args)
    table_data = [[
        'created', 'expires', 'complete', 'replicaSetName', 'id', 'parts'
    ]]
    for host in hosts_json['results']:
        row = []
        part_data = [[
            'replicaSetName', 'storageSizeBytes', 'mongodbVersion', 'typeName',
            'fileSizeBytes', 'dataSizeBytes'
        ]]
        for column in table_data[0]:
            if column == 'parts':
                parts = []
                for part in host['parts']:
                    for pcol in part_data[0]:
                        parts.append(pcol + ":" + str(part.get(pcol)))
                    parts.append("++++++++++++++")
                # Append the parts summary as a single cell.  The original
                # appended inside the per-part loop, producing one
                # cumulatively-growing column per part.
                row.append("\n".join(parts))
            elif column == 'created':
                row.append(str(host.get('created').get('date')))
            else:
                row.append(str(host.get(column)))
        table_data.append(row)

    table_data.append(
        ['', '', 'Number of snapshots',
         str(hosts_json['totalCount'])])

    host_info = 'Snapshots from ' + args.host + " for clusterId=" + args.clusterId

    if AsciiTable:
        table = AsciiTable(table_data, host_info)
        table.inner_footing_row_border = True
        # print() call: the original used a Python 2 print statement,
        # which is a syntax error under Python 3.
        print(table.table)
    else:
        import pprint
        pprint.pprint(table_data)
Esempio n. 15
0
def ps(gravitee_api, deploy_state):
    """APIs list"""
    # Docstring kept verbatim: click uses it as the command's help text.
    resp = gravitee_api.get_apis()

    if not resp:
        click.echo("No Api(s) found ")
        return

    data = []
    if deploy_state:
        data.append(['id', 'Name', 'Synchronized', 'Status'])
    else:
        data.append(['id', 'Name', 'Status'])

    for api_item in resp.json():
        state_color = 'green' if api_item['state'] == 'started' else 'red'

        # Prefix the workflow state (e.g. REVIEW) when present.
        if 'workflow_state' in api_item:
            state = click.style(api_item['workflow_state'].upper(),
                                fg='blue') + "-" + click.style(
                                    api_item['state'].upper(),
                                    fg=state_color)
        else:
            state = click.style(api_item['state'].upper(), fg=state_color)

        if deploy_state:
            # One extra API call per item to fetch the sync status.
            response_state = gravitee_api.state_api(api_item['id'])
            synchronized = click.style("X", fg='yellow')
            if response_state.json()["is_synchronized"]:
                synchronized = click.style("V", fg='green')

            data.append(
                [api_item['id'], api_item['name'], synchronized, state])
        else:
            # (A dead `color` computation was removed here; its value was
            # never read.)
            data.append([api_item['id'], api_item['name'], state])

    table = AsciiTable(data)
    table.inner_footing_row_border = False
    table.inner_row_border = False
    table.inner_column_border = False
    table.outer_border = False
    if deploy_state:
        table.justify_columns[2] = 'center'
    click.echo(table.table)
Esempio n. 16
0
    def echo(self, data):
        """Render *data* as a borderless ASCII table and echo it via click.

        :param data: list of rows to render.
        """
        table = AsciiTable(data)
        table.inner_footing_row_border = False
        table.inner_row_border = False
        table.inner_column_border = False
        table.outer_border = False

        # PEP 8 idiom: `is not None` instead of `not ... is None`.
        if self.style is not None:
            table.justify_columns = self.style

        click.echo(table.table)
Esempio n. 17
0
def seg_eval(gt_labels, seg_preds, label2cat, logger=None):
    """Semantic segmentation evaluation.

    Computes per-class IoU, mean IoU, overall accuracy and mean class
    accuracy, prints them as a table and returns them as a dict.

    Args:
        gt_labels (list[torch.Tensor]): Ground truth labels.
        seg_preds (list[torch.Tensor]): Predictions.
        label2cat (dict): Map from label to category.
        logger (logging.Logger | str | None): The way to print the mAP
            summary. See `mmdet.utils.print_log()` for details. Default: None.

    Return:
        dict[str, float]: Dict of results.
    """
    assert len(seg_preds) == len(gt_labels)

    # One confusion-matrix histogram per sample, summed into a total.
    hist_list = [
        fast_hist(pred.numpy().astype(int), gt.numpy().astype(int),
                  len(label2cat))
        for pred, gt in zip(seg_preds, gt_labels)
    ]
    total_hist = sum(hist_list)
    iou = per_class_iou(total_hist)
    miou = np.nanmean(iou)
    acc = get_acc(total_hist)
    acc_cls = get_acc_cls(total_hist)

    header = ['classes']
    header += [label2cat[i] for i in range(len(label2cat))]
    header += ['miou', 'acc', 'acc_cls']

    ret_dict = dict()
    table_columns = [['results']]
    for i in range(len(label2cat)):
        ret_dict[label2cat[i]] = float(iou[i])
        table_columns.append([f'{iou[i]:.4f}'])
    ret_dict['miou'] = float(miou)
    ret_dict['acc'] = float(acc)
    ret_dict['acc_cls'] = float(acc_cls)

    table_columns += [[f'{miou:.4f}'], [f'{acc:.4f}'], [f'{acc_cls:.4f}']]

    # Transpose the column-major layout into table rows.
    table = AsciiTable([header] + list(zip(*table_columns)))
    table.inner_footing_row_border = True
    print_log('\n' + table.table, logger=logger)

    return ret_dict
Esempio n. 18
0
    def print(self, data, **kwargs):
        """Echo *data* as a borderless ASCII table.

        :param data: list of rows to render.
        :param kwargs: optional 'inner_heading_row_border' (bool) and
            'style' (column justification mapping) overrides.
        """
        table = AsciiTable(data)
        # All borders off by default; kwargs may re-enable the heading one.
        for attr in ('inner_footing_row_border', 'inner_row_border',
                     'inner_column_border', 'outer_border'):
            setattr(table, attr, False)

        if "inner_heading_row_border" in kwargs:
            table.inner_heading_row_border = kwargs["inner_heading_row_border"]
        if "style" in kwargs:
            table.justify_columns = kwargs["style"]

        click.echo(table.table)
Esempio n. 19
0
def print_map_summary(mean_ap, results, dataset=None):
    """Print mAP and results of each class.

    Args:
        mean_ap(float): calculated from `eval_map`
        results(list): calculated from `eval_map`
        dataset(None or str or list): dataset name or dataset classes.
    """
    # One scale per AP entry when evaluated over several scales, else 1.
    num_scales = (len(results[0]["ap"])
                  if isinstance(results[0]["ap"], np.ndarray) else 1)
    num_classes = len(results)

    # Per-scale / per-class metric matrices.
    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    precisions = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result["recall"].size > 0:
            # Take the value at the final operating point for each scale.
            recalls[:, i] = np.array(cls_result["recall"], ndmin=2)[:, -1]
            precisions[:, i] = np.array(cls_result["precision"], ndmin=2)[:,
                                                                          -1]
        aps[:, i] = cls_result["ap"]
        num_gts[:, i] = cls_result["num_gts"]

    # Resolve human-readable class names.
    if dataset is None:
        label_names = [str(i) for i in range(1, num_classes + 1)]
    elif mmcv.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = dataset

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]
    header = ["class", "gts", "dets", "recall", "precision", "ap"]
    # One table per scale, with a footer row holding the mAP.
    for i in range(num_scales):
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j],
                num_gts[i, j],
                results[j]["num_dets"],
                "{:.3f}".format(recalls[i, j]),
                "{:.3f}".format(precisions[i, j]),
                "{:.3f}".format(aps[i, j]),
            ]
            table_data.append(row_data)
        table_data.append(["mAP", "", "", "", "", "{:.3f}".format(mean_ap[i])])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print(table.table)
Esempio n. 20
0
def lyft_eval(lyft, data_root, res_path, eval_set, output_dir, logger=None):
    """Evaluation API for Lyft dataset.

    Args:
        lyft (:obj:`LyftDataset`): Lyft class in the sdk.
        data_root (str): Root of data for reading splits.
        res_path (str): Path of result json file recording detections.
        eval_set (str): Name of the split for evaluation.
        output_dir (str): Output directory for output json files.
        logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

    Returns:
        dict[str, float]: The evaluation results.
    """
    # evaluate by lyft metrics
    gts = load_lyft_gts(lyft, data_root, eval_set, logger)
    predictions = load_lyft_predictions(res_path)

    class_names = get_class_names(gts)
    # Restored 'mAP@0.5:0.95' strings below: the scraped source showed
    # '[email protected]', an artifact of e-mail obfuscation mangling the '@'.
    print('Calculating mAP@0.5:0.95...')

    iou_thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
    metrics = {}
    average_precisions = \
        get_classwise_aps(gts, predictions, class_names, iou_thresholds)

    mAPs = np.mean(average_precisions, axis=0)
    mAPs_cate = np.mean(average_precisions, axis=1)
    final_mAP = np.mean(mAPs)

    metrics['average_precisions'] = average_precisions.tolist()
    metrics['mAPs'] = mAPs.tolist()
    metrics['Final mAP'] = float(final_mAP)
    metrics['class_names'] = class_names
    metrics['mAPs_cate'] = mAPs_cate.tolist()

    # (A dead first assignment of APs_data holding the raw IoU thresholds
    # was removed; it was immediately overwritten.)
    APs_data = [['class', 'mAP@0.5:0.95']]
    for name, class_map in zip(class_names, mAPs_cate):
        APs_data.append([name, round(class_map, 3)])
    APs_data.append(['Overall', round(final_mAP, 3)])
    APs_table = AsciiTable(APs_data, title='mAP@0.5:0.95')
    APs_table.inner_footing_row_border = True
    print_log(APs_table.table, logger=logger)

    res_path = osp.join(output_dir, 'lyft_metrics.json')
    mmcv.dump(metrics, res_path)
    return metrics
Esempio n. 21
0
def get_alerts(args):
    """Fetch and display a group's alerts from the public API.

    Output is an ASCII table when `terminaltables` is available and the
    requested format is not 'json'; otherwise JSON or the raw rows.

    Args:
        args: parsed CLI namespace with host, group, username, apikey and
            format (verbosity is consumed by vprint).

    Returns:
        dict: the decoded JSON response.
    """
    try:
        from terminaltables import AsciiTable
    except ImportError:
        # Fall back to JSON/pprint output below.
        AsciiTable = False
    if args.format == 'json':
        AsciiTable = False
    host = args.host
    group_id = args.group
    user_name = args.username
    api_key = args.apikey
    response = requests.get(host + "/api/public/v1.0/groups/" + group_id +
                            "/alerts/",
                            auth=HTTPDigestAuth(user_name, api_key))
    response.raise_for_status()
    vprint("============= response ==============", args)
    vprint(vars(response), args)
    vprint("============= end response ==============", args)

    alerts_json = response.json()

    table_data = [['eventTypeName', 'status', 'created', 'replicaSetName']]

    for alert in alerts_json['results']:
        row = [
            str(alert.get('eventTypeName')),
            str(alert.get('status')),
            str(alert.get('created')),
            str(alert.get('replicaSetName', ''))
        ]
        table_data.append(row)

    table_data.append(
        ['', '', 'Number alerts',
         str(alerts_json['totalCount'])])

    host_info = 'Alerts from ' + host

    if AsciiTable:
        table = AsciiTable(table_data, host_info)
        table.inner_footing_row_border = True
        # print() call: the original used a Python 2 print statement,
        # which is a syntax error under Python 3.
        print(table.table)
    else:
        if args.format == 'json':
            print(json.dumps(alerts_json))
        else:
            import pprint
            pprint.pprint(table_data)
    return alerts_json
Esempio n. 22
0
def print_map_summary(mean_ap, results, dataset=None):
    """Print mAP and results of each class.

    Args:
        mean_ap(float): calculated from `eval_map`
        results(list): calculated from `eval_map`
        dataset(None or str or list): dataset name or dataset classes.
    """
    # One scale per AP entry when evaluated over several scales, else 1.
    num_scales = len(results[0]['ap']) if isinstance(results[0]['ap'],
                                                     np.ndarray) else 1
    num_classes = len(results)

    # Per-scale / per-class metric matrices.
    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    precisions = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            # Take the value at the final operating point for each scale.
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
            precisions[:, i] = np.array(
                cls_result['precision'], ndmin=2)[:, -1]
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']

    # Resolve human-readable class names.
    if dataset is None:
        label_names = [str(i) for i in range(1, num_classes + 1)]
    elif mmcv.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = dataset

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]
    header = ['class', 'gts', 'dets', 'recall', 'precision', 'ap']
    for i in range(num_scales):
        table_data = [header]
        # NOTE(review): the break below stops after the FIRST class
        # (j == 1) when dataset == 'voc07'.  The original comment said it
        # guards against label_names being shorter than num_classes for
        # VOC; if so the cutoff should presumably be the VOC class count
        # (20), not 1 -- confirm against the caller before relying on it.
        for j in range(num_classes):
            if dataset == 'voc07' and j == 1:
                break
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                '{:.3f}'.format(recalls[i, j]), '{:.3f}'.format(
                    precisions[i, j]), '{:.3f}'.format(aps[i, j])
            ]
            table_data.append(row_data)
        table_data.append(['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print(table.table)
Esempio n. 23
0
def output_ascii_table(table_title=None,
                       table_data=None,
                       inner_heading_row_border=False,
                       inner_footing_row_border=False,
                       inner_row_border=False):
    """Render *table_data* as an ASCII-art table on stdout.

    @type table_title: unicode
    @type table_data: list
    @type inner_heading_row_border: bool
    @type inner_footing_row_border: bool
    @type inner_row_border: bool
    """
    rendered = AsciiTable(table_data)
    rendered.inner_heading_row_border = inner_heading_row_border
    rendered.inner_row_border = inner_row_border
    rendered.inner_footing_row_border = inner_footing_row_border
    rendered.title = table_title
    print(rendered.table)
Esempio n. 24
0
def output_ascii_table(table_title=None,
                       table_data=None,
                       inner_heading_row_border=False,
                       inner_footing_row_border=False,
                       inner_row_border=False):
    """Print *table_data* as an ASCII table with the requested borders.

    @type table_title: unicode
    @type table_data: list
    @type inner_heading_row_border: bool
    @type inner_footing_row_border: bool
    @type inner_row_border: bool
    """
    t = AsciiTable(table_data)
    # Apply the border flags in the same order the caller supplied them.
    for attr, flag in (('inner_heading_row_border', inner_heading_row_border),
                       ('inner_row_border', inner_row_border),
                       ('inner_footing_row_border', inner_footing_row_border)):
        setattr(t, attr, flag)
    t.title = table_title
    print(t.table)
Esempio n. 25
0
def print_map_summary(mean_ap, results, dataset=None):
    """Print mAP and per-class recall/precision/AP as ASCII tables.

    One table is printed per evaluation scale.

    Args:
        mean_ap(float or list): calculated from `eval_map`; a list holds one
            value per scale.
        results(list): per-class result dicts calculated from `eval_map`,
            each with keys 'recall', 'precision', 'ap', 'num_gts', 'num_dets'.
        dataset(None or str or list): get label names by dataset, see
            `cvbase.read_labels()`; falls back to 1-based class indices.
    """
    # 'ap' is an ndarray (one entry per scale) when several scales were
    # evaluated, otherwise a scalar.
    num_scales = len(results[0]['ap']) if isinstance(results[0]['ap'],
                                                     np.ndarray) else 1
    num_classes = len(results)

    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    precisions = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            # Keep only the last-threshold recall/precision per scale.
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
            precisions[:, i] = np.array(cls_result['precision'],
                                        ndmin=2)[:, -1]
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']

    try:
        label_names = read_labels(dataset)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Fall back to 1-based indices.
        label_names = range(1, num_classes + 1)
    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]
    header = ['class', 'gts', 'dets', 'recall', 'precision', 'ap']
    for i in range(num_scales):
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                '{:.3f}'.format(recalls[i, j]),
                '{:.3f}'.format(precisions[i, j]), '{:.3f}'.format(aps[i, j])
            ]
            table_data.append(row_data)
        table_data.append(['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print(table.table)
Esempio n. 26
0
def display_pull_stats(reviewers, reviewer_workload):
    """Print a per-reviewer table of assigned pull requests plus a total row."""
    rows = [['Assignee', '# assigned']]

    total = 0
    for person in reviewers:
        count = reviewer_workload[person]
        rows.append([person, six.text_type(count)])
        total += count

    rows.append(['TOTAL: ', six.text_type(total)])

    table = AsciiTable(rows)
    table.inner_footing_row_border = True
    table.justify_columns = {1: 'center'}
    print(table.table)
Esempio n. 27
0
def print_results(results, use_color, outfile):
    """Print the given scan results as a table; also save to *outfile* if set."""
    table = AsciiTable([["File", "Type", "Code", "Line Number"]] + results)
    table.justify_columns = {0: "center", 1: "center", 2: "left", 3: "center"}
    table.inner_row_border = True
    table.inner_footing_row_border = True
    table.inner_heading_row_border = True
    table.outer_border = True
    lines = table.table.split("\n")
    # Turn the heading separator row into a double rule for emphasis.
    lines[2] = lines[2].replace("-", "=")
    rendered = "\n".join(lines) + "\n"
    print(rendered)
    if outfile:
        with open(outfile, "w") as f:
            f.write(rendered)
    my_print("[+] Analysis complete: %d suspicious code fragments found" %
             len(results),
             "blue",
             use_color=use_color)
Esempio n. 28
0
def get_category_table(ground_truth, predicted, categories=None, label=None):
    """Build a hits/misses table comparing *predicted* against *ground_truth*
    after bucketing both into *categories*; returns the Table object.
    """
    if categories is None:
        categories = [0, 1, 2, 4]
    hits = {}
    misses = {}
    for true_val, pred_val in zip(ground_truth, predicted):
        true_cat = get_category(true_val, categories)
        pred_cat = get_category(pred_val, categories)
        # A pair counts as a hit when both values land in the same bucket.
        bucket = hits if true_cat == pred_cat else misses
        bucket[true_cat] = bucket.get(true_cat, 0) + 1

    rows = [["Category", "Hits", "Misses", "Total"]]
    names = get_category_strings(categories)
    for name, category in zip(names, categories):
        n_hit = hits.get(category, 0)
        n_miss = misses.get(category, 0)
        rows.append([name, str(n_hit), str(n_miss), str(n_hit + n_miss)])

    all_hits = sum(hits.values())
    all_misses = sum(misses.values())
    rows.append(
        ["Total", str(all_hits), str(all_misses), str(all_hits + all_misses)])

    table = Table(rows)
    table.title = "Categoric rating"
    if label:
        table.title += ": " + label
    table.inner_footing_row_border = True
    return table
Esempio n. 29
0
def make_table(metric_name, header_name, metric_by_class, header_lst):
    """Assemble an AsciiTable summarising *metric_by_class* per tIoU threshold.

    NOTE(review): relies on module-level globals `cfg` and `tiou_thresh`;
    `header_lst` is unused here — kept for interface compatibility.
    """
    def _rows_for(fld, data, template='{:.03f}', avg=False):
        # A list of labels paired with matching-length data yields one row
        # per label (recursively); anything else yields a single row.
        if isinstance(fld, list) and len(fld) == len(data):
            rows = []
            for label, values in zip(fld, data):
                rows += _rows_for(label, values, template, avg)
            return rows
        assert len(data.shape) == 1, \
            'Invalid data shape: {}'.format(data.shape)
        row = ['{}'.format(fld)]
        row += [template.format(v) for v in data.tolist()]
        if avg:
            row.append(template.format(data.mean()))
        return [row]

    title = '{} results on {}'.format(metric_name, cfg.DATASET)

    # Header row: the tIoU thresholds, plus a trailing 'Avg' column.
    header_rows = _rows_for(header_name, tiou_thresh, template='{:.02f}')
    header_rows[0].append('Avg')
    class_rows = _rows_for(['{:d}'.format(c) for c in range(cfg.NUM_CLASSES)],
                           metric_by_class,
                           avg=True)

    metric_avg = metric_by_class.mean(axis=0)
    avg_rows = _rows_for(metric_name, metric_avg, avg=True)

    table_data = header_rows
    if cfg.EVAL.TBL_INCLUDE_CLS:
        table_data = table_data + class_rows
    table_data = table_data + avg_rows

    table = AsciiTable(table_data, title)
    table.justify_columns[-1] = 'right'
    table.inner_footing_row_border = True

    return table
Esempio n. 30
0
def get_hosts(args):
    """Fetch and display the hosts of a group from the public API.

    Queries ``<args.host>/api/public/v1.0/groups/<args.group>/hosts/`` with
    HTTP digest auth and prints the result — as an ASCII table when
    `terminaltables` is installed, otherwise via pprint.

    Raises:
        requests.HTTPError: if the API responds with an error status.
    """
    try:
        from terminaltables import AsciiTable
    except ImportError:
        # Optional dependency: falls back to the pprint branch below.
        AsciiTable = False
    response = requests.get(args.host + "/api/public/v1.0/groups/" +
                            args.group + "/hosts/",
                            auth=HTTPDigestAuth(args.username, args.apikey))
    response.raise_for_status()
    vprint("============= response ==============", args)
    vprint(vars(response), args)
    vprint("============= end response ==============", args)

    hosts_json = response.json()

    table_data = [[
        'hostname', 'id', 'clusterId', 'version', 'typeName', 'replicaSetName',
        'replicaStateName', 'lastPing'
    ]]

    for host in hosts_json['results']:
        # Missing fields render as the string 'None'.
        table_data.append([str(host.get(column)) for column in table_data[0]])

    table_data.append(
        ['', '', 'Number of hosts',
         str(hosts_json['totalCount'])])

    host_info = 'Hosts from ' + args.host

    if AsciiTable:
        table = AsciiTable(table_data, host_info)
        table.inner_footing_row_border = True
        # Bug fix: was a Python 2 `print` statement (SyntaxError on
        # Python 3); the parenthesized form is valid on both.
        print(table.table)
    else:
        import pprint
        pprint.pprint(table_data)
Esempio n. 31
0
    def listSniffers(self):
        """
        List available sniffers

        Usage: listSniffers
        """

        # Bug fix: header typo "Idenfifier" -> "Identifier" (user-facing).
        table_data = [
            ["Identifier", "Name", "device(s)", "Protocol", "Is_alive"]
        ]

        # One row per registered sniffer, including its live-thread status.
        for identifier, sniffer in self.sniffers.items():
            table_data.append([
                identifier,
                sniffer['name'],
                sniffer['thread'].device,
                sniffer['protocol'],
                sniffer['thread'].is_alive(),
            ])

        table = AsciiTable(table_data)
        table.inner_column_border = False
        table.inner_footing_row_border = False
        table.inner_heading_row_border = True
        table.inner_row_border = False
        table.outer_border = False

        print(f'\nList of available sniffers:\n\n{table.table}\n')
Esempio n. 32
0
def display_summary(Dict, num_classes):
    """Print metric results of each class as an ASCII table.

    Args:
        Dict(dict): maps metric name -> sequence of per-class values.
        num_classes: number of per-class rows to print.
    """
    header = list(Dict.keys())
    table_data = [header]
    for j in range(num_classes):
        row = []
        for values in Dict.values():
            cell = values[j]
            if isinstance(cell, (float, np.float32)):
                row.append('{:.4g}'.format(cell))
            elif isinstance(cell, (int, np.int64, np.int32)):
                row.append('{:6d}'.format(cell))
            else:
                row.append('{:6s}'.format(str(cell)))
        table_data.append(row)

    # Footer: mean of each numeric column. The first column is skipped
    # (presumably it holds the class labels — confirm against callers).
    mean_row = ['Mean']
    for idx, values in enumerate(Dict.values()):
        if idx == 0:
            continue
        if isinstance(values[0], (int, float, np.float32)):
            mean_row.append('{:.4f}'.format(np.asarray(values).mean()))
        else:
            mean_row.append(' --- ')
    table_data.append(mean_row)

    table = AsciiTable(table_data)
    table.inner_footing_row_border = True
    print(table.table)
Esempio n. 33
0
            # Add message to list
            tmpdata.append(mess_deleted)
        else :
            # Add message to list
            tmpdata.append("Google Agenda booked but ITEA calendar not")
    
    # Add tmpdata list to myAsciiTable 
    myAsciiTable.append(tmpdata)
                    
# Create AsciiTable and total
# Footer row: only the first cell carries text; the remaining four cells are
# left blank so the row has the same five columns as the data rows above.
# The "-1" presumably excludes a header row — confirm against the code that
# initialises myAsciiTable.
tmpdata = list()
tmpdata.append("Total : " + str(len(myAsciiTable) - 1) + " row(s)")
tmpdata.append("")
tmpdata.append("")
tmpdata.append("")
tmpdata.append("")
myAsciiTable.append(tmpdata)
myTable = AsciiTable(myAsciiTable)
myTable.inner_footing_row_border = True
# Right-align columns 1-3.
myTable.justify_columns[1] = myTable.justify_columns[2] = myTable.justify_columns[3] = 'right'

# End script: record the stop time and the elapsed wall-clock duration.
time_stop = datetime.datetime.now()
time_delta = time_stop - time_start

# Output data (Python 2 print statements): run banner, timings, and the table.
print "######### Date : %s - App : %s #########" % (time_start.strftime("%Y-%m-%d"),Config.get('GLOBAL','application'))
print "- Start time : %s" % (time_start.strftime("%Y-%m-%d %H:%M:%S"))
print "- Finish time : %s" % (time_stop.strftime("%Y-%m-%d %H:%M:%S"))
print "- Delta time : %d second(s)" % (time_delta.total_seconds())
print myTable.table