Example #1
    def console_writer_before_each_scenario(self, scenario):
        """
            Writes the scenario header to the console

            :param Scenario scenario: the scenario to write to the console
        """
        output = "\n"
        if isinstance(scenario.parent, ScenarioOutline):
            if world.config.write_steps_once:
                return

            id_prefix = self.get_id_sentence_prefix(
                scenario, colorful.bold_yellow, len(scenario.parent.scenarios))
            colored_pipe = colorful.bold_white("|")
            output = "        {0}{1} {2} {1}".format(
                id_prefix,
                colored_pipe,
                (" {0} ").format(colored_pipe).join(
                    str(
                        colorful.bold_yellow("{1: <{0}}".format(
                            scenario.parent.get_column_width(i), x)))
                    for i, x in enumerate(scenario.example.data)),
            )
        elif isinstance(scenario.parent, ScenarioLoop):
            if world.config.write_steps_once:
                return

            id_prefix = self.get_id_sentence_prefix(
                scenario, colorful.bold_yellow, len(scenario.parent.scenarios))
            colored_pipe = colorful.bold_white("|")
            output = "        {0}{1} {2: <18} {1}".format(
                id_prefix, colored_pipe,
                str(colorful.bold_yellow(scenario.iteration)))
        else:
            id_prefix = self.get_id_sentence_prefix(scenario,
                                                    colorful.bold_cyan)
            for tag in scenario.tags:
                if (
                        tag.name == "precondition" and world.config.expand
                        and world.config.show
                ):  # special case for the show command: when scenario steps are expanded and the tag is a precondition, comment it out
                    output += colorful.white("    # @{0}{1}\n".format(
                        tag.name, "({0})".format(tag.arg) if tag.arg else ""))
                else:
                    output += colorful.cyan("    @{0}{1}\n".format(
                        tag.name, "({0})".format(tag.arg) if tag.arg else ""))
            output += "    {0}{1}: {2}".format(
                id_prefix,
                colorful.bold_white(scenario.keyword),
                colorful.bold_white(scenario.sentence),
            )
        write(output)
Example #2
File: eval.py  Project: yyht/PRS
def get_features(task_id, model, data, cat_map):
    print(
        colorful.bold_yellow('Gathering Features of Task: ' +
                             str(task_id)).styled_string)
    with torch.no_grad():
        features_total = []
        cats_total = []
        for i, (imgs, cats) in enumerate(iter(data)):
            # batch_size
            batch_size = imgs.shape[0]

            # Move to device, if available
            imgs = imgs.to(model.device)
            cats = cats.to(model.device)

            # Forward prop.
            features = model.get_features(imgs)
            cats_name = []
            for cat in cats:
                cats_name.append(cat_map[(cat > 0).cpu()])

            features_total.append(features.cpu())
            cats_total.extend(cats_name)

        features_total = torch.cat(features_total, axis=0)

    return features_total, cats_total
Example #3
 def str(self):
     for node in self.root.children.nodes_by_key.values():
         self.descend(node)
     key_values = [s.split('=') for s in self.lines]
     tree_width = max(len(x[0]) for x in key_values)
     lines = [
         str(colorful.bold_cyan(x[0])) + (' ' * (tree_width - len(x[0]))) +
         str(colorful.bold_yellow(' = ')) + x[1] for x in key_values
     ]
     return '\n'.join(lines)
Example #4
 def format(self, record):
     levelname = record.levelname
     if levelname in ('NOTSET', 'DEBUG'):
         record.levelname = colorful.cyan(levelname)
     elif levelname in ('INFO', ):
         record.levelname = colorful.green(levelname)
     elif levelname in ('WARNING', ):
         record.levelname = colorful.bold_yellow(levelname)
     elif levelname in ('ERROR', 'CRITICAL'):
         record.levelname = colorful.bold_red(levelname)
     return logging.Formatter.format(self, record)
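The format() override above recolors record.levelname before delegating to the stock logging.Formatter. Below is a minimal sketch of how such a formatter could be wired into the standard logging module; the ColorFormatter class name, logger name, and format string are illustrative assumptions, not part of the original example:

import logging

import colorful


class ColorFormatter(logging.Formatter):
    # format() override as in the example above: recolor the level name
    def format(self, record):
        levelname = record.levelname
        if levelname in ('NOTSET', 'DEBUG'):
            record.levelname = colorful.cyan(levelname)
        elif levelname in ('INFO',):
            record.levelname = colorful.green(levelname)
        elif levelname in ('WARNING',):
            record.levelname = colorful.bold_yellow(levelname)
        elif levelname in ('ERROR', 'CRITICAL'):
            record.levelname = colorful.bold_red(levelname)
        return logging.Formatter.format(self, record)


# attach the formatter to a stream handler on a demo logger
handler = logging.StreamHandler()
handler.setFormatter(ColorFormatter('%(levelname)s %(message)s'))
logger = logging.getLogger('colorful_demo')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.warning('this level name is rendered with colorful.bold_yellow')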
Example #5
def write_stdout(level, message):

    prefix = colorful.bold_yellow(u'\u229b INFO :')
    if level == 'WARNING':
        prefix = colorful.bold_red(u'\u2757 WARNING :')
        message = colorful.yellow(message)


    added_prefix = u'\n\t\t{}\t{} '.format(colorful.gray(u'\u2502'),' '*len(prefix))
    message = message.split('\n')

    console_write(u'\t\t\u251c\u2501\t{} {}'.format(prefix, added_prefix.join(message)))
Example #6
def diff_file(
    schema: str,
    db: str,
    unsafe: bool = False,
    apply: bool = False,
):
    """
    Diff a file of SQL statements against a database.
    :param schema: The SQL schema to match against.
    :param db: The database to target.
    :param unsafe: Generate unsafe statements.
    :param apply: Apply the statements to bring the target database up to date.
    """
    if not os.path.exists(schema):
        print(cf.bold_red("Error:"),
              f'Could not find file "{schema}"',
              file=sys.stderr)
        sys.exit(os.EX_OSFILE)

    sql_statements = sql_from_file(schema)

    try:
        statements, generated_unsafe = diff(sql_statements, db, unsafe, apply,
                                            True)
    except DatabaseDoesNotExist as e:
        print(
            cf.bold_red("Error:"),
            f'Database "{e.database}" does not exist.',
            file=sys.stderr,
        )
        sys.exit(os.EX_NOHOST)
    except SQLSyntaxError as e:
        print(cf.bold_red("Error:"), e.error, file=sys.stderr)
        sys.exit(os.EX_DATAERR)

    if generated_unsafe:
        print(
            cf.bold_yellow("Careful:"),
            "Unsafe statements generated.",
            file=sys.stderr,
        )
        print("Run again with", cf.bold("--unsafe"))
        sys.exit(os.EX_USAGE)
Example #7
 parser.add_argument('-test', help="testing mode", action='store_true')
 parser.add_argument('-ov',
                     help="override pixel locations",
                     action='store_true')
 args = parser.parse_args()
 depth = args.depth
 source = args.source
 testing_mode = args.test
 english_words = load_words()
 if source == 'browser':
     grid = setup()
 else:
     grid = populate()
 binds = mapping(args.ov)
 cf.use_style('solarized')
 print(cf.bold_yellow(f'Dict size: {len(english_words)}'))
 print('depth is', depth)
 print('source is ', source)
 print('testing mode is', testing_mode)
 if not grid:
     if not testing_mode:
         print(
             cf.bold_red(
                 'Failed to recognize the grid. Try again on a different puzzle'
             ))
         import sys
         sys.exit(1)
     grid = populate()
 print("Grid is")
 print(np.asarray(grid))
 # TODO: populate grid
Example #8
    def console_writer_before_each_scenario(self, scenario):
        """
            Writes the scenario header to the console

            :param Scenario scenario: the scenario to write to the console
        """
        output = "\n"
        if isinstance(scenario.parent, ScenarioOutline):
            if world.config.write_steps_once:
                return

            id_prefix = self.get_id_sentence_prefix(
                scenario, colorful.bold_yellow, len(scenario.parent.scenarios)
            )
            colored_pipe = colorful.bold_white("|")
            output = "        {0}{1} {2} {1}".format(
                id_prefix,
                colored_pipe,
                (" {0} ")
                .format(colored_pipe)
                .join(
                    str(
                        colorful.bold_yellow(
                            "{1: <{0}}".format(scenario.parent.get_column_width(i), x)
                        )
                    )
                    for i, x in enumerate(scenario.example.data)
                ),
            )
        elif isinstance(scenario.parent, ScenarioLoop):
            if world.config.write_steps_once:
                return

            id_prefix = self.get_id_sentence_prefix(
                scenario, colorful.bold_yellow, len(scenario.parent.scenarios)
            )
            colored_pipe = colorful.bold_white("|")
            output = "        {0}{1} {2: <18} {1}".format(
                id_prefix, colored_pipe, str(colorful.bold_yellow(scenario.iteration))
            )
        else:
            id_prefix = self.get_id_sentence_prefix(scenario, colorful.bold_cyan)
            for tag in scenario.tags:
                if (
                    tag.name == "precondition"
                    and world.config.expand
                    and world.config.show
                ):  # special case for the show command: when scenario steps are expanded and the tag is a precondition, comment it out
                    output += colorful.white(
                        "    # @{0}{1}\n".format(
                            tag.name, "({0})".format(tag.arg) if tag.arg else ""
                        )
                    )
                else:
                    output += colorful.cyan(
                        "    @{0}{1}\n".format(
                            tag.name, "({0})".format(tag.arg) if tag.arg else ""
                        )
                    )
            output += "    {0}{1}: {2}".format(
                id_prefix,
                colorful.bold_white(scenario.keyword),
                colorful.bold_white(scenario.sentence),
            )
        write(output)
Example #9
                if predicate:
                    predicate_signatures[predicate.name] = predicate.arguments

    # Scan codebase for violations
    violations_found = False
    for file_path in args.files:
        with open(file_path) as file:
            for line_number, predicate in match_predicates(file):
                actual_arity = len(predicate.arguments)
                try:
                    expected_arity = len(predicate_signatures[predicate.name])
                    assert expected_arity == actual_arity

                except KeyError:
                    # Missing annotation
                    predicate_signature = colorful.bold_yellow(
                        f'{predicate.name}/{actual_arity}')
                    print_message(colorful.yellow(
                        f'Missing annotation for {predicate_signature}'),
                                  file_name=file_path,
                                  line_number=line_number)
                    violations_found = True

                except AssertionError:
                    # Annotation violation
                    actual_signature = colorful.bold_red(
                        f'{predicate.name}/{actual_arity}')
                    expected_signature = colorful.bold_red(
                        f'{predicate.name}/{expected_arity}')
                    print_message(colorful.red(
                        f'{actual_signature} should be {expected_signature}'),
                                  file_name=file_path,
Example #10
    def console_write(self, features, marker):
        """
            Writes the end report for all features

            :param list features: all features
        """
        stats = {
            "features": {
                "amount": 0,
                "passed": 0,
                "failed": 0,
                "skipped": 0,
                "untested": 0,
                "pending": 0,
            },
            "scenarios": {
                "amount": 0,
                "passed": 0,
                "failed": 0,
                "skipped": 0,
                "untested": 0,
                "pending": 0,
            },
            "steps": {
                "amount": 0,
                "passed": 0,
                "failed": 0,
                "skipped": 0,
                "untested": 0,
                "pending": 0,
            },
        }
        pending_steps = []
        duration = timedelta()
        for feature in features:
            if not feature.has_to_run(world.config.scenarios):
                continue
            stats["features"]["amount"] += 1
            stats["features"][feature.state] += 1

            if feature.state in [Step.State.PASSED, Step.State.FAILED]:
                duration += feature.duration

            for scenario in feature.all_scenarios:
                if not scenario.has_to_run(world.config.scenarios):
                    continue

                if isinstance(scenario, ScenarioOutline):  # skip ScenarioOutlines
                    continue
                if isinstance(scenario, ScenarioLoop):  # skip ScenarioLoop
                    continue

                stats["scenarios"]["amount"] += 1
                stats["scenarios"][scenario.state] += 1
                for step in scenario.steps:
                    stats["steps"]["amount"] += 1
                    stats["steps"][step.state] += 1

                    if step.state == Step.State.PENDING:
                        pending_steps.append(step)

        colored_closing_paren = colorful.bold_white(")")
        colored_comma = colorful.bold_white(", ")
        passed_word = colorful.bold_green("{0} passed")
        failed_word = colorful.bold_red("{0} failed")
        skipped_word = colorful.cyan("{0} skipped")
        pending_word = colorful.bold_yellow("{0} pending")

        output = colorful.bold_white(
            "{0} features (".format(stats["features"]["amount"])
        )
        output += passed_word.format(stats["features"]["passed"])
        if stats["features"]["failed"]:
            output += colored_comma + failed_word.format(stats["features"]["failed"])
        if stats["features"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["features"]["skipped"])
        if stats["features"]["pending"]:
            output += colored_comma + pending_word.format(stats["features"]["pending"])
        output += colored_closing_paren

        output += "\n"
        output += colorful.bold_white(
            "{} scenarios (".format(stats["scenarios"]["amount"])
        )
        output += passed_word.format(stats["scenarios"]["passed"])
        if stats["scenarios"]["failed"]:
            output += colored_comma + failed_word.format(stats["scenarios"]["failed"])
        if stats["scenarios"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["scenarios"]["skipped"])
        if stats["scenarios"]["pending"]:
            output += colored_comma + pending_word.format(stats["scenarios"]["pending"])
        output += colored_closing_paren

        output += "\n"
        output += colorful.bold_white("{} steps (".format(stats["steps"]["amount"]))
        output += passed_word.format(stats["steps"]["passed"])
        if stats["steps"]["failed"]:
            output += colored_comma + failed_word.format(stats["steps"]["failed"])
        if stats["steps"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["steps"]["skipped"])
        if stats["steps"]["pending"]:
            output += colored_comma + pending_word.format(stats["steps"]["pending"])
        output += colored_closing_paren

        if pending_steps:
            sr = StepRegistry()
            pending_step_implementations = make_unique_obj_list(
                pending_steps, lambda x: x.definition_func
            )
            output += colorful.white(
                "\nYou have {0} pending step implementation{1} affecting {2} step{3}:\n  {4}\n\nNote: this could be the reason for some failing subsequent steps".format(
                    len(pending_step_implementations),
                    "s" if len(pending_step_implementations) is not 1 else "",
                    len(pending_steps),
                    "s" if len(pending_steps) is not 1 else "",
                    "\n  ".join(
                        [
                            "-  '{0}' @ {1}".format(
                                sr.get_pattern(s.definition_func),
                                get_func_code(s.definition_func).co_filename,
                            )
                            for s in pending_step_implementations
                        ]
                    ),
                )
            )

        output += "\n"

        if world.config.wip:
            if stats["scenarios"]["passed"] > 0:
                output += colorful.red(
                    "\nThe --wip switch was used, so I didn't expect anything to pass. These scenarios passed:\n"
                )

                has_passed_scenarios = False
                for feature in features:
                    passed_scenarios = list(
                        filter(
                            lambda s: s.state == Step.State.PASSED,
                            feature.all_scenarios,
                        )
                    )
                    for scenario in passed_scenarios:
                        output += colorful.red(
                            "\n - {}: {}".format(feature.path, scenario.sentence)
                        )
                        has_passed_scenarios = True

                if has_passed_scenarios:
                    output += "\n"
            else:
                output += colorful.green(
                    "\nThe --wip switch was used, so the failures were expected. All is good.\n"
                )

        output += colorful.cyan(
            "Run {0} finished within {1}".format(
                marker, humanize.naturaldelta(duration)
            )
        )

        write(output)
Example #11
File: eval.py  Project: yyht/PRS
def validate(task_id, model, data, cat_map, results_dict, last_id,
             additional_report_cats):
    """
    :param additional_report_cats: categories list for additional report. \
        dict {report_name: [cat1, cat2, cat3, ...] \
        ex) major: ['train', 'horse', 'bird', 'clock', ...], \
        minor: ['bicycle', 'potted plant', ....], \
        moderate: ['kite', 'bench', 'teddy bear', ...]

    """
    results = {}
    losses = AverageMeter()  # loss (per word decoded)
    accuracies = AverageMeter()
    class_precisions = Group_AverageMeter()
    class_recalls = Group_AverageMeter()
    class_f1s = Group_AverageMeter()

    overall_precisions = AverageMeter()
    overall_recalls = AverageMeter()
    overall_f1s = AverageMeter()

    mAP = AverageMeter()

    criterion = model.criterion

    print(
        colorful.bold_yellow('Validating Task: ' + str(task_id)).styled_string)

    cpu_targets = []
    cpu_predicts = []
    cpu_probs = []
    with torch.no_grad():
        for i, (imgs, cats) in enumerate(iter(data)):
            # batch_size
            batch_size = imgs.shape[0]

            # Move to device, if available
            imgs = imgs.to(model.device)
            cats = cats.to(model.device)

            # Forward prop.
            predict = model.encoder(imgs)
            targets = cats
            # Calculate loss
            loss = criterion(predict, targets)
            loss = loss.mean()

            predict = torch.sigmoid(predict)

            # for mAP score
            cpu_probs.append(predict.cpu())

            predict = predict > 0.5  # BCE
            total_relevant_slots = targets.sum().data
            relevant_predict = (predict * targets.float()).sum().data

            acc = relevant_predict / total_relevant_slots
            losses.update(loss.item(), batch_size)
            accuracies.update(acc, batch_size)

            cpu_targets.append(targets.cpu())
            cpu_predicts.append(predict.cpu())

        cpu_targets = torch.cat(cpu_targets, axis=0)
        cpu_predicts = torch.cat(cpu_predicts, axis=0)
        cpu_probs = torch.cat(cpu_probs, axis=0)

        ncats = cpu_targets.sum(axis=0)
        # ignore classes in other tasks
        cats_in_task_idx = ncats > 0
        ncats = ncats[cats_in_task_idx].tolist()

        f1_pc = f1_score_per_class(cpu_targets[:, cats_in_task_idx],
                                   cpu_predicts[:, cats_in_task_idx],
                                   zero_division=0)
        precision_pc = precision_score_per_class(
            cpu_targets[:, cats_in_task_idx],
            cpu_predicts[:, cats_in_task_idx],
            zero_division=0)
        recall_pc = recall_score_per_class(cpu_targets[:, cats_in_task_idx],
                                           cpu_predicts[:, cats_in_task_idx],
                                           zero_division=0)

        f1_oa = f1_score_overall(cpu_targets[:, cats_in_task_idx],
                                 cpu_predicts[:, cats_in_task_idx],
                                 zero_division=0)
        precision_oa = precision_score_overall(cpu_targets[:,
                                                           cats_in_task_idx],
                                               cpu_predicts[:,
                                                            cats_in_task_idx],
                                               zero_division=0)
        recall_oa = recall_score_overall(cpu_targets[:, cats_in_task_idx],
                                         cpu_predicts[:, cats_in_task_idx],
                                         zero_division=0)

        # record performances
        cats_in_task_name = cat_map[cats_in_task_idx].tolist()
        class_f1s.update(cats_in_task_name, f1_pc.tolist(), ncats)
        class_precisions.update(cats_in_task_name, precision_pc.tolist(),
                                ncats)
        class_recalls.update(cats_in_task_name, recall_pc.tolist(), ncats)

        overall_f1s.update(f1_oa.item(), len(cpu_targets))
        overall_precisions.update(precision_oa.item(), len(cpu_targets))
        overall_recalls.update(recall_oa.item(), len(cpu_targets))

        # mAP
        mAP.update(
            mean_average_precision(cpu_targets[:, cats_in_task_idx],
                                   cpu_probs[:, cats_in_task_idx]))

        # for reporting major, moderate, minor category performances
        for report_name in additional_report_cats.keys():
            reporter = Group_AverageMeter()

            # get report category idxes
            all_cats = cat_map.tolist()
            task_cats = set(cats_in_task_name)
            report_cats = task_cats & set(additional_report_cats[report_name])
            cats_idx = []
            for cat in report_cats:
                cats_idx.append(all_cats.index(cat))
            report_cats_idx = torch.tensor(cats_idx, dtype=torch.long)

            # there are tasks where the min/mod/maj are missing.
            if len(report_cats_idx) == 0:
                reporter.update(['CP'], [float('NaN')], [1])
                reporter.update(['CR'], [float('NaN')], [1])
                reporter.update(['CF1'], [float('NaN')], [1])
                reporter.update(['OP'], [float('NaN')], [1])
                reporter.update(['OR'], [float('NaN')], [1])
                reporter.update(['OF1'], [float('NaN')], [1])
                reporter.update(['mAP'], [float('NaN')], [1])

                # for major, moderate and minor report, total is a meaningless metric.
                # mean of CP, CR, CF1, ... is meaningless.
                reporter.total.reset()
                # add to results
                results[report_name] = reporter
                continue

            # CP, CR, CF1 performance of report_categories.
            _class_precision = precision_score_per_class(
                cpu_targets[:, report_cats_idx],
                cpu_predicts[:, report_cats_idx],
                zero_division=0)
            _class_recall = recall_score_per_class(
                cpu_targets[:, report_cats_idx],
                cpu_predicts[:, report_cats_idx],
                zero_division=0)
            _class_precision = torch.mean(_class_precision)
            _class_recall = torch.mean(_class_recall)
            # CF1 bias. note that CF1 is not a mean value of categories' f1_score
            _class_f1 = ((2*_class_precision*_class_recall)/(_class_precision+_class_recall)) \
                if (_class_precision+_class_recall)>0 else torch.tensor([0.])

            # OP, OR, OF1 performance of report_categories.
            _overall_precision = precision_score_overall(
                cpu_targets[:, report_cats_idx],
                cpu_predicts[:, report_cats_idx],
                zero_division=0)
            _overall_recall = recall_score_overall(
                cpu_targets[:, report_cats_idx],
                cpu_predicts[:, report_cats_idx],
                zero_division=0)
            _overall_f1 = f1_score_overall(cpu_targets[:, report_cats_idx],
                                           cpu_predicts[:, report_cats_idx],
                                           zero_division=0)

            # mAP performance of report_categories.
            _mAP = mean_average_precision(cpu_targets[:, report_cats_idx],
                                          cpu_probs[:, report_cats_idx])

            reporter.update(['CP'], [_class_precision.item()], [1])
            reporter.update(['CR'], [_class_recall.item()], [1])
            reporter.update(['CF1'], [_class_f1.item()], [1])
            reporter.update(['OP'], [_overall_precision.item()], [1])
            reporter.update(['OR'], [_overall_recall.item()], [1])
            reporter.update(['OF1'], [_overall_f1.item()], [1])
            reporter.update(['mAP'], [_mAP.item()], [1])

            # for major, moderate and minor report, total is a meaningless metric.
            # mean of CP, CR, CF1, ... is meaningless.
            reporter.total.reset()

            # add to results
            results[report_name] = reporter

        # CF1 bias. note that CF1 is not a mean value of categories' f1_score
        class_f1s.total.reset()
        p_pc, r_pc = torch.mean(precision_pc).item(), torch.mean(
            recall_pc).item()
        class_f1s.total.update(((2 * p_pc * r_pc) /
                                (p_pc + r_pc)) if (p_pc + r_pc) > 0 else 0)

        # save performances
        results['OF1'] = overall_f1s
        results['OP'] = overall_precisions
        results['OR'] = overall_recalls
        results['CF1'] = class_f1s
        results['CP'] = class_precisions
        results['CR'] = class_recalls
        results['losses'] = losses
        results['accuracies'] = accuracies

        results['mAP'] = mAP

        # Forgetting Measure
        if int(task_id) == int(last_id) and len(results_dict) > 0:
            forget_metrics = ['mAP', 'OF1', 'CF1']
            forget = Group_AverageMeter()
            Cf1_forget = Group_AverageMeter()
            forget_results = {}
            per_cat_forget_results = {}
            for metric in forget_metrics:
                per_metric_results = {}
                for task_name, per_task_results in results_dict.items():
                    if metric == 'CF1':
                        per_total_lst = []
                        per_cat_dict = {}
                        # only up to the 2nd last are used to find the max.
                        for per_task_result in per_task_results[:-1]:
                            per_total_lst.append(
                                per_task_result[metric].total.avg)
                            for cat, cat_avgmtr in per_task_result[
                                    metric].data.items():
                                if cat in per_cat_dict:
                                    per_cat_dict[cat].append(cat_avgmtr.avg)
                                else:
                                    per_cat_dict[cat] = [cat_avgmtr.avg]

                        final_task_result = per_task_results[-1][
                            metric].total.avg
                        max_task_result = max(per_total_lst)
                        # subtract the very last added and max of the tasks before.
                        metric_forgot = None
                        if max_task_result == 0 and final_task_result == 0:
                            forget_results[metric + '_' + str(task_name)] = 1.0
                            metric_forgot = 1.0
                        elif max_task_result == 0 and final_task_result != 0:
                            metric_forgot = (max_task_result -
                                             final_task_result) / 1
                            forget_results[metric + '_' +
                                           str(task_name)] = metric_forgot
                        else:
                            metric_forgot = (
                                max_task_result -
                                final_task_result) / abs(max_task_result)
                            forget_results[metric + '_' +
                                           str(task_name)] = metric_forgot

                        for cat, catobj in per_task_results[-1][
                                metric].data.items():
                            max_cat_result = max(per_cat_dict[cat])
                            final_cat_result = catobj.avg
                            if max_cat_result == 0 and final_cat_result == 0:
                                per_cat_forget_results[cat] = 1.0
                            elif max_cat_result == 0 and final_cat_result != 0:
                                per_cat_forget_results[
                                    cat] = max_cat_result - catobj.avg / 1
                            else:
                                per_cat_forget_results[cat] = (max_cat_result \
                                        - catobj.avg)/abs(max_cat_result)
                    else:
                        per_metric_lst = []
                        for per_task_result in per_task_results[:-1]:
                            per_metric_lst.append(per_task_result[metric].avg)

                        metric_forgot = None
                        final_task_result = per_task_results[-1][metric].avg
                        max_task_result = max(per_metric_lst)
                        if max_task_result == 0 and final_task_result == 0:
                            metric_forgot = 1.0
                            forget_results[metric + '_' +
                                           str(task_name)] = metric_forgot
                        elif max_task_result == 0 and final_task_result != 0:
                            metric_forgot = (max_task_result -
                                             final_task_result) / 1
                            forget_results[metric + '_' +
                                           str(task_name)] = metric_forgot
                        else:
                            metric_forgot = (
                                max_task_result -
                                final_task_result) / abs(max_task_result)
                            forget_results[metric + '_' +
                                           str(task_name)] = metric_forgot

                    for split in ['major', 'moderate', 'minor']:
                        # check if split results in all NaNs
                        if math.isnan(
                                per_task_results[0][split].data[metric].avg):
                            forget_results[split + '_' + metric + '_' +
                                           str(task_name)] = float('NaN')
                            continue

                        per_metric_lst = []
                        for per_task_result in per_task_results[:-1]:
                            per_metric_lst.append(
                                per_task_result[split].data[metric].avg)
                        final_task_result = per_task_results[-1][split].data[
                            metric].avg
                        max_task_result = max(per_metric_lst)
                        split_forgot = None
                        if max_task_result == 0 and final_task_result == 0:
                            split_forgot = 1.0  # forgotten within the first task by majority dominance.
                            forget_results[split + '_' + metric + '_' +
                                           str(task_name)] = split_forgot
                        elif max_task_result == 0 and final_task_result != 0:
                            split_forgot = (max_task_result -
                                            final_task_result) / 1
                            forget_results[split + '_' + metric + '_' +
                                           str(task_name)] = split_forgot
                        else:
                            split_forgot = (max_task_result - final_task_result
                                            ) / abs(max_task_result)
                            forget_results[split + '_' + metric + '_' +
                                           str(task_name)] = split_forgot

                        if metric + split + 'Overall' in per_metric_results.keys(
                        ):
                            per_metric_results[metric + split +
                                               'Overall'].append(split_forgot)
                        else:
                            per_metric_results[metric + split +
                                               'Overall'] = [split_forgot]

                    if metric + 'Overall' in per_metric_results.keys():
                        per_metric_results[metric +
                                           'Overall'].append(metric_forgot)
                    else:
                        per_metric_results[metric +
                                           'Overall'] = [metric_forgot]

                forget_results[metric + 'Overall'] = average_lst(
                    per_metric_results[metric + 'Overall'])
                forget_results[metric + 'majorOverall'] = average_lst(
                    per_metric_results[metric + 'majorOverall'])
                forget_results[metric + 'moderateOverall'] = average_lst(
                    per_metric_results[metric + 'moderateOverall'])
                forget_results[metric + 'minorOverall'] = average_lst(
                    per_metric_results[metric + 'minorOverall'])

            keys = []
            values = []
            n = []
            for k, v in forget_results.items():
                keys.append(k)
                values.append(v)
                n.append(1)

            forget.update(keys, values, n)

            keys = []
            values = []
            n = []
            for k, v in per_cat_forget_results.items():
                keys.append(k)
                values.append(v)
                n.append(1)

            Cf1_forget.update(keys, values, n)

            results['forget'] = forget
            results['class_forget'] = Cf1_forget

        print(
            colorful.bold_cyan(
                'LOSS - {loss.avg:.3f}, ACCURACY - {acc.avg:.3f}, RECALL - {rcl.total.avg:.4f},\
            PRECISION - {prc.total.avg:.4f}, F1 - {f1.total.avg:.4f}'.format(
                    loss=losses,
                    acc=accuracies,
                    rcl=class_recalls,
                    prc=class_precisions,
                    f1=class_f1s)).styled_string)

    return results, cpu_targets, cpu_probs
Example #12
 def show_warn(cls, msg=None):
     text = str(colorful.bold_yellow('*** WARNING ***'))
     if msg:
         text += '\n' + str(msg)
     cls.stderr(text)
Example #13
def test_step_matches_configs(match_config_files,
                              basedirs,
                              cover_min_percentage=None,
                              cover_show_missing=False):
    """
    Test if the given match config files match the actual
    matched step implementations.
    """
    if cover_min_percentage is not None and float(cover_min_percentage) > 100:
        sys.stderr.write(
            str(
                colorful.magenta(
                    'You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n'
                    .format(float(cover_min_percentage)))))
        return 3

    # load user's custom python files
    for basedir in basedirs:
        load_modules(basedir)

    steps = StepRegistry().steps

    if not steps:
        sys.stderr.write(
            str(
                colorful.magenta(
                    'No step implementations found in {0}, thus doesn\'t make sense to continue'
                    .format(basedirs))))
        return 4

    failed = 0
    passed = 0
    covered_steps = set()

    for match_config_file in match_config_files:
        # load the given match config file
        with codecs.open(match_config_file, "r", "utf-8") as f:
            match_config = yaml.safe_load(f)

        if not match_config:
            print(
                colorful.magenta(
                    'No sentences found in {0} to test against'.format(
                        match_config_file)))
            return 5

        print(
            colorful.yellow('Testing sentences from {0}:'.format(
                colorful.bold_yellow(match_config_file))))
        failed_sentences, passed_senteces = test_step_matches(
            match_config, steps)
        failed += failed_sentences
        passed += passed_senteces

        covered_steps = covered_steps.union(x['should_match']
                                            for x in match_config
                                            if 'should_match' in x)

        # newline
        sys.stdout.write('\n')

    report = colorful.bold_white('{0} sentences ('.format(failed + passed))
    if passed > 0:
        report += colorful.bold_green('{0} passed'.format(passed))

    if passed > 0 and failed > 0:
        report += colorful.bold_white(', ')

    if failed > 0:
        report += colorful.bold_red('{0} failed'.format(failed))
    report += colorful.bold_white(')')
    print(report)

    step_coverage = 100.0 / len(steps) * len(covered_steps)
    coverage_report = colorful.bold_white(
        'Covered {0} of {1} step implementations'.format(
            len(covered_steps), len(steps)))

    ret = 0 if failed == 0 else 1

    if cover_min_percentage:
        coverage_color = colorful.bold_green if step_coverage >= float(
            cover_min_percentage) else colorful.bold_red
        coverage_report += colorful.bold_white(' (coverage: ')
        coverage_report += coverage_color('{0:.2f}%'.format(step_coverage))
        if float(cover_min_percentage) > step_coverage:
            coverage_report += colorful.bold_white(
                ', expected a minimum of {0}'.format(
                    colorful.bold_green(cover_min_percentage + '%')))
            if failed == 0:
                ret = 2
            # if tests have passed and coverage is too low we fail with exit code 2
        coverage_report += colorful.bold_white(')')

    print(coverage_report)

    if cover_show_missing:
        missing_steps = get_missing_steps(steps, covered_steps)
        if missing_steps:
            missing_step_report = colorful.bold_yellow('Missing steps:\n')
            for step in missing_steps:
                missing_step_report += '- {0} at '.format(
                    colorful.cyan(step[0]))
                missing_step_report += colorful.cyan(step[1]) + '\n'
            sys.stdout.write(missing_step_report)

    return ret
Example #14
    def console_write(self, features, marker):
        """
            Writes the end report for all features

            :param list features: all features
        """
        stats = {
            "features": {"amount": 0, "passed": 0, "failed": 0, "skipped": 0, "untested": 0, "pending": 0},
            "scenarios": {"amount": 0, "passed": 0, "failed": 0, "skipped": 0, "untested": 0, "pending": 0},
            "steps": {"amount": 0, "passed": 0, "failed": 0, "skipped": 0, "untested": 0, "pending": 0},
        }
        pending_steps = []
        duration = timedelta()
        for feature in features:
            if not feature.has_to_run(world.config.scenarios):
                continue
            stats["features"]["amount"] += 1
            stats["features"][feature.state] += 1

            if feature.state in [Step.State.PASSED, Step.State.FAILED]:
                duration += feature.duration

            for scenario in feature.all_scenarios:
                if not scenario.has_to_run(world.config.scenarios):
                    continue

                if isinstance(scenario, ScenarioOutline):  # skip ScenarioOutlines
                    continue
                if isinstance(scenario, ScenarioLoop):  # skip ScenarioLoop
                    continue

                stats["scenarios"]["amount"] += 1
                stats["scenarios"][scenario.state] += 1
                for step in scenario.steps:
                    stats["steps"]["amount"] += 1
                    stats["steps"][step.state] += 1

                    if step.state == Step.State.PENDING:
                        pending_steps.append(step)

        colored_closing_paren = colorful.bold_white(")")
        colored_comma = colorful.bold_white(", ")
        passed_word = colorful.bold_green("{0} passed")
        failed_word = colorful.bold_red("{0} failed")
        skipped_word = colorful.cyan("{0} skipped")
        pending_word = colorful.bold_yellow("{0} pending")

        output = colorful.bold_white("{0} features (".format(stats["features"]["amount"]))
        output += passed_word.format(stats["features"]["passed"])
        if stats["features"]["failed"]:
            output += colored_comma + failed_word.format(stats["features"]["failed"])
        if stats["features"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["features"]["skipped"])
        if stats["features"]["pending"]:
            output += colored_comma + pending_word.format(stats["features"]["pending"])
        output += colored_closing_paren

        output += "\n"
        output += colorful.bold_white("{} scenarios (".format(stats["scenarios"]["amount"]))
        output += passed_word.format(stats["scenarios"]["passed"])
        if stats["scenarios"]["failed"]:
            output += colored_comma + failed_word.format(stats["scenarios"]["failed"])
        if stats["scenarios"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["scenarios"]["skipped"])
        if stats["scenarios"]["pending"]:
            output += colored_comma + pending_word.format(stats["scenarios"]["pending"])
        output += colored_closing_paren

        output += "\n"
        output += colorful.bold_white("{} steps (".format(stats["steps"]["amount"]))
        output += passed_word.format(stats["steps"]["passed"])
        if stats["steps"]["failed"]:
            output += colored_comma + failed_word.format(stats["steps"]["failed"])
        if stats["steps"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["steps"]["skipped"])
        if stats["steps"]["pending"]:
            output += colored_comma + pending_word.format(stats["steps"]["pending"])
        output += colored_closing_paren

        if pending_steps:
            sr = StepRegistry()
            pending_step_implementations = make_unique_obj_list(pending_steps, lambda x: x.definition_func)
            output += colorful.white("\nYou have {0} pending step implementation{1} affecting {2} step{3}:\n  {4}\n\nNote: this could be the reason for some failing subsequent steps".format(
                len(pending_step_implementations),
                "s" if len(pending_step_implementations) is not 1 else "",
                len(pending_steps),
                "s" if len(pending_steps) is not 1 else "",
                "\n  ".join(["-  '{0}' @ {1}".format(sr.get_pattern(s.definition_func), get_func_code(s.definition_func).co_filename) for s in pending_step_implementations])
            ))

        output += "\n"
        output += colorful.cyan("Run {0} finished within {1}".format(marker, humanize.naturaldelta(duration)))

        write(output)
Example #15
                time.sleep(aralik)
            self.scroll_to_bottom()
            for i in range(takipsayisi):
                self.click("//span[contains(.,'Takip et')]")
                time.sleep(aralik)
            self.scroll_to_bottom()
            for i in range(takipsayisi):
                self.click("//span[contains(.,'Takip et')]")
                time.sleep(aralik)
            self.scroll_to_bottom()
            for i in range(takipsayisi):
                self.click("//span[contains(.,'Takip et')]")
                time.sleep(aralik)
            self.scroll_to_bottom()
            for i in range(takipsayisi):
                self.click("//span[contains(.,'Takip et')]")
                time.sleep(aralik)
            self.scroll_to_bottom()
            for i in range(takipsayisi):
                self.click("//span[contains(.,'Takip et')]")
                time.sleep(aralik)
            self.scroll_to_bottom()
            for i in range(takipsayisi):
                self.click("//span[contains(.,'Takip et')]")
                time.sleep(aralik)
            self.scroll_to_bottom()

else:
    time.sleep(1)
    print(cf.bold_yellow("\nGeçerli Bir Seçim Yapıp Tekrar Dene\n"))
Example #16
 def save_model(self, epoch):
     save_path = os.path.join(self.save_dir, str(epoch) + '.pkl')
     print(
         colorful.bold_yellow(
             'Save model parameters to {}'.format(save_path)).styled_string)
     torch.save(self.model.state_dict(), save_path)
Example #17
File: main.py  Project: yyht/PRS
def main():
    args = parser.parse_args()
    logger = setup_logger()

    ## Use below for slurm setting.
    # slurm_job_id = os.getenv('SLURM_JOB_ID', 'nojobid')
    # slurm_proc_id = os.getenv('SLURM_PROC_ID', None)

    # unique_identifier = str(slurm_job_id)
    # if slurm_proc_id is not None:
    #     unique_identifier = unique_identifier + "_" + str(slurm_proc_id)
    unique_identifier = ''

    # Load config
    config_path = args.config
    episode_path = args.episode

    if args.resume_ckpt and not args.config:
        base_dir = os.path.dirname(os.path.dirname(args.resume_ckpt))
        config_path = os.path.join(base_dir, 'config.yaml')
        episode_path = os.path.join(base_dir, 'episode.yaml')
    config = yaml.load(open(config_path), Loader=yaml.FullLoader)
    episode = yaml.load(open(episode_path), Loader=yaml.FullLoader)
    config['data_schedule'] = episode

    # Override options
    for option in args.override.split('|'):
        if not option:
            continue
        address, value = option.split('=')
        keys = address.split('.')
        here = config
        for key in keys[:-1]:
            if key not in here:
                raise ValueError('{} is not defined in config file. '
                                 'Failed to override.'.format(address))
            here = here[key]
        if keys[-1] not in here:
            raise ValueError('{} is not defined in config file. '
                             'Failed to override.'.format(address))
        here[keys[-1]] = yaml.load(value, Loader=yaml.FullLoader)


    # Set log directory
    config['log_dir'] = os.path.join(args.log_dir, unique_identifier)
    if not args.resume_ckpt and os.path.exists(config['log_dir']):
        logger.warning('%s already exists' % config['log_dir'])
        input('Press enter to continue')

    # print the configuration
    print(colorful.bold_white("configuration:").styled_string)
    pprint(config)
    print(colorful.bold_white("configuration end").styled_string)

    if args.resume_ckpt and not args.log_dir:
        config['log_dir'] = os.path.dirname(
            os.path.dirname(args.resume_ckpt)
        )

    # Save config
    os.makedirs(config['log_dir'], mode=0o755, exist_ok=True)
    if not args.resume_ckpt or args.config:
        config_save_path = os.path.join(config['log_dir'], 'config.yaml')
        episode_save_path = os.path.join(config['log_dir'], 'episode.yaml')
        yaml.dump(config, open(config_save_path, 'w'))
        yaml.dump(episode, open(episode_save_path, 'w'))
        print(colorful.bold_yellow('config & episode saved to {}'.format(config['log_dir'])).styled_string)

    # Build components
    data_scheduler = DataScheduler(config)

    writer = SummaryWriter(config['log_dir'])
    model = MODEL[config['model_name']](config, writer)

    if args.resume_ckpt:
        model.load_state_dict(torch.load(args.resume_ckpt))
    model.to(config['device'])
    train_model(config, model, data_scheduler, writer)

    print(colorful.bold_white("\nThank you and Good Job Computer").styled_string)
Example #18
def test_step_matches_configs(
    match_config_files, basedirs, cover_min_percentage=None, cover_show_missing=False
):
    """
    Test if the given match config files match the actual
    matched step implementations.
    """
    if cover_min_percentage is not None and float(cover_min_percentage) > 100:
        sys.stderr.write(
            str(
                colorful.magenta(
                    "You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n".format(
                        float(cover_min_percentage)
                    )
                )
            )
        )
        return 3

    # load user's custom python files
    for basedir in basedirs:
        load_modules(basedir)

    steps = StepRegistry().steps

    if not steps:
        sys.stderr.write(
            str(
                colorful.magenta(
                    "No step implementations found in {0}, thus doesn't make sense to continue".format(
                        basedirs
                    )
                )
            )
        )
        return 4

    failed = 0
    passed = 0
    covered_steps = set()

    for match_config_file in match_config_files:
        # load the given match config file
        with codecs.open(match_config_file, "r", "utf-8") as f:
            match_config = yaml.safe_load(f)

        if not match_config:
            print(
                colorful.magenta(
                    "No sentences found in {0} to test against".format(
                        match_config_file
                    )
                )
            )
            return 5

        print(
            colorful.yellow(
                "Testing sentences from {0}:".format(
                    colorful.bold_yellow(match_config_file)
                )
            )
        )
        failed_sentences, passed_senteces = test_step_matches(match_config, steps)
        failed += failed_sentences
        passed += passed_senteces

        covered_steps = covered_steps.union(
            x["should_match"] for x in match_config if "should_match" in x
        )

        # newline
        sys.stdout.write("\n")

    report = colorful.bold_white("{0} sentences (".format(failed + passed))
    if passed > 0:
        report += colorful.bold_green("{0} passed".format(passed))

    if passed > 0 and failed > 0:
        report += colorful.bold_white(", ")

    if failed > 0:
        report += colorful.bold_red("{0} failed".format(failed))
    report += colorful.bold_white(")")
    print(report)

    step_coverage = 100.0 / len(steps) * len(covered_steps)
    coverage_report = colorful.bold_white(
        "Covered {0} of {1} step implementations".format(len(covered_steps), len(steps))
    )

    ret = 0 if failed == 0 else 1

    if cover_min_percentage:
        coverage_color = (
            colorful.bold_green
            if step_coverage >= float(cover_min_percentage)
            else colorful.bold_red
        )
        coverage_report += colorful.bold_white(" (coverage: ")
        coverage_report += coverage_color("{0:.2f}%".format(step_coverage))
        if float(cover_min_percentage) > step_coverage:
            coverage_report += colorful.bold_white(
                ", expected a minimum of {0}".format(
                    colorful.bold_green(cover_min_percentage + "%")
                )
            )
            if failed == 0:
                ret = 2
            # if tests have passed and coverage is too low we fail with exit code 2
        coverage_report += colorful.bold_white(")")

    print(coverage_report)

    if cover_show_missing:
        missing_steps = get_missing_steps(steps, covered_steps)
        if missing_steps:
            missing_step_report = colorful.bold_yellow("Missing steps:\n")
            for step in missing_steps:
                missing_step_report += "- {0} at ".format(colorful.cyan(step[0]))
                missing_step_report += colorful.cyan(step[1]) + "\n"
            sys.stdout.write(missing_step_report)

    return ret
Example #19
import glob
import colorful
import time
from time import gmtime, strftime
import sys

total1 = len(glob.glob('/home/pi/Brian/gallery/*'))
print(colorful.bold_orange('NEW TOTAL:'), colorful.bold_orange(total1))

total2 = len(glob.glob('/home/pi/Brian/removed/*'))
print(colorful.bold_red('AWW TOTAL:'), colorful.bold_red(total2))

time1 = strftime("%d-%m-%y", gmtime())
time2 = strftime("%H:%M:%S", gmtime())
print(colorful.bold_yellow(time1), colorful.bold_yellow(time2))
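Across the examples above, colorful.bold_yellow() values reach the terminal in a few recurring ways: passed straight to print(), wrapped in str(), or via the .styled_string attribute (as in the PRS project snippets). A minimal, self-contained sketch of those call patterns, assuming only that the colorful package is installed; the message texts are illustrative:

import colorful as cf

cf.use_style('solarized')  # optional palette, as in Example #7

warning = cf.bold_yellow('WARNING')  # returns a colorful string object
print(warning)                       # printing renders the styled text
print(str(cf.bold_red('Error:')), 'something went wrong')
print(cf.bold_yellow('saving checkpoint').styled_string)  # explicit styled text, as in the PRS examples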