Example 1
def dbchecksum(db, config, args):
    """
    Display the checksum of each ruleset and of each rule in that ruleset.
    """
    usage = "%prog [options] dbchecksum [ruleset]"
    doc = "Compute checksum for the specified ruleset " + \
    " (all if not specified)"
    parser = optparse.OptionParser(usage, description=doc)
    (options, action_args) = parser.parse_args(args)
    if len(action_args) > 1:
        parser.error(DBCHECKSUM_ACTION_NAME + \
                         ": too many arguments %d, maximum is %d" %\
                         (len(action_args), 1))
    tab_values = []
    if len(action_args) == 1:
        ruleset_name = action_args[0]
        (ruleset_h, h_for) = db.checksum(ruleset_name)
        for rulename in h_for:
            tab_values.append([ruleset_name,
                               rulename,
                               h_for[rulename].hexdigest()])
        tab_values.append([ruleset_name, HSEP, ruleset_h.hexdigest()])
    else:
        rules_map = db.get_rules_map()
        for ruleset_name in rules_map:
            (ruleset_h, h_for) = db.checksum(ruleset_name)
            for rulename in h_for:
                tab_values.append([ruleset_name,
                                   rulename,
                                   h_for[rulename].hexdigest()])
            tab_values.append([ruleset_name, HSEP, ruleset_h.hexdigest()])
    _LOGGER.output(smart_display(CHECKSUM_HEADER,
                                 tab_values,
                                 vsep=u' | '))
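A side note on the checksum objects: the code above never inspects them beyond calling .hexdigest(), so db.checksum() presumably returns hashlib-style digest objects. A minimal standalone sketch using the standard library (the md5 choice is an assumption, not necessarily what the sequencer DB uses):

import hashlib

h = hashlib.md5()
h.update(b"rule body")   # feed the data being checksummed
print(h.hexdigest())     # -> 32-character hex string, as displayed above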
Example 2
def knowntypes(db, config, args):
    """
    This action fetches the rules from the DB sequencer table and calls
    the DGM stage to create the corresponding ruleset and fetch the
    root rules mapping. The result is then displayed on the screen.
    """
    usage = "Usage: %prog [global_options] " + KNOWNTYPES_ACTION_NAME + \
        " [action_options] ruleset"
    doc = KNOWNTYPES_DOC + \
        " For each displayed type, the starting rules that will" + \
        " be applied to it for the" + \
        " computation of the dependency graph are also given."
    cmd = os.path.basename(sys.argv[0])
    progname = to_unicode(cmd).encode('ascii', 'replace')
    parser = optparse.OptionParser(usage, description=doc, prog=progname)
    (options, action_args) = parser.parse_args(args)
    if len(action_args) != 1:
        parser.error(KNOWNTYPES_ACTION_NAME + ": ruleSet is missing.")

    req_ruleset = action_args[0]
    rules = db.get_rules_for(req_ruleset)
    ruleset = RuleSet(rules.values())
    mapping = ruleset.root_rules_for
    tab_values = []
    # Sort according to category (the part after '@')
    for type_ in sorted(mapping, key=lambda t: t[t.find('@') + 1:]):
        for rule in mapping[type_]:
            line = [type_, rule.filter, rule.name, rule.action]
            tab_values.append([u"NONE" if x is None else x for x in line])

    _LOGGER.output(smart_display([u"Type",
                                  u"Filter",
                                  u"Rule Name",
                                  u"Action"],
                                 tab_values, vsep=u" | "))
Example 3
    def testRemoveSomeColumns(self):
        # Two columns, only one remains after removal
        output = smart_display([u"T1", u"T2"],
                               [[u"d1.1", u"d2.1"], [u"d1.2", u"d2.2"]],
                               columns_max={"T1": 0})
        self.assertIsNone(re.search(r"^.*T1.*$", output, flags=re.MULTILINE),
                          output)
        self.assertIsNone(re.search(r"^.*d1.*$", output, flags=re.MULTILINE),
                          output)
        self.assertIsNotNone(re.search(r"^.*T2.*$", output,
                                       flags=re.MULTILINE), output)
        self.assertIsNotNone(re.search(r"^.*d2.*$", output,
                                       flags=re.MULTILINE), output)
Example 4
    def testFILLERSpecified(self):
        output = smart_display([u"T1", u"T2"],
                               [[u"d1.1", u"d2.1"],
                                [FILL_EMPTY_ENTRY, u"d2.2"]])
        self.assertIsNotNone(re.search(r'^.*T1.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*d1.*$', output,
                                       flags=re.MULTILINE),
                             output)
        # First, check the regex against a faked sample of what we are
        # looking for. Note that the unescaped '|' makes this pattern an
        # alternation, so either side can match on its own:
        match = re.search(r'^-+ +| +d2.2 $', '------ | d2.2',
                          flags=re.MULTILINE)
        print("Faked matching at: %s" % match.group(0))
        self.assertIsNotNone(match)
        # Now, doing the same on the actual output
        print(output)
        match = re.search(r'^-+ +| +d2.2 $', output, flags=re.MULTILINE)
        self.assertIsNotNone(match, output)

        print("Matching at: %s" % match.string[match.start():match.end()])
        self.assertIsNotNone(re.search(r'^.*T2.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*d2.*$', output,
                                       flags=re.MULTILINE),
                             output)
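From the assertions above one can infer (though not verify) how FILL_EMPTY_ENTRY is rendered: the cell is drawn as a run of dashes padded to the column width. A hedged sketch of the call and its presumed output (the import path is an assumption):

from sequencer.commons import smart_display, FILL_EMPTY_ENTRY  # assumed path

print(smart_display([u"T1", u"T2"],
                    [[u"d1.1", u"d2.1"],
                     [FILL_EMPTY_ENTRY, u"d2.2"]]))
# Presumed shape of the last row, matching the regex in the test:
# ------ | d2.2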
Example 5
    def testSpecifySingleColumn(self):
        output = smart_display([u"T1", u"T2"],
                               [[u"d1.1", u"d2.1"], [u"d1.2", u"d2.2"]],
                               columns_max={"T2": REMOVE_UNSPECIFIED_COLUMNS})
        self.assertIsNone(re.search(r"^.*T1.*$", output, flags=re.MULTILINE),
                          output)
        self.assertIsNone(re.search(r"^.*d1.*$", output, flags=re.MULTILINE),
                          output)
        self.assertIsNotNone(re.search(r"^.*T2.*$", output,
                                       flags=re.MULTILINE), output)
        self.assertIsNotNone(re.search(r"^.*d2.*$", output,
                                       flags=re.MULTILINE), output)
Example 6
    def testRemoveAllColumns(self):
        # Removing the single column -> no output at all!
        output = smart_display([u"Title"], [[u"data1"], [u"data2"]],
                               columns_max={'Title': 0})
        self.assertIsNone(re.search(r'^.*Title.*$', output,
                                    flags=re.MULTILINE),
                          output)
        self.assertIsNone(re.search(r'^.*data.*$', output,
                                    flags=re.MULTILINE),
                          output)
Example 7
    def testSimpleOutput(self):
        output = smart_display([u"Title"], [[u"data1"], [u"data2"]])
        self.assertIsNotNone(re.search(r'^.*Title.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*data1.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*data2.*$', output,
                                       flags=re.MULTILINE),
                             output)
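All of these tests pass flags=re.MULTILINE because smart_display returns one multi-line string: without that flag, '^' and '$' only anchor at the very start and end of the whole output. A quick standalone illustration:

import re

text = "Title\ndata1\ndata2"
print(re.search(r"^data1$", text))                      # -> None
print(re.search(r"^data1$", text, flags=re.MULTILINE))  # -> a match object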
Example 8
def _report_unexec(a_model, execution):
    """
    Display the 'unexec' type of report
    """
    all_actions_set = set(a_model.actions.keys())
    all_actions_set_nb = len(all_actions_set)
    executed_actions_set = set(execution.executed_actions.keys())
    unexecuted_actions_set = all_actions_set.difference(executed_actions_set)
    unexecuted_actions_nb = len(unexecuted_actions_set)
    try:
        percentage = (float(unexecuted_actions_nb) / all_actions_set_nb) * 100
    except ZeroDivisionError:
        percentage = 0.0
    _LOGGER.output("\nUnexecuted Actions: %d (%2.1f %%)\t" + \
                       "Legend: mDeps=missings (error or unexecuted)" + \
                       " dependencies",
                   unexecuted_actions_nb, percentage)
    tab_values = []
    # Sort alphabetically first, then (stably) by len() so that ids like
    # b1, b2, c1, c2, b20, c10, c100 appear in that order
    sorted_list = sorted(sorted(unexecuted_actions_set), key=len)
    for id_ in sorted_list:
        action = a_model.actions[id_]
        all_deps = action.all_deps()
        all_deps_nb = len(all_deps)
        all_deps_set = set(all_deps)
        unexec = all_deps_set - set(execution.executed_actions.keys())
        error = all_deps_set & set(execution.error_actions.keys())
        missings = unexec.union(error)
        nodeset = NodeSet()
        missing_nb = len(missings)
        for missing in missings:
            if len(missing) != 0:
                nodeset.add(missing)
        try:
            percentage = ((float(missing_nb) / all_deps_nb) * 100)
        except ZeroDivisionError:
            percentage = 0.0
        tab_values.append([id_, str(len(all_deps)),
                           str(missing_nb),
                           u"%2.1f" % percentage,
                           str(nodeset)])
    output = smart_display([u"Id", u"#Deps",
                            u"#mDeps", u"%mDeps",
                            u"mDeps"],
                           tab_values, vsep=u" | ",
                           justify=[str.center, str.center,
                                    str.center, str.center,
                                    str.ljust])
    _LOGGER.output(output)
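The ordering used by this report (and by the ones below) is the classic two-pass stable sort: sort by the secondary key first, then by the primary key, relying on the stability of Python's sort. A minimal sketch:

ids = ["c100", "b20", "b1", "c2", "c10", "b2", "c1"]
by_len = sorted(sorted(ids), key=len)   # alphabetical, then stably by length
print(by_len)   # -> ['b1', 'b2', 'c1', 'c2', 'b20', 'c10', 'c100']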
Example 9
def _display(rules, columns_max):
    """
    Display all given rules using smart_display().
    """
    tab_values = []

    for rule in sorted(rules, key=operator.attrgetter('ruleset', 'name')):
        line = [rule.ruleset, rule.name, ','.join(rule.types),
                rule.filter, rule.action, rule.depsfinder,
                None if len(rule.dependson) == 0
                else ','.join(sorted(rule.dependson)),
                rule.comments, rule.help]
        tab_values.append([u"NONE" if x is None else x for x in line])

    _LOGGER.output(smart_display(RULES_HEADER, tab_values,
                                 vsep=u' | ', columns_max=columns_max))
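operator.attrgetter with several attribute names returns a tuple, which is what gives the two-level (ruleset, then name) ordering used above. A standalone sketch with a hypothetical stand-in class:

import operator

class FakeRule(object):   # hypothetical stand-in for illustration only
    def __init__(self, ruleset, name):
        self.ruleset = ruleset
        self.name = name

rules = [FakeRule("b", "y"), FakeRule("a", "z"), FakeRule("a", "x")]
for rule in sorted(rules, key=operator.attrgetter('ruleset', 'name')):
    print(rule.ruleset, rule.name)   # -> a x, then a z, then b y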
Example 10
def dbchecksum(db, config, args):
    """
    Display the checksum of each ruleset and of each rule in that ruleset.
    """
    usage = "%prog [options] dbchecksum [ruleset]"
    doc = "Compute checksum for the specified ruleset " + \
    " (all if not specified)"
    cmd = os.path.basename(sys.argv[0])
    progname = to_unicode(cmd).encode('ascii', 'replace')
    parser = optparse.OptionParser(usage, description=doc, prog=progname)
    (options, action_args) = parser.parse_args(args)
    if len(action_args) > 1:
        parser.error(DBCHECKSUM_ACTION_NAME + \
                         ": too many arguments %d, maximum is %d" % \
                         (len(action_args), 1))
    tab_values = []
    if len(action_args) == 1:
        ruleset_name = action_args[0]
        try:
            (ruleset_h, h_for) = db.checksum(ruleset_name)
            for rulename in h_for:
                tab_values.append([ruleset_name,
                                   rulename,
                                   h_for[rulename].hexdigest()])
            tab_values.append([ruleset_name, FILL_EMPTY_ENTRY,
                               ruleset_h.hexdigest()])
        except UnknownRuleSet as urs:
            _LOGGER.error(DBCHECKSUM_ACTION_NAME + ": " + str(urs))
            return 1

    else:
        rules_map = db.get_rules_map()
        for ruleset_name in rules_map:
            (ruleset_h, h_for) = db.checksum(ruleset_name)
            for rulename in h_for:
                tab_values.append([ruleset_name,
                                   rulename,
                                   h_for[rulename].hexdigest()])
            tab_values.append([ruleset_name, FILL_EMPTY_ENTRY,
                               ruleset_h.hexdigest()])
    _LOGGER.output(smart_display(CHECKSUM_HEADER,
                                 tab_values,
                                 vsep=u' | '))
Example 11
def _report_error(execution):
    """
    Display the 'error' type of report
    """
    actions_nb = len(execution.model.actions)
    error_actions = execution.error_actions.values()
    error_actions_nb = len(error_actions)
    try:
        percentage = (float(error_actions_nb) / actions_nb) * 100
    except ZeroDivisionError:
        percentage = 0.0

    _LOGGER.output("\nErrors: %d (%2.1f %%)\tLegend: " + \
                   "rDeps=reverse dependencies, RC=returned code",
                   error_actions_nb, percentage)
    tab_values = []
    # Sort by id alphabetically first, then (stably) by len() so that ids
    # like b1, b2, c1, c2, b20, c10, c100 appear in that order
    sorted_list = sorted(sorted(error_actions,
                                key=lambda error_action: error_action.id),
                         key=lambda error_action: len(error_action.id))
    for error_action in sorted_list:
        rdeps = error_action.next()
        rdeps_nb = len(rdeps)
        percentage = (float(rdeps_nb) / actions_nb) * 100
        nodeset = NodeSet()
        for rdep in rdeps:  # reuse instead of calling next() a second time
            if len(rdep) != 0:
                nodeset.add(rdep)
        tab_values.append([error_action.id, str(error_action.rc),
                           str(rdeps_nb), u"%2.1f" % percentage, str(nodeset)])
    output = smart_display([u"Id", u"RC",
                            u"#rDeps", u"%rDeps",
                            u"rDeps"],
                           tab_values, vsep=u" | ",
                           justify=[str.center, str.center,
                                    str.center, str.center,
                                    str.ljust])
    _LOGGER.output(output)
Example 12
def _report_model(a_model):
    """
    Display the 'model' type of report
    """
    actions = a_model.actions.values()
    actions_nb = len(actions)
    _LOGGER.output("Actions in Model: %d\tLegend: @=remote, Deps=Dependencies",
                   actions_nb)
    tab_values = []
    deps_total_nb = 0
    # Sort by id alphabetically first, then (stably) by len() so that ids
    # like b1, b2, c1, c2, b20, c10, c100 appear in that order
    sorted_list = sorted(sorted(actions, key=lambda action: action.id),
                         key=lambda action: len(action.id))
    for action in sorted_list:
        nodeset = NodeSet()
        deps = action.all_deps()
        deps_total_nb += len(deps)
        for dep in deps:
            if len(dep) != 0:
                nodeset.add(dep)
        cs_label = ("@" if action.remote else "") + action.component_set
        tab_values.append([action.id,
                           cs_label,
                           str(nodeset),
                           action.description])
    tab_values.append([HSEP, HSEP, HSEP, HSEP])
    try:
        average_deps = float(deps_total_nb) / actions_nb
    except ZeroDivisionError:
        average_deps = 0.0
    tab_values.append(["Average #Deps:", "-",
                       "%2.1f" % average_deps,
                       "-"])
    _LOGGER.output(smart_display([u"Id",
                                  u"[@]Component Set",
                                  u"Deps",
                                  u"Description"],
                                 tab_values, vsep=u" | ",
                                 left_align=[False, False, True, False]))
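NodeSet here presumably comes from ClusterShell (a dependency of the sequencer); its value is that str() folds individual node names back into compact ranges. A minimal sketch, assuming ClusterShell is installed:

from ClusterShell.NodeSet import NodeSet

nodeset = NodeSet()
for name in ("node1", "node2", "node3", "node10"):
    nodeset.add(name)
print(str(nodeset))   # -> node[1-3,10]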
Example 13
    def testSpecifyMultipleColumns(self):
        output = smart_display([u"T1", u"T2", u"T3"],
                               [[u"d1.1", u"d2.1", u"d3.1"],
                                [u"d1.2", u"d2.2", u"d3.2"]],
                               columns_max={u'T2': REMOVE_UNSPECIFIED_COLUMNS,
                                            u'T3': REMOVE_UNSPECIFIED_COLUMNS})
        self.assertIsNone(re.search(r'^.*T1.*$', output,
                                    flags=re.MULTILINE),
                          output)
        self.assertIsNone(re.search(r'^.*d1.*$', output,
                                    flags=re.MULTILINE),
                          output)
        self.assertIsNotNone(re.search(r'^.*T2.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*d2.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*T3.*$', output,
                                       flags=re.MULTILINE),
                             output)
        self.assertIsNotNone(re.search(r'^.*d3.*$', output,
                                       flags=re.MULTILINE),
                             output)
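Taken together, the tests suggest the columns_max semantics (inferred from behaviour, not from documentation): a maximum width of 0 drops that column entirely, while REMOVE_UNSPECIFIED_COLUMNS drops every column that is not listed. A hedged recap (assumed import path):

from sequencer.commons import smart_display, REMOVE_UNSPECIFIED_COLUMNS

rows = [[u"d1.1", u"d2.1"], [u"d1.2", u"d2.2"]]
# Width 0 removes T1, keeping only T2:
print(smart_display([u"T1", u"T2"], rows, columns_max={u"T1": 0}))
# Keep only the explicitly listed column(s):
print(smart_display([u"T1", u"T2"], rows,
                    columns_max={u"T2": REMOVE_UNSPECIFIED_COLUMNS}))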
Example 14
def _report_exec(execution):
    """
    Display the 'exec' type of report
    """
    header = [u"Id", u"Submitted Time",
              u"Started Time", u"Ended Time", u"Duration",
              u"RC", u"[@]Component Set"]
    executed_actions = execution.executed_actions.values()
    executed_actions_nb = len(executed_actions)
    model_actions_nb = len(execution.model.actions)
    try:
        percentage = (float(executed_actions_nb) / model_actions_nb) * 100
    except ZeroDivisionError:
        percentage = 0.0
    _LOGGER.output("\nExecuted Actions: %d (%2.1f %%)\tLegend:" + \
                       " @=remote, RC=Returned Code",
                   executed_actions_nb, percentage)
    tab_values = []
    # Compute the first/last started and ended times; these are only
    # defined when at least one action was actually executed
    if executed_actions_nb > 0:
        first_started = min(executed_actions,
                            key=lambda execaction: \
                                execaction.started_time).started_time
        last_started = max(executed_actions,
                           key=lambda execaction: \
                               execaction.started_time).started_time
        first_ended = min(executed_actions,
                          key=lambda execaction: \
                              execaction.ended_time).ended_time
        last_ended = max(executed_actions,
                         key=lambda execaction: \
                             execaction.ended_time).ended_time
    for execaction in sorted(executed_actions,
                             key=lambda execaction: \
                                 execaction.submitted_time):

        submit = dt.fromtimestamp(execaction.submitted_time)
        start = dt.fromtimestamp(execaction.started_time)
        end = dt.fromtimestamp(execaction.ended_time)
        duration = end - start
        submitted_time = submit.strftime(_TIME_FORMAT)
        started_time = start.strftime(_TIME_FORMAT)
        ended_time = end.strftime(_TIME_FORMAT)
        cs_label = ("@" if execaction.remote else "") + execaction.component_set

        tab_values.append([execaction.id, submitted_time,
                           started_time, ended_time, str(duration),
                           str(execaction.rc), cs_label])
    try:
        seq_total_time = _compute_seq_total_time(execution)
        # When nothing was executed this division raises ZeroDivisionError,
        # which also skips the summary rows below (they use first_started
        # & co., which are only defined when executed_actions_nb > 0)
        average_duration = seq_total_time // executed_actions_nb
        tab_values.append([FILL_EMPTY_ENTRY] * len(header))
        tab_values.append(["First:", "-",
                           str(dt.fromtimestamp(first_started)\
                                   .strftime(_TIME_FORMAT)),
                           str(dt.fromtimestamp(first_ended)\
                                   .strftime(_TIME_FORMAT)),
                           "-", "-", "-"])
        tab_values.append(["Last:", "-",
                           str(dt.fromtimestamp(last_started)\
                                   .strftime(_TIME_FORMAT)),
                           str(dt.fromtimestamp(last_ended)\
                                   .strftime(_TIME_FORMAT)),
                           "-", "-", "-"])
        tab_values.append(["Average:", "-", "-", "-",
                           str(average_duration),
                           "-", "-"])
    except ZeroDivisionError:
        pass
    output = smart_display(header,
                           tab_values, vsep=u' | ',
                           justify=[str.center, str.center,
                                    str.center, str.center, str.center,
                                    str.center, str.ljust])
    _LOGGER.output(output)
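The duration arithmetic above is plain standard-library datetime: subtracting two datetime objects yields a timedelta whose str() form is what ends up in the table. A minimal sketch (the timestamps and format string are made up; _TIME_FORMAT is defined elsewhere in the module):

from datetime import datetime as dt

start = dt.fromtimestamp(1700000000.0)
end = dt.fromtimestamp(1700000042.5)
duration = end - start                # a datetime.timedelta
print(start.strftime("%H:%M:%S"))     # stand-in for the _TIME_FORMAT rendering
print(str(duration))                  # -> 0:00:42.500000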