Example no. 1
def test_explanation(formula: str,
                     x: torch.Tensor,
                     y: torch.Tensor,
                     target_class: int,
                     mask: torch.Tensor = None,
                     threshold: float = 0.5):
    """
    Tests a logic formula.

    :param formula: logic formula
    :param x: input data
    :param y: input labels (MUST be one-hot encoded)
    :param target_class: target class
    :param mask: sample mask
    :param threshold: threshold to get concept truth values
    :return: Macro-averaged F1 score of the explanation and its predictions
    """

    if formula in ['True', 'False', ''] or formula is None:
        return 0.0, None

    else:
        assert len(y.shape) == 2
        y = y[:, target_class]
        concept_list = [f"feature{i:010}" for i in range(x.shape[1])]
        # get predictions using sympy
        explanation = to_dnf(formula)
        fun = lambdify(concept_list, explanation, 'numpy')
        x = x.cpu().detach().numpy()
        predictions = fun(*[x[:, i] > threshold for i in range(x.shape[1])])
        # score the explanation via the macro-averaged F1 score
        accuracy = f1_score(y[mask], predictions[mask], average='macro')
        return accuracy, predictions
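A minimal usage sketch with hypothetical data; it assumes torch, sympy's to_dnf/lambdify, and sklearn.metrics.f1_score are imported, as the function body implies:

import torch

x = torch.rand(100, 2)                                  # concept activations in [0, 1]
y = torch.nn.functional.one_hot(torch.randint(0, 2, (100,)), num_classes=2).float()
mask = torch.ones(100, dtype=torch.bool)                # evaluate every sample
f1, preds = test_explanation('feature0000000000 & feature0000000001',
                             x, y, target_class=1, mask=mask)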
Example no. 2
def simplify_using_expression_and_negation(node, expr0, expr1, bnet):
    """Simplify the expression bnet by substituting the value for node given by
    node = expr1 = !expr0 (does not check that expr1=!expr0)

    Parameters
    ----------
    node : str
        Name of node to substitute
    expr0 : str
        Expression to substitute for !node
    expr1 : str
        Expression to substitute for node
    bnet : str
        BNET expression in which to perform the substitutions.

    Returns
    -------
    str
        Simplified BNET expression after substitutions are performed.

    """

    neg = "!" + node
    crule = re.sub(rf'\b{neg}\b', "(" + expr0 + ")", bnet)
    crule = re.sub(rf'\b{node}\b', "(" + expr1 + ")", crule)
    crule = sm_format.bnet2sympy(crule)
    crule = str(sympy.to_dnf(sympy.simplify(sympy.sympify(crule))))
    crule = sm_format.sympy2bnet(crule)
    return crule
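The substitution step in isolation, as a self-contained sketch with hypothetical expressions (the sm_format converters bnet2sympy/sympy2bnet belong to the host package):

import re

bnet = "A | C & !B"                  # hypothetical rule mentioning node B
node, expr0, expr1 = "B", "x & y", "!x | !y"
rule = re.sub(rf'(?<!\w)!{node}\b', "(" + expr0 + ")", bnet)  # !B -> (x & y)
rule = re.sub(rf'\b{node}\b', "(" + expr1 + ")", rule)        # B -> (!x | !y)
print(rule)                          # A | C & (x & y)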
Example no. 3
def reaction_to_gene_deletions(reaction_set, rules):
    """Return the candidate gene-deletion sets that disable every reaction in
    reaction_set: negating the OR of the reactions' GPR rules and converting
    to DNF yields one conjunct of negated genes per candidate set."""
    rule_set = [rules[r_id] for r_id in reaction_set]
    merged_rule = to_dnf(Not(Or(*rule_set)))
    
    gene_sets = []
    if type(merged_rule) is Or: 
        for sub_expr in merged_rule.args:
            gene_set = []
            if type(sub_expr) is And:
                gene_set = [str(not_gene.args[0]) if type(not_gene) is Not else 'error' 
                            for not_gene in sub_expr.args]
            else:
                gene_set = [str(sub_expr.args[0]) if type(sub_expr) is Not else 'error']
            gene_set = tuple(sorted(set(gene_set)))
            gene_sets.append(gene_set)
    elif type(merged_rule) is And:
        gene_set = [str(not_gene.args[0]) if type(not_gene) is Not else 'error' 
                    for not_gene in merged_rule.args]
        gene_set = tuple(sorted(set(gene_set)))
        gene_sets.append(gene_set)
    else:
        gene_set = [str(merged_rule.args[0]) if type(merged_rule) is Not else 'error']
        gene_set = tuple(sorted(set(gene_set)))
        gene_sets.append(gene_set)            
    return gene_sets
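A sketch of the De Morgan step the function builds on, with hypothetical reaction and gene names (rules maps reaction ids to sympy boolean GPR expressions):

from sympy import Not, Or, symbols, to_dnf

g1, g2, g3 = symbols('g1 g2 g3')
rules = {'R1': g1 & g2, 'R2': g3}    # hypothetical GPR rules
print(to_dnf(Not(Or(rules['R1'], rules['R2']))))
# (~g1 & ~g3) | (~g2 & ~g3): deleting {g1, g3} or {g2, g3} disables both reactions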
Example no. 4
def expand_gpr(gpr):
    formatted_gpr = gpr2expr(gpr)
    simplified_formatted_gpr = sympy.to_dnf(formatted_gpr)
    expanded_gpr = expr2gpr(simplified_formatted_gpr)

    return expanded_gpr
Example no. 5
    def _convert_to_dnf(self):
        self._debug('Converting to DNF ...',
                    header=self._middle_code.full_name)
        exp = simplify_logic(self._middle_code.exp,
                             form='dnf',
                             force=self._deep_simplify)
        self._middle_code.exp = to_dnf(exp)
        self._debug('Converted to DNF', header=self._middle_code.full_name)
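For reference, simplify_logic with form='dnf' does the heavy lifting here, and the trailing to_dnf call is essentially a guard; a standalone sketch:

from sympy.abc import a, b
from sympy.logic import simplify_logic

print(simplify_logic((a & b) | (a & ~b), form='dnf'))   # a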
Example no. 6
    def rank_Cdnf(self, Query, documents, d_index):
        Q_t = self.get_terms(Query)
        # normalise the query to DNF and split it into its conjunctive components
        Q = str(to_dnf(Query, simplify=True))
        Q = Q.split("|")
        product = 1

        for cc in Q:
            # accumulate the product of the per-component scores
            product = dot(self.rank_cc(cc, documents, Q_t, d_index), product)
            product = round(product, 3)
        # 'rest' presumably computes 1 - product (extended boolean ranking)
        return rest(1, product)
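The query normalisation it starts from, in isolation (dot, rest, get_terms, and rank_cc are helpers of the host retrieval class):

from sympy import to_dnf

Q = str(to_dnf('a & (b | c)', simplify=True))
print(Q)             # (a & b) | (a & c)
print(Q.split("|"))  # ['(a & b) ', ' (a & c)']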
Example no. 7
def getDNFFmla_v2( ridSidToPredicate_map ) :

  # ////////////////////////////////////////// #
  # convert to sympy formula
  ruleConjuncts_map = {}
  for key in ridSidToPredicate_map :
    predID = ridSidToPredicate_map[ key ]
    key    = key.split( "," )
    rid    = key[0]
    sid    = key[1]
    sign   = None

    if "_NEG_" in sid :
      sid  = sid.replace( "_NEG_", "" )
      sign = "_NEG_"

    if rid in ruleConjuncts_map :
      currConjunct_str = ruleConjuncts_map[ rid ]
      if sign :
        ruleConjuncts_map[ rid ] = currConjunct_str + " & ~( " + predID + " )"
      else :
        ruleConjuncts_map[ rid ] = currConjunct_str + " & " + predID
    else :
      if sign :
        ruleConjuncts_map[ rid ] = "~( " + predID + " )"
      else :
        ruleConjuncts_map[ rid ] = predID

  # ----------------------------------------------------------------- #
  # decrease work on sympy call by negating clauses individually 
  # and taking the conjunction of the negated clauses:
  #

  # apply negations and DNF simplifications on clauses individually
  for rid in ruleConjuncts_map :
    conjunct_str = ruleConjuncts_map[ rid ]
    ruleConjuncts_map[ rid ] = sympy.to_dnf( "~ ( " + conjunct_str + " )" )

  # build negative DNF fmla
  # by AND'ing together negated clauses.
  negFmla = None
  for key in ruleConjuncts_map :
    if negFmla :
      negFmla += " & " + str( ruleConjuncts_map[key] )
    else :
      negFmla = str( ruleConjuncts_map[key] )

  print "negFmla = " + negFmla

  ## simplify DNF
  #simplified_negFmla = sympy.to_dnf( negFmla )
  #return simplified_negFmla

  return negFmla
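The per-clause trick in isolation: negating one conjunct at a time keeps each sympy call small, and the conjunction of the negated clauses equals the negation of the whole disjunction:

import sympy

print(sympy.to_dnf("~( p1 & ~( p2 ) )"))   # p2 | ~p1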
Example no. 8
def getDNFFmla_v1( ridSidToPredicate_map ) :

  # ////////////////////////////////////////// #
  # convert to sympy formula
  ruleConjuncts_map = {}
  for key in ridSidToPredicate_map :
    predID = ridSidToPredicate_map[ key ]
    key    = key.split( "," )
    rid    = key[0]
    sid    = key[1]
    sign   = None
    if "_NEG_" in sid :
      sid  = sid.replace( "_NEG_", "" )
      sign = "_NEG_"

    if rid in ruleConjuncts_map :
      currConjunct_str = ruleConjuncts_map[ rid ]
      if sign :
        ruleConjuncts_map[ rid ] = currConjunct_str + " & ~( " + predID + " )"
      else :
        ruleConjuncts_map[ rid ] = currConjunct_str + " & " + predID
    else :
      if sign :
        ruleConjuncts_map[ rid ] = "~( " + predID + " )"
      else :
        ruleConjuncts_map[ rid ] = predID

  # add parens
  for rid in ruleConjuncts_map :
    conjunct_str = ruleConjuncts_map[ rid ]
    ruleConjuncts_map[ rid ] = "( " + conjunct_str + " )"

  # build positive DNF fmla
  posFmla = None
  for key in ruleConjuncts_map :
    if posFmla :
      posFmla += " | " + ruleConjuncts_map[key]
    else :
      posFmla = ruleConjuncts_map[key]

  # ----------------------------------------------------------- #
  # negate sympy formulas and simplify into DNF

  # negate DNF
  negFmla = "~( " + posFmla + " )"

  print "negFmla = " + negFmla

  # simplify DNF
  simplified_negFmla = sympy.to_dnf( negFmla )

  print "simplified_negFmla = " + str( simplified_negFmla )

  return simplified_negFmla
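By contrast, v1 hands sympy the negation of the full DNF at once, which is where the combinatorial blow-up can occur; the core call it performs:

import sympy

print(sympy.to_dnf("~( ( a & b ) | ( c & d ) )"))
# (~a & ~c) | (~a & ~d) | (~b & ~c) | (~b & ~d)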
Example no. 9
    def annotate(self, estnltk_text, label):
        # list of all words in grammar
        words = []

        # list of all operations
        operations = []

        nodes = [self.root]

        while nodes:
            node = nodes.pop()
            if _is_word(node):
                words.append(node)
            elif _is_operation(node):
                nodes.extend(node.nodes)
                operations.append(node)
        words_to_symbols = dict(
            zip(words, sympy.symbols([word.id for word in words])))
        symbols_to_words = dict((v, k) for (k, v) in words_to_symbols.items())
        expression = sympy.to_dnf(node_to_symbol(words_to_symbols, self.root),
                                  simplify=True)

        def match(word_object, estnltk_word):
            assert isinstance(word_object, Word)
            assert isinstance(estnltk_word, Text)

            for param, value in word_object.params.items():

                if set(value).issubset(
                        set([i[param] for i in estnltk_word.analysis[0]])):
                    continue
                else:
                    return False
            return True

        words_to_matches = collections.defaultdict(set)
        e_words = estnltk_text.split_by_words()

        spans = [{'start': a, 'end': b} for a, b in estnltk_text.word_spans]

        for _ind, e_word in enumerate(e_words):
            for word in words:
                if match(word, e_word):
                    words_to_matches[word].add(_ind)

        if words_to_matches:
            layer = [spans[i] for i in set.union(*words_to_matches.values())]
        else:
            layer = []

        estnltk_text[label] = layer
Example no. 10
def complexity(formula: str, to_dnf: bool = False) -> float:
    """
    Estimates the complexity of the formula.

    :param formula: logic formula.
    :param to_dnf: whether to convert the formula into disjunctive normal form.
    :return: The complexity of the formula.
    """
    if formula != "" and formula is not None:
        if to_dnf:
            formula = str(sympy.to_dnf(formula))
        return np.array([len(f.split(' & '))
                         for f in formula.split(' | ')]).sum()
    return 0
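For instance (np being numpy, as in the snippet):

print(complexity('(f1 & f2) | f3'))   # 3: two literals in the first clause plus one in the second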
Example no. 11
def parse_gpr(gpr):
    """
    Parses a string gpr into a sympy expression

    :param gpr:
    :return:
    """

    #assert(isinstance(gpr,str))
    assert (isevaluable(gpr))

    bool_gpr = multiple_replace(gpr, GPR2EXPR_SUBS_DICT, ignore_case=True)
    sym_gpr = sympy.to_dnf(bool_gpr)

    return sym_gpr
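The normalisation it performs, sketched with a plain string replacement (multiple_replace and GPR2EXPR_SUBS_DICT are host-package helpers that presumably map 'and'/'or' to '&'/'|'):

import sympy

gpr = "(g1 or g2) and g3"
bool_gpr = gpr.replace(" or ", " | ").replace(" and ", " & ")
print(sympy.to_dnf(bool_gpr))   # (g1 & g3) | (g2 & g3)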
Example no. 12
def parse_gpr_rule(rule, prefix=None):

    if not rule:
        return None

    rule = rule.replace('(', '( ').replace(')', ' )')

    def replacement(token):
        if token.lower() == 'and':
            return '&'
        elif token.lower() == 'or':
            return '|'
        elif token == '(' or token == ')':
            return token
        elif prefix is not None and not token.startswith(prefix):
            return prefix + sanitize_id(token)
        else:
            return sanitize_id(token)

    rule = ' '.join(map(replacement, rule.split()))

    expr = parse_expr(rule)

    if not is_dnf(expr):
        expr = to_dnf(expr)

    gpr = GPRAssociation()

    if type(expr) is Or:
        for sub_expr in expr.args:
            protein = Protein()
            if type(sub_expr) is And:
                protein.genes = [str(gene) for gene in sub_expr.args]
            else:
                protein.genes = [str(sub_expr)]
            gpr.proteins.append(protein)
    elif type(expr) is And:
        protein = Protein()
        protein.genes = [str(gene) for gene in expr.args]
        gpr.proteins = [protein]
    else:
        protein = Protein()
        protein.genes = [str(expr)]
        gpr.proteins = [protein]


    return gpr
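A hypothetical call (GPRAssociation, Protein, and sanitize_id come from the host library; the gene ids are made up):

gpr = parse_gpr_rule('(b0001 and b0002) or b0003', prefix='G_')
# gpr.proteins -> one protein with genes ['G_b0001', 'G_b0002'],
#                 one with genes ['G_b0003']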
Example no. 14
    def _handle(self, event: pygame.event.Event):
        if event.type == pygame.MOUSEBUTTONDOWN:
            if self.__viewer.handle(event):
                return True

            if in_bounds(style.get('.infobar-page-simplify', 'rect'), event.pos):
                scheme = ProjectManager.selected_project.selected_scheme
                res = sympy.to_dnf(scheme.symbol, simplify=True)

                index = ProjectManager.selected_project.order.index(ProjectManager.selected_project.selected)
                index += 1
                if index == len(ProjectManager.selected_project.order):
                    index = None
                simplified = Scheme.from_string(ProjectManager.selected_project, str(res))
                simplified.sub = True

                ProjectManager.selected_project.add_scheme(simplified, index=index)
Example no. 15
    def rules_conditions_union(self, rules):
        """
        Returns the union (OR) of the conditions of rules.

        We have to go through this convoluted function in order to merge
        ranges (for example port ranges), as a high-level rule (for example,
        allow ports 1 to 31766) is broken down into a multitude of
        hard-to-read flow rules. Return them in DNF notation, as it is
        nicer to read.
        """
        ored, range_accumulator = self.rule_conditions_or(rules)

        for key_name, ranges in range_accumulator.items():
            closed_interval = close_intervals(sm.Union(*ranges))

            ored = ored.replace(
                sm.Symbol(key_name),
                closed_interval.as_relational(
                    sm.Symbol(key_name.partition("__range_")[2])))
        return sm.to_dnf(ored)
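The interval-to-relational conversion it leans on, in isolation (sm is sympy, as in the snippet; the port range is hypothetical):

import sympy as sm

port = sm.Symbol('port')
print(sm.Interval(1, 31766).as_relational(port))   # (1 <= port) & (port <= 31766)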
Example no. 16
def simplify(expression):
    simplified_expr = sp.to_dnf(parse_expr(expression, evaluate=False),
                                simplify=True)
    return srepr(simplified_expr)
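An example call (sp, parse_expr, and srepr come from sympy, as the snippet implies):

print(simplify('a & (b | c)'))
# Or(And(Symbol('a'), Symbol('b')), And(Symbol('a'), Symbol('c')))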
Example no. 17
def spec_graph_to_metagraph(specification_edges):
    print(
        "\n\n###############################################################################"
    )
    print("Turning specification graph into metagraph")
    print(
        "###############################################################################"
    )

    workflow_variable_set = set()
    workflow_propositions_set = set()
    workflow_metagraph_edges = []

    # Simplify boolean expressions (use sympy) https://stackoverflow.com/questions/52416781/how-to-simplify-these-boolean-statements
    for src, dst, attributes in specification_edges:
        if glob_verbose >= 2:
            print("Edge: {} {} {}".format(src, dst, attributes))

        # Add src and dst to variable set if they are not present yet
        workflow_variable_set.update(src)
        workflow_variable_set.update(dst)

        # Parse policy into expression for sympy
        edge_policy = parse_expr(attributes)
        if glob_verbose >= 2:
            print("Edge policy: {}".format(edge_policy))

        # Convert policy to Disjunctive Normal Form (DNF)
        # I think we don't want to simplify the expression for the comparison
        # since it is not simplified in the metagraph generated from the policy
        # https://en.wikipedia.org/wiki/Disjunctive_normal_form
        # https://docs.sympy.org/latest/modules/logic.html
        # https://docs.sympy.org/latest/modules/parsing.html
        edge_policy_dnf = to_dnf(edge_policy, simplify=False)
        if glob_verbose >= 2:
            print("DNF: {}".format(edge_policy_dnf))

        # Metagraph nodes
        # Each element in metagraph_nodes is the proposition part of a node in the metagraph
        metagraph_nodes = str(edge_policy_dnf).split("|")
        if glob_verbose >= 2:
            print("Metagraph nodes: {}".format(metagraph_nodes))

        # Policy elements in nodes
        # Each element is a part of the propositions_set
        for node_propositions in metagraph_nodes:
            policy_elements = node_propositions.split('&')
            policy_elements = [
                policy_element.strip().lstrip('(').rstrip(')')
                for policy_element in policy_elements
            ]  # Remove leading and trailing whitespaces, plus leading and trailing parentheses

            # Add policy_elements to propositions_set
            for index, policy_element in enumerate(policy_elements):
                # Add ')' back for equalities
                if 'Eq' in policy_element:
                    policy_element = policy_element + ')'
                    policy_elements[index] = policy_elements[index] + ')'
                workflow_propositions_set.add(policy_element)
            workflow_metagraph_edges.append(
                Edge(src, dst, attributes=policy_elements))

            if glob_verbose >= 2:
                print("Policy elements: {}".format(policy_elements))

        if glob_verbose >= 2:
            print("\n")

    if glob_verbose >= 1:
        print("Variable set: {}".format(workflow_variable_set))
        print("Propositions set: {}\n".format(workflow_propositions_set))
        print("Metagraph edges: {}\n".format(workflow_metagraph_edges))

    # Create workflow metagraph
    specification_metagraph = ConditionalMetagraph(workflow_variable_set,
                                                   workflow_propositions_set)
    specification_metagraph.add_edges_from(workflow_metagraph_edges)

    if glob_verbose >= 1:
        print("Specification metagraph\n{}\n".format(
            repr(specification_metagraph)))

    if glob_verbose >= 2:
        print("Specification metagraph edges")
        print("{} {}".format("INVERTEX", "OUTVERTEX"))
        for edge in specification_metagraph.edges:
            print("{} {}".format(list(edge.invertex), list(edge.outvertex)))

    return specification_metagraph
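The unsimplified DNF conversion at the heart of the edge loop, with a hypothetical policy; the str() output is what the later splitting on '|' and '&' works on, and the rstrip(')') is why the trailing ')' of Eq terms has to be restored:

from sympy import to_dnf
from sympy.parsing.sympy_parser import parse_expr

policy = parse_expr('(role_a | role_b) & Eq(method, 1)')
print(to_dnf(policy, simplify=False))
# e.g. (role_a & Eq(method, 1)) | (role_b & Eq(method, 1))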
Example no. 18
    def query(self):
        # list of all words in grammar
        words = []

        # list of all operations
        operations = []

        nodes = [self.root]

        while nodes:
            node = nodes.pop()
            if _is_word(node):
                words.append(node)
            elif _is_operation(node):
                nodes.extend(node.nodes)
                operations.append(node)
        words_to_symbols = dict(
            zip(words, sympy.symbols([word.id for word in words])))
        symbols_to_words = dict((v, k) for (k, v) in words_to_symbols.items())

        expression = sympy.to_dnf(node_to_symbol(words_to_symbols, self.root),
                                  simplify=True)

        if isinstance(expression, sympy.And):
            # We have no outer Or - the query is a single And
            main_query = {
                'query': {
                    'bool': {
                        'must': [
                            word_to_query(word)
                            for word in (symbols_to_words[i]
                                         for i in expression.args)
                        ]
                    }
                }
            }
        elif isinstance(expression, sympy.Or):
            query = []
            for arg in (expression.args):
                if isinstance(arg, sympy.And):
                    query.append({
                        'bool': {
                            'must': [
                                word_to_query(word)
                                for word in (symbols_to_words[i]
                                             for i in arg.args)
                            ]
                        }
                    })
                elif isinstance(arg, sympy.Symbol):
                    query.append(word_to_query(symbols_to_words[arg]))

            main_query = {
                'query': {
                    'bool': {
                        'should': query,
                        'minimum_number_should_match': 1
                    }
                }
            }
        elif isinstance(expression, sympy.Symbol):
            main_query = {'query': word_to_query(symbols_to_words[expression])}

        else:
            raise AssertionError("Don't know what I got or why.")

        main_query['fields'] = ['estnltk_text_object']
        return main_query
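For a query like (w1 & w2) | w3, the assembled body has this shape (query_w1 etc. stand in for word_to_query(...) results and are purely illustrative):

main_query = {
    'query': {
        'bool': {
            'should': [
                {'bool': {'must': [query_w1, query_w2]}},  # w1 & w2
                query_w3,                                  # w3
            ],
            'minimum_number_should_match': 1,
        }
    },
    'fields': ['estnltk_text_object'],
}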
Example no. 19
def main(verbose, workflow, error_rate):
    global glob_verbose
    glob_verbose = verbose

    print("\n\n###############################################################################")
    print("Loading workflow specification from file")
    print("###############################################################################")

    with open(workflow, 'r') as workflow_file:
        workflow_edges = workflow_file.readlines()
        workflow_edges = [
            (set(src.lstrip('{').rstrip('}').split(', ')),
             set(dst.lstrip('{').rstrip('}').split(', ')),
             attributes)
            for src, dst, attributes in
            (edge.rstrip().split(';') for edge in workflow_edges)
        ]

    if glob_verbose >= 1:
        print("Edges")
        for edge in workflow_edges:
            print(edge)


    print("\n\n###############################################################################")
    print("Turning workflow graph into metagraph")
    print("###############################################################################")

    workflow_variable_set = set()
    workflow_propositions_set = set()
    workflow_metagraph_edges = []

    # Simplify boolean expressions (use sympy) https://stackoverflow.com/questions/52416781/how-to-simplify-these-boolean-statements
    for src, dst, attributes in workflow_edges:
        if glob_verbose >= 2:
            print("Edge: {} {} {}".format(src, dst, attributes))

        # Add src and dst to variable set if they are not present yet
        workflow_variable_set.update(src)
        workflow_variable_set.update(dst)


        # Parse policy into expression for sympy
        edge_policy = parse_expr(attributes)
        if glob_verbose >= 2:
            print("Edge policy: {}".format(edge_policy))

        # Convert policy to Disjunctive Normal Form (DNF)
        # I think we don't want to simplify the expression for the comparison
        # since it is not simplified in the metagraph generated from the policy
        # https://en.wikipedia.org/wiki/Disjunctive_normal_form
        # https://docs.sympy.org/latest/modules/logic.html
        # https://docs.sympy.org/latest/modules/parsing.html
        edge_policy_dnf = to_dnf(edge_policy, simplify=False)
        if glob_verbose >= 2:
            print("DNF: {}".format(edge_policy_dnf))


        # Metagraph nodes
        # Each element in metagraph_nodes is the proposition part of a node in the metagraph
        metagraph_nodes = str(edge_policy_dnf).split("|")
        if glob_verbose >= 2:
            print("Metagraph nodes: {}".format(metagraph_nodes))

        # Policy elements in nodes
        # Each element is a part of the propositions_set
        for node_propositions in metagraph_nodes:
            policy_elements = node_propositions.split('&')
            policy_elements = [policy_element.strip().lstrip('(').rstrip(')') for policy_element in policy_elements] # Remove leading and trailing whitespaces, plus leading and trailing parentheses

            # Add policy_elements to propositions_set
            for index, policy_element in enumerate(policy_elements):
                # Add ')' back for equalities
                if 'Eq' in policy_element:
                    policy_element = policy_element + ')'
                    policy_elements[index] = policy_elements[index] + ')'
                workflow_propositions_set.add(policy_element)
            workflow_metagraph_edges.append(Edge(src, dst, attributes=policy_elements))

            if glob_verbose >= 2:
                print("Policy elements: {}".format(policy_elements))

        if glob_verbose >= 2:
            print("\n")


    if glob_verbose >= 4:
        print("Variable set: {}".format(workflow_variable_set))
        print("Propositions set: {}\n".format(workflow_propositions_set))
        print("Metagraph edges: {}\n".format(workflow_metagraph_edges))

    # Create workflow metagraph
    print("Creating workflow metagraph")
    workflow_metagraph = ConditionalMetagraph(workflow_variable_set, workflow_propositions_set)
    workflow_metagraph.add_edges_from(workflow_metagraph_edges)

    if glob_verbose >= 4:
        print("Policy metagraph\n{}\n".format(repr(workflow_metagraph)))

    if glob_verbose >= 4:
        print("Workflow metagraph edges")
        print("{} {}".format("INVERTEX", "OUTVERTEX"))
        for edge in workflow_metagraph.edges:
            print("{} {}".format(list(edge.invertex), list(edge.outvertex)))


    # For error generation
    number_of_expressions = 0 # How many expressions are there in the metagraph
    for edge in workflow_metagraph.edges:
        number_of_expressions += len(list(edge.invertex)) + len(list(edge.outvertex))
    expressions_map = [0] * number_of_expressions

    if error_rate > 0.0:
        print("\n\n###############################################################################")
        print("Generating errors")
        print("###############################################################################")

        print("Number of expressions in metagraph: {}".format(number_of_expressions))

        number_of_errors = int(round(number_of_expressions * error_rate))
        print("Number of errors to generate: {}".format(number_of_errors))

        if glob_verbose >= 4:
            print(expressions_map)

        error_indices = random.sample(range(0, number_of_expressions), number_of_errors)
        if glob_verbose >= 4:
            print(error_indices)
        if glob_verbose >= 1:
            print("Number of errors indices: {}".format(len(error_indices)))

        for error_index in error_indices:
            expressions_map[error_index] = 1
        if glob_verbose >= 4:
            print(expressions_map)


    print("\n\n###############################################################################")
    print("Generating policy")
    print("###############################################################################")

    # Generate output policy name
    if "manually-generated" in workflow:
        output_policy_name = "generated-rego-from-spec/generated-from-manual/" + workflow.split('.')[0].split('/')[-1] + "-" + str(error_rate).split('.')[0] + "-" + str(error_rate).split('.')[-1] + "-error.rego"
    elif "randomly-generated" in workflow:
        path_chunks = workflow.split("/")
        generated_rego_dir_path = "generated-rego-from-spec/generated-from-random/" + path_chunks[-2] + "-" + str(error_rate).split('.')[0] + "-" + str(error_rate).split('.')[-1] + "-error/"
        if not os.path.exists(generated_rego_dir_path):
            os.makedirs(generated_rego_dir_path)
        print("Rego dir path: {}".format(generated_rego_dir_path))

        # Determine file uid
        file_id = path_chunks[-1].split('.')[0].split('-')[0] + '-'
        if glob_verbose >= 2:
            print("File id: {}".format(file_id))
        random_rego_filenames = [filename for filename in os.listdir(generated_rego_dir_path) if filename.startswith(file_id)]
        if glob_verbose >= 2:
            print("Random rego filenames: {}".format(random_rego_filenames))
        if not random_rego_filenames: # Dir empty
            uid = "1"
        else:
            uids = []
            for random_rego_filename in random_rego_filenames:
                if random_rego_filename.startswith(file_id):
                    uids.append(int(random_rego_filename.split('.')[0].split('-')[-1]))
            max_uid = max(uids)
            uid = str(max_uid + 1)
        if glob_verbose >= 2:
            print("UID: {}".format(uid))

        output_policy_name = generated_rego_dir_path + path_chunks[-1].split('.')[0] + "-" + uid + ".rego"
    else:
        terminate_app(0) #TODO Handle error
    print("Output policy file: {}\n".format(output_policy_name))

    # Basic ABAC structure
    global policy
    policy = []
    policy.append("package istio.authz\n")
    policy.append("import input.attributes.request.http as http_request\n\n")
    policy.append("default allow = false\n\n")
    policy.append("user_name = parsed {\n")
    policy.append('  [_, encoded] := split(http_request.headers.authorization, " ")\n')
    policy.append('  [parsed, _] := split(base64url.decode(encoded), ":")\n')
    policy.append("}\n\n")

    # Generate attributes
    #TODO Improvement: Generate based on random tenure + for all 'real' user
    policy.append('user_attributes = {\n')
    policy.append('  "owner": {"tenure": 8},\n')
    policy.append('  "vfx-1": {"tenure": 3},\n')
    policy.append('  "vfx-2": {"tenure": 12},\n')
    policy.append('  "vfx-3": {"tenure": 7},\n')
    policy.append('  "color": {"tenure": 3},\n')
    policy.append('  "sound": {"tenure": 4},\n')
    policy.append('  "hdr": {"tenure": 5},\n')
    policy.append('}\n\n')


    # Getting attribute ranges for error generation:
    global comp_operators, method_range, tenure_range, time_range, prop_range, user_range, dst_range
    comp_operators = {"==", ">=", "<=", ">", "<"}
    method_range = set()
    tenure_range = set()
    time_range = set()
    prop_range = set()
    user_range = set()
    dst_range = set()
    for edge in workflow_metagraph.edges:
        # Invertex
        for attribute in list(edge.invertex):
            # Match equals
            if 'Eq' in attribute:
                policy_eq_range(attribute)

            # Match comparison operators
            elif '>=' in attribute:
                policy_comp_range(attribute)

            elif '<=' in attribute:
                policy_comp_range(attribute)

            elif '>' in attribute:
                policy_comp_range(attribute)

            elif '<' in attribute:
                policy_comp_range(attribute)

            elif 'is_' in attribute: # prop
                policy_prop_range(attribute)

            else: # username
                policy_user_range(attribute)

        # Outvertex
        for attribute in list(edge.outvertex):
            policy_dst_range(attribute)

    if glob_verbose >= 3:
        print("Operators : {}".format(list(comp_operators)))
        print("Method range : {}".format(list(method_range)))
        print("Tenure range : {}".format(list(tenure_range)))
        print("Time range : {}".format(list(time_range)))
        print("Prop range : {}".format(list(prop_range)))
        print("User range : {}".format(list(user_range)))
        print("Dst range : {}\n".format(list(dst_range)))


    # Filling policy
    print("Filling policy")
    global_expressions_index = 0 # Global index for error generation
    for edge in workflow_metagraph.edges:
        if glob_verbose >= 4:
            print(edge)

        user_attributes_set = False
        time_set = False
        policy.append('allow {\n')

        # Invertex
        for attribute in list(edge.invertex):
            # Match equals
            if 'Eq' in attribute:
                if expressions_map[global_expressions_index] == 0:
                    policy_eq_add(attribute)
                else:
                    policy_eq_err(attribute)

            # Match comparison operators
            elif '>=' in attribute:
                if expressions_map[global_expressions_index] == 0:
                    user_attributes_set, time_set = policy_ge_add(attribute, user_attributes_set, time_set)
                else:
                    user_attributes_set, time_set = policy_ge_err(attribute, user_attributes_set, time_set)

            elif '<=' in attribute:
                if expressions_map[global_expressions_index] == 0:
                    user_attributes_set, time_set = policy_le_add(attribute, user_attributes_set, time_set)
                else:
                    user_attributes_set, time_set = policy_le_err(attribute, user_attributes_set, time_set)

            elif '>' in attribute:
                if expressions_map[global_expressions_index] == 0:
                    user_attributes_set, time_set = policy_gt_add(attribute, user_attributes_set, time_set)
                else:
                    user_attributes_set, time_set = policy_gt_err(attribute, user_attributes_set, time_set)

            elif '<' in attribute:
                if expressions_map[global_expressions_index] == 0:
                    user_attributes_set, time_set = policy_lt_add(attribute, user_attributes_set, time_set)
                else:
                    user_attributes_set, time_set = policy_lt_err(attribute, user_attributes_set, time_set)

            elif 'is_' in attribute: # prop
                if expressions_map[global_expressions_index] == 0:
                    policy_prop_add(attribute)
                else:
                    policy_prop_err(attribute)

            else: # username
                if expressions_map[global_expressions_index] == 0:
                    policy_user_add(attribute)
                else:
                    policy_user_err(attribute)

            global_expressions_index += 1

        # Outvertex
        for attribute in list(edge.outvertex):
            if expressions_map[global_expressions_index] == 0:
                policy_dst_add(attribute)
            else:
                policy_dst_err(attribute)

            global_expressions_index += 1

        policy.append('}\n\n')


    # Writing policy to file
    with open(output_policy_name, 'w') as output_policy:
        output_policy.writelines(policy)
Example no. 20
print(x, ' ', y, ' ', z, ' ', xyz, ' f')
tmp = '(((' + x + ' & ' + y + ') | (' + x + ' & ~ ' + y + '))|((~ ' + x + ' | ' + y + ') & (' + z + ' | ~ ' + xyz + ') & (~ ' + x + ' | ~ ' + y + ') & (' + xyz + ' | ' + z + ')))'
elem = 0
while(elem < 16):
       tmp0 = mas[elem][0]
       tmp1 = mas[elem][1]
       tmp2 = mas[elem][2]
       tmp3 = mas[elem][3]
       f1 = (tmp0 and tmp1) or (tmp0 and not tmp1) or ((not tmp0 or tmp2) and (tmp2 or not tmp3) and (not tmp0 or not tmp1) and (tmp3 or tmp2))
       if f1:
              tmp4 = 1
       else:
              tmp4 = 0
       print(tmp0, ' ', tmp1, ' ', tmp2, ' ', tmp3, ' ', tmp4)
       elem+=1
upr = str(sympy.to_dnf(tmp, simplify=True))


print('Expression:', '\n', tmp, '\n', sympy.to_dnf(tmp, simplify=True))


if len(upr) == 5:
       a = 1
elif len(upr) == 1:
       a = 0
elem = 0
if a == 1:
       print(upr[0], ' ', upr[4], '  f')
       mas1 = numpy.array([[0,0],[0,1],[1,0],[1,1]])
       while elem < 4:
              if mas1[elem][1] or mas1[elem][0] :
Example no. 21
def explain_class(
    model: torch.nn.Module,
    x: torch.Tensor = None,
    concept_names: list = None,
    device: torch.device = torch.device('cpu')) -> str:
    """
    Generate the FOL formulas corresponding to the parameters of a psi network.

    :param model: pytorch model
    :param x: input samples to extract logic formulas.
    :param concept_names: list of names of the input features.
    :param device: cpu or cuda device.
    :return: Global explanation
    """

    weights, bias = _collect_parameters(model, device)
    assert len(weights) == len(bias)

    # count number of layers of the psi network
    n_layers = len(weights)
    fan_in = np.count_nonzero((weights[0])[0, :])
    n_features = np.shape(weights[0])[1]

    # create fancy feature names
    if concept_names is not None:
        assert len(concept_names) == n_features, \
            "there must be as many concept names as network input features"
        feature_names = concept_names
    else:
        feature_names = list()
        for k in range(n_features):
            feature_names.append(f'feature{k:010}')

    # count the number of hidden neurons for each layer
    neuron_list = _count_neurons(weights)
    # get the position of non-pruned weights
    nonpruned_positions = _get_nonpruned_positions(weights, neuron_list)

    # neuron activations are calculated on real data
    x_real = x.detach().cpu().numpy()

    # simulate a forward pass using non-pruned weights only
    predictions_r = list()
    input_matrices = list()
    for j in range(n_layers):
        X1 = [
            x_real[:, nonpruned_positions[j][i][0]]
            for i in range(neuron_list[j])
        ]
        weights_active = _get_nonpruned_weights(weights[j], fan_in)

        # with real data we calculate the predictions neuron by neuron
        # since the input to each neuron may differ (does not happen with truth table)
        y_pred_r = [
            _forward(X1[i], weights_active[i, :], bias[j][i])
            for i in range(neuron_list[j])
        ]
        y_pred_r = np.asarray(y_pred_r)
        x_real = np.transpose(y_pred_r)
        predictions_r.append(y_pred_r)
        input_matrices.append(np.asarray(X1) > 0.5)

    simplify = True
    formulas_r = None
    feature_names_r = feature_names
    for j in range(n_layers):
        formulas_r = list()
        for i in range(neuron_list[j]):
            formula_r = _compute_fol_formula(input_matrices[j][i],
                                             predictions_r[j][i],
                                             feature_names_r,
                                             nonpruned_positions[j][i][0],
                                             simplify=simplify,
                                             fan_in=fan_in)
            formulas_r.append(f'({formula_r})')
        # the new feature names are the formulas we just computed
        feature_names_r = formulas_r
    formulas_r = [
        str(to_dnf(formula, simplify=True, force=simplify))
        for formula in formulas_r
    ]

    return formulas_r[0]
Example no. 22
def aut_modification(Q, Q0, delta):
    """Modifies the UBA automaton and outputs modified transition and bad transitions"""

    delta_mod = delta[:]
    delta_add = []  # transitions to be added
    delta_bad_all = []  # all bad transitions
    if len(Q0) != 1:
        # q0 would be undefined below; bail out with the error flag set
        return (delta_mod, delta_bad_all, True)
    error_flag = False
    q0 = Q0[0]

    for q in Q:

        trs_exist = [tr[1] for tr in delta_mod if tr[0] == q]
        trs_exist_toq0 = [
            tr[1] for tr in delta_mod if tr[0] == q if tr[2] == q0
        ]

        trs_exist = ' | '.join([
            ' | '.join(
                ['(' + ' & '.join(tr_cl.split(' ')) + ')' for tr_cl in tr])
            for tr in trs_exist
        ])
        trs_exist = trs_exist.replace('T', 'True')
        trs_exist_toq0 = ' | '.join([
            ' | '.join(
                ['(' + ' & '.join(tr_cl.split(' ')) + ')' for tr_cl in tr])
            for tr in trs_exist_toq0
        ])
        trs_exist_toq0 = trs_exist_toq0.replace('T', 'True')

        trs_complement = str(sp.simplify('~( ' + trs_exist + ' )'))
        # account for strange outputs of 'simplify'
        # (can be avoided by using 'true' and 'false' instead)
        if trs_complement == '-1':
            trs_complement = 'True'
        elif trs_complement == '-2':
            trs_complement = 'False'

        if trs_complement == 'True':
            delta_bad_all.append((q, ['T'], q0))
        elif trs_complement == 'False':
            pass
        else:
            delta_bad_all.append((q, trs_complement, q0))

        if q == q0:
            trs_add = 'True'
        elif trs_exist_toq0:
            trs_add = trs_exist_toq0 + ' | ' + trs_complement
        else:
            trs_add = trs_complement

        ind_rep = [
            ind for ind, tr in enumerate(delta_mod) if tr[0] == q
            if tr[2] == q0
        ]  # existing transition to q0 that should be replaced
        trs_add = str(sp.to_dnf(trs_add, True))
        if trs_add == 'True':
            add_flag = True
            trs_add_dnf = ['T']
        elif trs_add == 'False':
            add_flag = False
        else:
            add_flag = True
            trs_add = trs_add.split(' | ')
            trs_add_dnf = []
            for cl in trs_add:
                if cl[0] == '(':
                    trs_add_dnf.append(' '.join(cl[1:-1].split(' & ')))
                else:
                    trs_add_dnf.append(' '.join(cl.split(' & ')))
        # add complementary transitions
        if add_flag:
            if ind_rep:  # drop the existing transition to q0 it replaces
                del delta_mod[ind_rep[0]]
            delta_add.append((q, trs_add_dnf, q0))

    delta_mod.extend(delta_add)

    return (delta_mod, delta_bad_all, error_flag)
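The DNF round-trip on the transition labels, in isolation (sp is sympy; note how tautologies come back as the string 'True', which is exactly what the branches above test for):

import sympy as sp

print(str(sp.to_dnf('a & b | ~a & b', True)))   # b
print(str(sp.to_dnf('a | ~a', True)))           # True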