Example #1
def test_nodes_inside_boundary(get_digraph_with_cycle):
    hypothgraph, source, target = get_digraph_with_cycle

    # all nodes within boundary are accessible from source
    nodes_in_boundary_interior = boundary.in_boundary_interior(hypothgraph, source, target)
    for node_inside_boundary in nodes_in_boundary_interior:
        assert nx.has_path(hypothgraph, source, node_inside_boundary)
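The tests in Examples #1, #3, and #5 rely on a `get_digraph_with_cycle` pytest fixture that is not shown here. A minimal sketch of what such a fixture might return, assuming it only needs to provide a directed graph containing a cycle together with the two causal endpoints (the exact graph shape is an assumption, not the project's actual fixture):

import networkx as nx
import pytest


@pytest.fixture
def get_digraph_with_cycle():
    # Hypothetical small digraph: source and target endpoints with a cycle in between.
    hypothgraph = nx.DiGraph()
    hypothgraph.add_edges_from([
        ('source', 'a'),
        ('a', 'b'), ('b', 'a'),  # the cycle
        ('b', 'target'),
    ])
    return hypothgraph, 'source', 'target'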
Example #2
def confidence_spectrum(hypothgraph,
                        source,
                        target,
                        normalized=False,
                        func_importance=def_func_import):
    spectrum = []

    # all nodes in the boundary interior
    interior = list(boundary.in_boundary_interior(hypothgraph, source, target))

    # from zero to full number of potential evidences
    for number_of_evidence_possibilities in range(len(interior) + 1):
        confidences = confidences_possibilities(
            hypothgraph,
            source,
            target,
            number_of_evidence_possibilities,
            normalized=normalized,
            func_importance=func_importance)

        spectrum.append(confidences)

        # mean_confidence = sum(confidences)/float(len(confidences))
        # mean_confidences.append(mean_confidence)

    return spectrum
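A hedged usage sketch of `confidence_spectrum`: it returns one list of confidence values per possible count of evidenced interior nodes, so averaging each level recovers what the commented-out code above was computing. The `hypothgraph`, `source`, and `target` names stand for any inputs accepted by `boundary.in_boundary_interior`; this is an illustration, not part of the module.

# Hypothetical usage: summarize each evidence level by its mean confidence.
spectrum = confidence_spectrum(hypothgraph, source, target, normalized=True)
mean_per_level = [sum(confidences) / float(len(confidences))
                  for confidences in spectrum]
# mean_per_level[k] is the average confidence when k interior nodes are evidenced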
Example #3
def test_nodes_on_boundary(get_digraph_with_cycle):
    hypothgraph, source, target = get_digraph_with_cycle

    nodes_on_boundary = boundary.on_boundary(hypothgraph, source, target)
    nodes_in_boundary_interior = list(boundary.in_boundary_interior(hypothgraph, source, target))

    for node_on_boundary in nodes_on_boundary:
        assert node_on_boundary not in nodes_in_boundary_interior
Example #4
def hypothgraph_to_dot(hypothgraph,
                       conf,
                       stream=sys.stdout,
                       show_boundary_interior=False,
                       show_evidenced=False):
    """
    (graph, hypoth_conf, stream) -> string > stream

    Args:
        - hypothgraph (networkx.DiGraph): Hypothesis graph
        - conf (hypo_conf.Hypoth_Conf): Hypothesis configuration

            - source, target (nodes): Causal endpoints
            - evidenced_nodes ([node...]): Evidenced nodes

        - stream (default: sys.stdout | file): Where to write the output
        - show_boundary_interior (default: False): Should we apply different style to boundary interior nodes?
        - show_evidenced (default: False): Should we apply different style to evidenced nodes?
    Returns:
        - (string -> stream): `dot` representation of the graph

    """
    # extract configuration of the confidence evaluation of a hypothesis
    source, target, evidenced_nodes = conf

    boundary_interior_nodes = list(
        boundary.in_boundary_interior(hypothgraph, source, target))

    stream.write('digraph g {\n')

    # Go through the nodes of the hypothesis graph and apply the right node
    # styles
    for (node, node_data) in hypothgraph.nodes_iter(data=True):
        node_str = node_style

        # precedence is `boundary_interior_node` < `endpoint_node` < `evidenced_node`
        if show_boundary_interior and node in boundary_interior_nodes:
            node_str = boundary_interior_node_style

        if node == source or node == target:
            node_str = endpoint_node_style

        if show_evidenced and node in evidenced_nodes:
            node_str = evidenced_node_style

        stream.write(node_str % (node, node_data['label']))

    # Go through the edges of the hypothesis graph and apply the edge style
    for (s, t, edge_data) in hypothgraph.edges_iter(data=True):
        edge_str = edge_style

        stream.write(edge_str % (s, t, edge_data['label']))

    stream.write('}\n')

    return None
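A sketch of how `hypothgraph_to_dot` might be driven, assuming a `Hypoth_Conf` tuple with `source`, `target`, and `evidenced_nodes` fields as used in the other examples; the file name and the `dot` invocation are illustrative, not prescribed by the module.

# Hypothetical usage: dump the graph to a .dot file and render it with Graphviz.
conf = Hypoth_Conf(source=source, target=target, evidenced_nodes=evidenced_nodes)
with open('hypothgraph.dot', 'w') as dot_file:
    hypothgraph_to_dot(hypothgraph, conf,
                       stream=dot_file,
                       show_boundary_interior=True,
                       show_evidenced=True)
# then, for example:  dot -Tpng hypothgraph.dot -o hypothgraph.png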
Example #5
def test_partial_in_boundary(get_digraph_with_cycle):
    hypothgraph, source, target = get_digraph_with_cycle

    nodes_in_boundary_interior = list(boundary.in_boundary_interior(hypothgraph, source, target))
    nodes_partial_in_boundary = list(boundary.partial_nodes_boundary_interior(hypothgraph, source, target))

    assert len(nodes_partial_in_boundary) < len(nodes_in_boundary_interior)

    for partial_node in nodes_partial_in_boundary:
        assert partial_node in nodes_in_boundary_interior
Example #6
def relative_confidence_spectrum(big, small, source, target):
    evidenced_nodes = []
    iterative_hypoth_conf = Hypoth_Conf(source, target, evidenced_nodes)
    dict_confidences = {}

    # shortcut names for confidence functions
    func_conf = compute_confidence.confidence
    func_conf_norm = compute_confidence.normalized_confidence

    # compute max confidences in the small and in the big graphs
    max_small_confidence = compute_confidence.max_confidence(
        small, source, target)
    max_big_confidence = compute_confidence.max_confidence(big, source, target)

    # all nodes in the boundary interior of the SUBGRAPH
    nodes_boundary_interior = boundary.in_boundary_interior(
        small, source, target)

    # initial confidence values
    dict_confidences['sub_confidence_spectrum'] = [
        func_conf(small, iterative_hypoth_conf)
    ]
    dict_confidences['big_confidence_spectrum'] = [
        func_conf(big, iterative_hypoth_conf)
    ]
    dict_confidences['sub_confidence_normalized_spectrum'] = [
        func_conf_norm(small, iterative_hypoth_conf)
    ]
    dict_confidences['big_confidence_normalized_spectrum'] = [
        func_conf_norm(big, iterative_hypoth_conf)
    ]

    # add confidence values
    for boundary_interior_node in nodes_boundary_interior:
        evidenced_nodes.append(boundary_interior_node)
        iterative_hypoth_conf = Hypoth_Conf(source, target, evidenced_nodes)

        dict_confidences['sub_confidence_spectrum'].append(
            func_conf(small, iterative_hypoth_conf))
        dict_confidences['big_confidence_spectrum'].append(
            func_conf(big, iterative_hypoth_conf))
        dict_confidences['sub_confidence_normalized_spectrum'].append(
            func_conf_norm(small, iterative_hypoth_conf))
        dict_confidences['big_confidence_normalized_spectrum'].append(
            func_conf_norm(big, iterative_hypoth_conf))

    return dict_confidences
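A sketch of consuming the four parallel spectra returned by `relative_confidence_spectrum`, assuming `big` and `small` share the `source` and `target` endpoints; the loop and print format are illustrative only.

# Hypothetical usage: compare the small and big normalized spectra step by step.
spectra = relative_confidence_spectrum(big, small, source, target)
pairs = zip(spectra['sub_confidence_normalized_spectrum'],
            spectra['big_confidence_normalized_spectrum'])
for step, (small_conf, big_conf) in enumerate(pairs):
    print('%d evidenced: small=%.3f, big=%.3f' % (step, small_conf, big_conf))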
Example #7
def test_relative_confidence_richer(get_sample_configuration):
    big, small, source, target = get_sample_configuration

    # generate hypothesis configurations according to the small hypothesis
    nothing_small = Hypoth_Conf(source, target, [])
    partial_small = Hypoth_Conf(
        source, target,
        list(boundary.partial_nodes_boundary_interior(small, source, target)))
    full_small = Hypoth_Conf(
        source, target,
        list(boundary.in_boundary_interior(small, source, target)))

    # relative confidence is the gap between the normalized confidence in the
    # smaller hypothgraph and in the bigger hypothgraph
    nothing_conf_big = compute_confidence.normalized_confidence(
        big, nothing_small)
    nothing_conf_small = compute_confidence.normalized_confidence(
        small, nothing_small)
    relative_nothing = abs(nothing_conf_small - nothing_conf_big)

    partial_conf_big = compute_confidence.normalized_confidence(
        big, partial_small)
    partial_conf_small = compute_confidence.normalized_confidence(
        small, partial_small)
    relative_partial = abs(partial_conf_small - partial_conf_big)

    full_conf_big = compute_confidence.normalized_confidence(big, full_small)
    full_conf_small = compute_confidence.normalized_confidence(
        small, full_small)
    relative_full = abs(full_conf_small - full_conf_big)

    # with nothing evidenced our confidence should be 0 regardless of the size
    # of the hypothesis
    assert nothing_conf_big == nothing_conf_small == 0

    # with partially evidenced nodes, confidence in the smaller hypothesis is
    # at least as big as the confidence in the bigger hypothesis, and is
    # usually strictly bigger
    assert partial_conf_small >= partial_conf_big

    # with all interior nodes of the smaller hypothesis evidenced, the same
    # configuration in the bigger hypothesis can only give an equal or smaller
    # confidence
    assert full_conf_small >= full_conf_big
Example #8
def get_sample_hypothgraph_and_configurations():
    hypothgraph = sample_graphs.sample_hypothgraph()

    source, target = hypoth_conf.generate_rich_endpoints(hypothgraph)

    partial_nodes = list(
        boundary.partial_nodes_boundary_interior(hypothgraph, source, target))
    full_nodes = list(
        boundary.in_boundary_interior(hypothgraph, source, target))

    nothing = Hypoth_Conf(source=source, target=target, evidenced_nodes=[])
    partial = Hypoth_Conf(source=source,
                          target=target,
                          evidenced_nodes=partial_nodes)
    full = Hypoth_Conf(source=source,
                       target=target,
                       evidenced_nodes=full_nodes)

    return {
        'hypothgraph': hypothgraph,
        'nothing': nothing,
        'partial_within': partial,
        'full_within': full
    }
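A sketch of how the returned dictionary might feed a confidence check, using `compute_confidence.normalized_confidence` as in the other examples. That confidence does not decrease as more interior nodes are evidenced is an assumption about `compute_confidence`, not a documented guarantee.

# Hypothetical usage: confidence should not drop as more interior nodes are evidenced.
sample = get_sample_hypothgraph_and_configurations()
graph = sample['hypothgraph']
conf_nothing = compute_confidence.normalized_confidence(graph, sample['nothing'])
conf_partial = compute_confidence.normalized_confidence(graph, sample['partial_within'])
conf_full = compute_confidence.normalized_confidence(graph, sample['full_within'])
assert conf_nothing <= conf_partial <= conf_full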
Example #9
def confidences_possibilities(hypothgraph,
                              source,
                              target,
                              number_of_evidenced,
                              normalized=False,
                              func_importance=def_func_import):
    """
    (graph, node, node, int, bool) -> list[confidence_for_possibility]

    """
    # base case when we have no evidences, it is zero
    if number_of_evidenced == 0:
        return [0.0]

    # all nodes which potentially need to be evidenced
    interior_nodes = boundary.in_boundary_interior(hypothgraph, source, target)

    # we accumulate confidence values here
    confidences = []

    # which type of confidence are we computing, normalized or not
    func_confidence = partial(compute_confidence.confidence,
                              func_importance=func_importance)
    if normalized:
        func_confidence = partial(compute_confidence.normalized_confidence,
                                  func_importance=func_importance)

    # take combinations of interior nodes for the required number of evidenced
    # nodes
    for evidenced_possibility in it.combinations(interior_nodes,
                                                 number_of_evidenced):
        # make a hypothesis configuration
        new_conf = Hypoth_Conf(source, target, evidenced_possibility)
        confidences.append(func_confidence(hypothgraph, new_conf))

    return confidences
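Because `it.combinations` enumerates every subset of the given size, the number of returned confidence values grows as C(|interior|, k). A small hedged check of that relationship, assuming the same `hypothgraph`, `source`, and `target` as above:

# Hypothetical check: one confidence value per combination of evidenced interior nodes.
from math import factorial

interior = list(boundary.in_boundary_interior(hypothgraph, source, target))
k = min(2, len(interior))
confidences = confidences_possibilities(hypothgraph, source, target, k)
n = len(interior)
assert len(confidences) == factorial(n) // (factorial(k) * factorial(n - k))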
Example #10
def old_confidence_spectrum(hypothgraph, source, target, normalized=False):
    evidenced_nodes = []
    confidences = []
    iterative_hypoth_conf = Hypoth_Conf(source, target, evidenced_nodes)

    # which type of confidence are we computing, normalized or not
    func_confidence = compute_confidence.confidence
    if normalized:
        func_confidence = compute_confidence.normalized_confidence

    # initial confidence
    confidences.append(func_confidence(hypothgraph, iterative_hypoth_conf))

    # all nodes in the boundary interior
    nodes_boundary_interior = boundary.in_boundary_interior(
        hypothgraph, source, target)

    # add confidence values
    for boundary_interior_node in nodes_boundary_interior:
        evidenced_nodes.append(boundary_interior_node)
        iterative_hypoth_conf = Hypoth_Conf(source, target, evidenced_nodes)
        confidences.append(func_confidence(hypothgraph, iterative_hypoth_conf))

    return confidences
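`old_confidence_spectrum` evidences interior nodes one by one in a single fixed order, so it yields exactly len(interior) + 1 values, whereas `confidence_spectrum` (Example #2) yields one list of values per evidence count. A brief hedged sketch of that relationship, assuming the same inputs as above:

# Hypothetical comparison of the incremental and the combinatorial spectrum functions.
interior = list(boundary.in_boundary_interior(hypothgraph, source, target))

old_spectrum = old_confidence_spectrum(hypothgraph, source, target)
assert len(old_spectrum) == len(interior) + 1       # one value per added evidence

new_spectrum = confidence_spectrum(hypothgraph, source, target)
assert len(new_spectrum) == len(interior) + 1       # one list per evidence count
assert all(isinstance(level, list) for level in new_spectrum)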