Example #1
from pathlib import Path

from pysdd.sdd import SddManager, Vtree


def main():

    # set up vtree and manager
    var_count = 4
    vtree_type = "right".encode()
    vtree = Vtree(var_count=var_count, vtree_type=vtree_type)
    manager = SddManager(vtree=vtree)

    # x[i] is the literal for variable i (index 0 is unused)
    x = [None] + [manager.literal(i) for i in range(1, 5)]

    # construct the term X_1 ^ X_2 ^ X_3 ^ X_4
    alpha = x[1] & x[2] & x[3] & x[4]

    # construct the term ~X_1 ^ X_2 ^ X_3 ^ X_4
    beta = ~x[1] & x[2] & x[3] & x[4]

    # construct the term ~X_1 ^ ~X_2 ^ X_3 ^ X_4
    gamma = ~x[1] & ~x[2] & x[3] & x[4]

    print("== before referencing:")
    print(f"  live sdd size = {manager.live_size()}")
    print(f"  dead sdd size = {manager.dead_size()}")

    # ref SDDs so that they are not garbage collected
    alpha.ref()
    beta.ref()
    gamma.ref()
    print("== after referencing:")
    print(f"  live sdd size = {manager.live_size()}")
    print(f"  dead sdd size = {manager.dead_size()}")

    # garbage collect
    manager.garbage_collect()
    print("== after garbage collection:")
    print(f"  live sdd size = {manager.live_size()}")
    print(f"  dead sdd size = {manager.dead_size()}")

    alpha.deref()
    beta.deref()
    gamma.deref()

    print("saving vtree & shared sdd ...")
    if not Path("output").is_dir():
        raise FileNotFoundError("Directory 'output' does not exist")
    vtree.save_as_dot("output/shared-vtree.dot".encode())
    manager.shared_save_as_dot("output/shared.dot".encode())
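
A minimal driver sketch; the directory creation and the Graphviz render command below are additions, not part of the original example:

if __name__ == "__main__":
    # main() checks that this directory exists, so create it up front
    Path("output").mkdir(exist_ok=True)
    main()
    # the emitted .dot files can be rendered with Graphviz, e.g.:
    #   dot -Tpng output/shared.dot -o output/shared.png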
Example #2
import torch

from pysdd.sdd import SddManager, SddNode

useGPU = False  # assumed module-level flag; the original code references it but defines it elsewhere


def computeTensorWMC(node: SddNode, manager: SddManager, literal2OutputNeuron: dict,
                     weights: torch.Tensor) -> torch.Tensor:
    """Compute the weighted model count of an SDD bottom-up as a differentiable tensor."""
    stack = []
    nodesToTensors = {}

    stack.append(node)

    while stack:

        top = stack[-1]
        if top not in nodesToTensors:

            if top.is_decision():

                # push any children whose tensors have not been computed yet
                missingChild = False
                for prime, sub in top.elements():
                    if prime not in nodesToTensors:
                        stack.append(prime)
                        missingChild = True
                    if sub not in nodesToTensors:
                        stack.append(sub)
                        missingChild = True

                if not missingChild:
                    # WMC of a decision node: sum of prime * sub over its elements
                    if useGPU:
                        result = torch.tensor([0.0], requires_grad=True).cuda()
                    else:
                        result = torch.tensor([0.0], requires_grad=True)
                    for prime, sub in top.elements():
                        result = result + nodesToTensors[prime] * nodesToTensors[sub]

                    nodesToTensors[top] = result
                    stack.pop()

            elif top.is_literal():
                literal = top.literal
                if literal < 0:
                    # negative literal: its weight is 1 minus the positive literal's weight
                    positive = manager.literal(-literal)
                    neuronIndex = literal2OutputNeuron[positive]
                    nodesToTensors[top] = 1 - weights[neuronIndex]
                else:
                    neuronIndex = literal2OutputNeuron[top]
                    nodesToTensors[top] = weights[neuronIndex]
                stack.pop()

            elif top.is_false():
                # the false node contributes weight 0
                if useGPU:
                    nodesToTensors[top] = torch.tensor([0.0], requires_grad=True).cuda()
                else:
                    nodesToTensors[top] = torch.tensor([0.0], requires_grad=True)
                stack.pop()

            elif top.is_true():
                # the true node contributes weight 1
                if useGPU:
                    nodesToTensors[top] = torch.tensor([1.0], requires_grad=True).cuda()
                else:
                    nodesToTensors[top] = torch.tensor([1.0], requires_grad=True)
                stack.pop()
        else:
            # tensor already computed for this node
            stack.pop()

    return nodesToTensors[node]
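
A minimal usage sketch for computeTensorWMC; the constraint, the literal-to-neuron mapping, and the weight values are hypothetical stand-ins for a network's sigmoid outputs:

import torch
from pysdd.sdd import SddManager

manager = SddManager(var_count=3)
a, b, c = manager.literal(1), manager.literal(2), manager.literal(3)
alpha = (a | b) & ~c  # hypothetical constraint over three variables

# map each positive literal node to an index into the weight vector
literal2OutputNeuron = {a: 0, b: 1, c: 2}
weights = torch.tensor([0.9, 0.8, 0.1], requires_grad=True)

wmc = computeTensorWMC(alpha, manager, literal2OutputNeuron, weights)
wmc.backward()      # gradients flow back into the weights
print(wmc.item())   # probability that the constraint is satisfied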