Code Example #1
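All of the examples below are pytest-style tests that exercise the project's BayesianNetwork class. They assume a common preamble, sketched here; the module path used to import BayesianNetwork and first_likelihood is a placeholder, not necessarily the project's real layout.

# Minimal import sketch shared by the examples below.
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim

# BayesianNetwork and first_likelihood come from the project under test;
# the module name below is an assumption.
from bayesian_network import BayesianNetwork, first_likelihood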
def test_BayesianNetwork_stack():

    dim = np.array([10, 30, 10])
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    pi = 0.5
    p = 1.0
    BayesianNetwork_init = False

    BayesianNetwork_stack = BayesianNetwork(dim, alpha_k, sigma_k, c, pi, p,
                                            BayesianNetwork_init)

    x = torch.tensor(np.random.uniform(0, 5, (20, 10)), dtype=torch.float64)

    output_prova = BayesianNetwork_stack(x)

    mu_stack, rho_stack, w_stack = BayesianNetwork_stack.stack()

    assert ((mu_stack.shape == w_stack.shape) and
            (rho_stack.data.numpy().shape[0] == (10 * 30 + 30 + 30 * 10 + 10)))
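The expected length of the stacked rho vector is simply the total number of weights and biases in the two linear layers implied by dim = [10, 30, 10]. A quick standalone check of that arithmetic (NumPy only, no project code):

# Parameter count for dim = [10, 30, 10]: weights + biases of both linear layers.
dim = np.array([10, 30, 10])
n_params = dim[0] * dim[1] + dim[1] + dim[1] * dim[2] + dim[2]
assert n_params == 10 * 30 + 30 + 30 * 10 + 10  # 640 stacked entries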
Code Example #2
def test_BayesianNetwork_priorvariance():

    dim = np.array([10, 30, 10])
    alpha_k = 0.0
    sigma_k = np.exp(-1)
    c = np.exp(7)
    pi = 0.5
    p = 1.0
    BayesianNetwork_init = False

    BayesianNetwork_prova_prev = BayesianNetwork(dim, alpha_k, sigma_k, c, pi,
                                                 p, BayesianNetwork_init)
    BayesianNetwork_prova = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prova_prev)

    x = torch.tensor(np.random.uniform(0, 5, (20, 10)), dtype=torch.float64)
    y = torch.tensor(np.random.choice(range(0, 10), 20), dtype=torch.long)

    call = BayesianNetwork_prova_prev(x)
    output_prova = BayesianNetwork_prova(x)

    mu_prev, rho_prev, w_prev = BayesianNetwork_prova_prev.stack()
    print(mu_prev)
    loss_prior1 = BayesianNetwork_prova.get_gaussiandistancefromprior(
        mu_prev, mu_prev, rho_prev)
    # print( loss_prior1 )

    # Overwrite every previous mean and rho with the constant 10; with
    # alpha_k = 0.0 the prior distance should not depend on these values.
    mu_prev.copy_(10 + mu_prev.clone().detach().zero_())
    rho_prev.copy_(10 + rho_prev.clone().detach().zero_())
    loss_prior2 = BayesianNetwork_prova.get_gaussiandistancefromprior(
        mu_prev, mu_prev, rho_prev)
    # print( loss_prior2 )

    assert loss_prior1.data.numpy() == loss_prior2.data.numpy()
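The copy_ calls above overwrite mu_prev and rho_prev with the constant 10 everywhere; 10 + t.clone().detach().zero_() is just a roundabout way of building a tensor of tens. A small standalone check of that idiom (plain PyTorch, no project code):

# 10 + t.clone().detach().zero_() yields the same tensor as torch.full_like(t, 10.0).
t = torch.randn(5, dtype=torch.float64)
assert torch.equal(10 + t.clone().detach().zero_(), torch.full_like(t, 10.0))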
Code Example #3
def test_stack_index():

    torch.manual_seed(0)
    np.random.seed(0)

    dim = np.array([10, 30, 10])
    L = 3
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    pi = 0.5
    p = 1.0
    BayesianNetwork_init = False

    BayesianNetwork_prev = BayesianNetwork(dim, alpha_k, sigma_k, c, pi, p,
                                           BayesianNetwork_init)
    BayesianNetwork_1 = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p=0.8,
        BayesianNetwork_init=BayesianNetwork_prev)

    x = torch.tensor(np.random.uniform(0, 5, (20, 10)), dtype=torch.float64)
    y = torch.tensor(np.random.choice(range(0, 10), 20), dtype=torch.long)

    torch.manual_seed(0)
    np.random.seed(0)
    call1 = BayesianNetwork_1(x)

    mu, rho, w = BayesianNetwork_1.stack()

    assert (mu[-(dim[L - 2] * dim[L - 1] + dim[L - 1]):] ==
            BayesianNetwork_1.Linear_layer[L - 2].mu.stack()).all()
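The slice in the assertion relies on the layout of the stacked parameter vector: it ends with the last layer's dim[L-2] * dim[L-1] weights followed by its dim[L-1] biases, so the trailing 30 * 10 + 10 = 310 entries of mu should coincide with that layer's own stacked mu. A small sketch of the index arithmetic (NumPy only, no project code):

# Size of the trailing block selected by mu[-(dim[L - 2] * dim[L - 1] + dim[L - 1]):].
dim = np.array([10, 30, 10])
L = 3
last_block = dim[L - 2] * dim[L - 1] + dim[L - 1]  # last layer's weights + biases
assert last_block == 310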
Code Example #4
def test_evolution():

    torch.manual_seed(0)
    np.random.seed(0)

    dim = np.array([10, 30, 10])
    L = 3
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    pi = 0.5
    p = 1.0
    BayesianNetwork_init = False

    BayesianNetwork_prev = BayesianNetwork(dim, alpha_k, sigma_k, c, pi, p,
                                           BayesianNetwork_init)
    BayesianNetwork_1 = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prev)
    BayesianNetwork_2 = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prev)

    x = torch.tensor(np.random.uniform(0, 5, (20, 10)), dtype=torch.float64)
    y = torch.tensor(np.random.choice(range(0, 10), 20), dtype=torch.long)

    torch.manual_seed(0)
    np.random.seed(0)
    call1 = BayesianNetwork_1(x)
    torch.manual_seed(0)
    np.random.seed(0)
    call2 = BayesianNetwork_2(x)

    call_prova = BayesianNetwork_prev(x)

    mu_prev = {}
    rho_prev = {}

    with torch.no_grad():
        for i in range(0, L - 1):
            mu_i = {}
            rho_i = {}

            mu_i["weight"] = BayesianNetwork_prev.Linear_layer[
                i].mu.weight.data.clone().detach()
            mu_i["bias"] = BayesianNetwork_prev.Linear_layer[
                i].mu.bias.data.clone().detach()

            rho_i["weight"] = BayesianNetwork_prev.Linear_layer[
                i].rho.weight.data.clone().detach()
            rho_i["bias"] = BayesianNetwork_prev.Linear_layer[
                i].rho.bias.data.clone().detach()

            mu_prev[str(i)] = mu_i
            rho_prev[str(i)] = rho_i

    pi = 0.5
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    model = BayesianNetwork_1
    p = 1.0

    check1 = (BayesianNetwork_2.Linear_layer[0].mu.weight.data.numpy() ==
              BayesianNetwork_1.Linear_layer[0].mu.weight.data.numpy()).all()
    # print(check1)

    # print( pi, alpha_k, sigma_k, c, p )
    # print( BayesianNetwork_prova.pi, BayesianNetwork_prova.alpha_k, BayesianNetwork_prova.sigma_k, BayesianNetwork_prova.c, BayesianNetwork_prova.p )

    optimizer1 = optim.SGD(BayesianNetwork_1.parameters(), lr=0.001)
    optimizer1.zero_grad()

    loss_prior_met1 = first_likelihood(pi, mu_prev, alpha_k, sigma_k, c, model,
                                       mu_prev, rho_prev, p, L)
    loss_net1 = F.cross_entropy(call1, y)
    # print(loss_net1)
    # print(loss_prior_met1)
    loss1 = loss_net1  #+ loss_prior_met1

    loss1.backward()
    optimizer1.step()

    optimizer2 = optim.SGD(BayesianNetwork_2.parameters(), lr=0.001)
    optimizer2.zero_grad()

    mu_prev2, rho_prev2, w_prev2 = BayesianNetwork_prev.stack()
    mu2, rho2, w2 = BayesianNetwork_2.stack()
    # print(mu2)

    loss_prior_met2 = BayesianNetwork_2.get_gaussiandistancefromprior(
        mu_prev2, mu_prev2, rho_prev2)
    loss_net2 = F.cross_entropy(call2, y)
    # print(loss_net2)
    # print(loss_prior_met2)
    loss2 = loss_net2  #+ loss_prior_met2

    loss2.backward()
    BayesianNetwork_2.Linear_layer[
        0].mu.weight.data = BayesianNetwork_2.Linear_layer[
            0].mu.weight.data - 0.001 * BayesianNetwork_2.Linear_layer[
                0].mu.weight.grad.data

    # print( (np.abs(BayesianNetwork_1.Linear_layer[0].mu.weight.data.numpy() - BayesianNetwork_2.Linear_layer[0].mu.weight.data.numpy())).sum() )

    # |mu.weight of net 2 - mu.weight of net 1| should be below exp(-10) entrywise:
    # the manual update above reproduces optimizer1's plain SGD step.
    assert (np.abs(BayesianNetwork_2.Linear_layer[0].mu.weight.data.numpy() -
                   BayesianNetwork_1.Linear_layer[0].mu.weight.data.numpy())
            < np.exp(-10)).all()
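The final assertion works because optimizer1's step is plain SGD, i.e. param <- param - lr * param.grad, and the hand-written update applied to BayesianNetwork_2's first mu.weight performs exactly that computation. A minimal standalone sketch of the equivalence (plain PyTorch, no project code):

# An optim.SGD step and a manual gradient update give the same result.
w1 = torch.randn(3, 3, requires_grad=True)
w2 = w1.detach().clone().requires_grad_(True)
(w1 ** 2).sum().backward()
(w2 ** 2).sum().backward()
optim.SGD([w1], lr=0.001).step()   # w1 <- w1 - 0.001 * w1.grad
with torch.no_grad():
    w2 -= 0.001 * w2.grad          # the same update written out by hand
assert torch.allclose(w1, w2)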
Code Example #5
def test_BayesianNetwork_prior_stack_evolution():

    dim = np.array([10, 30, 10])
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    pi = 0.5
    p = 1.0
    BayesianNetwork_init = False

    BayesianNetwork_prova_prev = BayesianNetwork(dim, alpha_k, sigma_k, c, pi,
                                                 p, BayesianNetwork_init)
    BayesianNetwork_prova1 = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prova_prev)
    BayesianNetwork_prova2 = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prova_prev)

    optimizer1 = optim.Adam(BayesianNetwork_prova1.parameters())
    optimizer1.zero_grad()
    optimizer2 = optim.Adam(BayesianNetwork_prova2.parameters())
    optimizer2.zero_grad()

    x = torch.tensor(np.random.uniform(0, 5, (20, 10)), dtype=torch.float64)
    y = torch.tensor(np.random.choice(range(0, 10), 20), dtype=torch.long)

    # for iter in range(1):
    call = BayesianNetwork_prova_prev(x)

    torch.manual_seed(0)
    np.random.seed(0)
    output_prova1 = BayesianNetwork_prova1(x)

    torch.manual_seed(0)
    np.random.seed(0)
    output_prova2 = BayesianNetwork_prova2(x)

    loss_network1 = F.cross_entropy(output_prova1, y)
    # print(loss_network1)
    loss_network2 = F.cross_entropy(output_prova2, y)
    # print(loss_network2)

    mu_prev, rho_prev, w_prev = BayesianNetwork_prova_prev.stack()
    loss_prior1 = BayesianNetwork_prova1.get_gaussiandistancefromprior(
        mu_prev, mu_prev, rho_prev)
    loss_prior2 = BayesianNetwork_prova2.get_gaussiandistancefromprior(
        mu_prev, mu_prev, rho_prev)

    mu2, rho2, w2 = BayesianNetwork_prova2.stack()

    loss1 = loss_network1 + loss_prior1
    loss2 = loss_network2 + loss_prior2 + (10 * mu2).sum()

    # print( BayesianNetwork_prova1.Linear_layer[0].mu.weight.data.numpy() - (BayesianNetwork_prova2.Linear_layer[0].mu.weight.data.numpy()) )

    loss1.backward()
    loss2.backward()

    # print( BayesianNetwork_prova1.Linear_layer[0].mu.weight.grad.data.numpy() - (BayesianNetwork_prova2.Linear_layer[0].mu.weight.grad.data.numpy()) )

    # optimizer1.step()
    # optimizer2.step()

    # print( BayesianNetwork_prova1.Linear_layer[0].mu.weight.grad.data.numpy() - (BayesianNetwork_prova2.Linear_layer[0].mu.weight.grad.data.numpy()) )

    # |grad of net 1 - (grad of net 2 - 10)| should be below exp(-5) entrywise.
    assert (np.abs(
        BayesianNetwork_prova1.Linear_layer[0].mu.weight.grad.data.numpy() -
        (BayesianNetwork_prova2.Linear_layer[0].mu.weight.grad.data.numpy() -
         10)) < np.exp(-5)).all()
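Adding (10 * mu2).sum() to loss2 shifts every entry of the gradient with respect to mu by exactly 10, which is why the assertion compares grad1 against grad2 - 10. A tiny standalone illustration (plain PyTorch, no project code):

# The gradient of (10 * mu).sum() is 10 for every entry of mu.
mu = torch.zeros(4, requires_grad=True)
(10 * mu).sum().backward()
assert (mu.grad == 10).all()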
Code Example #6
def test_prior_withdiffcomp():

    dim = np.array([10, 30, 10])
    L = 3
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    pi = 0.5
    p = 1.0
    BayesianNetwork_init = False

    BayesianNetwork_prova_prev = BayesianNetwork(dim, alpha_k, sigma_k, c, pi,
                                                 p, BayesianNetwork_init)
    BayesianNetwork_prova = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prova_prev)

    x = torch.tensor(np.random.uniform(0, 5, (20, 10)), dtype=torch.float64)

    call1 = BayesianNetwork_prova(x)
    call2 = BayesianNetwork_prova_prev(x)

    mu_prev = {}
    rho_prev = {}

    with torch.no_grad():
        for i in range(0, L - 1):
            mu_i = {}
            rho_i = {}

            mu_i["weight"] = BayesianNetwork_prova_prev.Linear_layer[
                i].mu.weight.data.clone().detach()
            mu_i["bias"] = BayesianNetwork_prova_prev.Linear_layer[
                i].mu.bias.data.clone().detach()

            rho_i["weight"] = BayesianNetwork_prova_prev.Linear_layer[
                i].rho.weight.data.clone().detach()
            rho_i["bias"] = BayesianNetwork_prova_prev.Linear_layer[
                i].rho.bias.data.clone().detach()

            mu_prev[str(i)] = mu_i
            rho_prev[str(i)] = rho_i

    pi = 0.5
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    model = BayesianNetwork_prova
    p = 1.0

    # print( pi, alpha_k, sigma_k, c, p )
    # print( BayesianNetwork_prova.pi, BayesianNetwork_prova.alpha_k, BayesianNetwork_prova.sigma_k, BayesianNetwork_prova.c, BayesianNetwork_prova.p )

    loss_prior_metold = first_likelihood(pi, mu_prev, alpha_k, sigma_k, c,
                                         model, mu_prev, rho_prev, p, L)

    mu_prev2, rho_prev2, w_prev2 = BayesianNetwork_prova_prev.stack()

    loss_prior_metnew = BayesianNetwork_prova.get_gaussiandistancefromprior(
        mu_prev2, mu_prev2, rho_prev2)

    # print(loss_prior_metnew - loss_prior_metold)

    # The old (dict-based) and new (stacked) prior computations should agree
    # to within exp(-8).
    assert np.abs(loss_prior_metnew.data.numpy() -
                  loss_prior_metold.data.numpy()) < np.exp(-8)
Code Example #7
def test_BayesianNetwork_prior():

    dim = np.array([10, 30, 10])
    alpha_k = 0.5
    sigma_k = np.exp(-1)
    c = np.exp(7)
    pi = 0.5
    p = 1.0
    BayesianNetwork_init = False

    BayesianNetwork_prova_prev_prev = BayesianNetwork(dim, alpha_k, sigma_k, c,
                                                      pi, p,
                                                      BayesianNetwork_init)
    BayesianNetwork_prova_prev = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prova_prev_prev)
    BayesianNetwork_prova = BayesianNetwork(
        dim,
        alpha_k,
        sigma_k,
        c,
        pi,
        p,
        BayesianNetwork_init=BayesianNetwork_prova_prev)

    # print( BayesianNetwork_prova.Linear_layer[1].mu.weight.data.numpy()[5, :] )
    # print( BayesianNetwork_prova_prev.Linear_layer[1].mu.weight.data.numpy()[5, :] )

    optimizer = optim.Adam(BayesianNetwork_prova.parameters())

    x = torch.tensor(np.random.uniform(0, 5, (20, 10)), dtype=torch.float64)
    y = torch.tensor(np.random.choice(range(0, 10), 20), dtype=torch.long)

    # for iter in range(1):
    call = BayesianNetwork_prova_prev(x)
    output_prova = BayesianNetwork_prova(x)

    loss_network = F.cross_entropy(output_prova, y)

    mu_prev, rho_prev, w_prev = BayesianNetwork_prova_prev.stack()
    loss_prior = BayesianNetwork_prova.get_gaussiandistancefromprior(
        mu_prev, mu_prev, rho_prev)

    loss = loss_network + loss_prior

    loss.backward()
    optimizer.step()

    check_diff = True
    # Note: only the first linear layer (layer 0) is inspected by this loop.
    for layer in range(0, 1):
        check_diff = (
            check_diff and
            (BayesianNetwork_prova.Linear_layer[layer].mu.weight.data.numpy()
             != BayesianNetwork_prova_prev.Linear_layer[layer].mu.weight.data.
             numpy()).any())

        check_diff = (
            check_diff and
            (BayesianNetwork_prova.Linear_layer[layer].rho.weight.data.numpy()
             != BayesianNetwork_prova_prev.Linear_layer[layer].rho.weight.data.
             numpy()).any())

        check_diff = (
            check_diff and
            (BayesianNetwork_prova.Linear_layer[layer].rho.bias.data.numpy() !=
             BayesianNetwork_prova_prev.Linear_layer[layer].rho.bias.data.
             numpy()).any())

        check_diff = (check_diff
                      and (BayesianNetwork_prova_prev_prev.Linear_layer[layer].
                           mu.bias.data.numpy() == BayesianNetwork_prova_prev.
                           Linear_layer[layer].mu.bias.data.numpy()).any())

    # print( BayesianNetwork_prova.Linear_layer[1].mu.weight.data.numpy()[5, :] )
    # print( BayesianNetwork_prova_prev.Linear_layer[1].mu.weight.data.numpy()[5, :] )

    assert (check_diff)
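check_diff verifies that a single Adam step actually moves BayesianNetwork_prova's parameters away from the network it was initialized from, while BayesianNetwork_prova_prev still shares (at least some of) its mu.bias entries with BayesianNetwork_prova_prev_prev. A minimal standalone illustration of the first point (plain PyTorch, no project code):

# One Adam step moves a parameter that received a nonzero gradient.
w = torch.nn.Parameter(torch.randn(4))
before = w.detach().clone()
opt = optim.Adam([w])
(w ** 2).sum().backward()
opt.step()
assert not torch.equal(w.detach(), before)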