Code example #1
import numpy as np
import torch

# Import path assumed to follow the neuralteleportation package layout.
from neuralteleportation.neuralteleportationmodel import NeuralTeleportationModel


def test_calculate_cob_weights(network,
                               model_name=None,
                               input_shape=(1, 1, 28, 28),
                               noise=False,
                               verbose=True):
    """
        Test if a cob can be calculated and applied to a network to teleport the network from the initial weights to
        the targets weights.

    Args:
        network (nn.Module): Network to be tested
        model_name (str): The name or label assigned to differentiate the model
        input_shape (tuple): Input shape of network
        noise (bool): whether to add noise to the target weights before optimisation.
        verbose (bool): whether to display sample ouputs during the test
    """
    model_name = model_name or network.__class__.__name__
    model = NeuralTeleportationModel(network=network, input_shape=input_shape)

    initial_weights = model.get_weights()
    w1 = model.get_weights(concat=False, flatten=False, bias=False)

    model.random_teleport()
    c1 = model.get_cob()
    model.random_teleport()
    c2 = model.get_cob()

    target_weights = model.get_weights()
    w2 = model.get_weights(concat=False, flatten=False, bias=False)

    if noise:
        for w in w2:
            w += torch.rand(w.shape) * 0.001

    calculated_cob = model.calculate_cob(w1, w2)

    model.initialize_cob()
    model.set_weights(initial_weights)
    model.teleport(calculated_cob, reset_teleportation=True)

    calculated_weights = model.get_weights()

    error = (calculated_weights - target_weights).abs().mean()

    if verbose:
        print("weights: ", target_weights.flatten())
        print("Calculated cob weights: ", calculated_weights.flatten())
        print("Weight error ", error)
        print("C1: ", c1.flatten()[:10])
        print("C2: ", c2.flatten()[:10])
        print("C1 * C2: ", (c1 * c2).flatten()[:10])
        print("Calculated cob: ", calculated_cob.flatten()[:10])

    assert np.allclose(calculated_weights.detach().numpy(), target_weights.detach().numpy()), \
        "Calculate cob and weights FAILED for " + model_name + " model with error: " + str(error.item())

    print("Calculate cob and weights successful for " + model_name + " model.")
Code example #2
    # Weight difference between model1 and model2 before applying any change of basis.
    w1 = model1.get_weights()
    w2 = model2.get_weights()
    diff = (w1.detach().cpu() - w2.detach().cpu()).abs().mean()
    print("Initial weight difference:", diff)

    # Calculate a change of basis (cob) that maps model1's per-layer weights onto model2's
    # (weights taken without concatenation, flattening or biases).
    w1 = model1.get_weights(concat=False, flatten=False, bias=False)
    w2 = model2.get_weights(concat=False, flatten=False, bias=False)
    calculated_cob = model1.calculate_cob(w1,
                                          w2,
                                          concat=True,
                                          eta=0.00001,
                                          steps=6000)
    torch.save(calculated_cob, pjoin(save_path, 'calculated_cob.pt'))

    # Teleport model1 with the calculated cob so that its weights approximate model2's.
    model1.teleport(calculated_cob)

    # Weight difference after teleporting model1 with the calculated cob.
    w1 = model1.get_weights()
    w2 = model2.get_weights()
    diff = (w1.detach().cpu() - w2.detach().cpu()).abs().mean()
    print("Predicted weight difference:", diff)

    # Per-layer comparison of the teleported weights against the target weights.
    w1 = model1.get_weights(concat=False, flatten=False, bias=False)
    w2 = model2.get_weights(concat=False, flatten=False, bias=False)

    print("Weight difference by layer:")
    for i in range(len(w1)):
        print("layer:", i)
        print("w1 - w2 = ",
              (w1[i].detach().cpu() - w2[i].detach().cpu()).abs().sum())
        print("w1: ", w1[i].detach().cpu().flatten()[:10])
Code example #3
    # Track how far the current cob is from the target cob over the optimization.
    cob_error_history = []

    print("Initial error: ", (cob - target_cob).abs().mean().item())
    print("Target cob sample: ", target_cob[0:10].data)
    print("cob sample: ", cob[0:10].data)

    optimizer = optim.Adam([cob], lr=args.lr)

    # Optimize the cob to recover the 'target_cob' that produced the original teleportation.
    for e in range(args.steps):
        # Reset the initial weights.
        model.set_weights(initial_weights)

        # Teleport with this cob
        model.teleport(cob)

        # Get the new weights and calculate the loss
        weights = model.get_weights()
        loss = (weights - target_weights).square().mean()

        # Backward pass.
        # retain_graph=True avoids an error when backward is run through the same graph more than once.
        loss.backward(retain_graph=True)
        optimizer.step()
        optimizer.zero_grad()

        history.append(loss.item())
        cob_error_history.append((cob - target_cob).square().mean().item())
        if e % 100 == 0:
            print("Step: {}, loss: {}, cob error: {}".format(