Example 1
import torch

import q2_model
import q2_sampler


# Relies on vf_squared_hellinger, defined in Example 3 (same assignment file).
def Hellinger(theta):
    # Example usage of the code provided for answering Q2.5, with the recommended hyperparameters.
    model = q2_model.Critic(2)
    optim = torch.optim.SGD(model.parameters(), lr=1e-3)
    sampler1 = iter(q2_sampler.distribution1(0, 512))
    sampler2 = iter(q2_sampler.distribution1(theta, 512))
    lambda_reg_lp = 50  # Recommended hyperparameter for the Lipschitz regularizer.
    steps = 500
    for step in range(steps):
        data1 = torch.from_numpy(next(sampler1)).float()
        data2 = torch.from_numpy(next(sampler2)).float()
        loss = -vf_squared_hellinger(data1, data2, model)
        print('Step {}: loss {:.4f}'.format(step, loss.item()))
        optim.zero_grad()
        loss.backward()
        optim.step()
    data1 = torch.from_numpy(next(sampler1)).float()
    data2 = torch.from_numpy(next(sampler2)).float()
    return vf_squared_hellinger(data1, data2, model)
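
Note that lambda_reg_lp is declared in this snippet but never used: it only matters for the Wasserstein critic, whose Lipschitz constraint is enforced through a gradient penalty. A minimal sketch of such a regularizer, assuming the WGAN-LP form of Petzka et al. (the helper name lp_reg and its exact form are assumptions, not given in the source):

def lp_reg(x, y, critic):
    # Penalize critic gradient norms that exceed 1 at random points
    # interpolated between the two batches (assumed WGAN-LP penalty).
    a = torch.rand(x.size(0), 1, device=x.device)
    z = (a * x + (1 - a) * y).requires_grad_(True)
    grads = torch.autograd.grad(critic(z).sum(), z, create_graph=True)[0]
    return torch.mean(torch.clamp(grads.norm(2, dim=1) - 1, min=0) ** 2)

The penalized critic objective would then be loss = -vf_wasserstein_distance(x, y, critic) + lambda_reg_lp * lp_reg(x, y, critic).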
Example 2
    t = one - torch.exp(-critic_y)  # g_f(V(y)) with g_f(v) = 1 - e^{-v}
    return torch.mean(one - torch.exp(-critic_x)) + torch.mean(-t / (one - t))  # E_P[g_f(V(x))] - E_Q[f*(g_f(V(y)))], using f*(t) = t / (1 - t)


if __name__ == '__main__':
    # Example usage of the code provided for answering Q2.5, with the recommended hyperparameters.
    lambda_reg_lp = 50  # Recommended hyperparameter for the Lipschitz regularizer.

    sh = []
    w = []
    wlp = []

    thetas = [theta * 0.1 for theta in range(0, 21)]

    for theta1 in thetas:
        modelsh = q2_model.Critic(2)
        #modelw = q2_model.Critic(2)
        modelwlp = q2_model.Critic(2)

        optimsh = torch.optim.SGD(modelsh.parameters(), lr=1e-3)
        #optimw = torch.optim.SGD(modelw.parameters(), lr=1e-3)
        optimwlp = torch.optim.SGD(modelwlp.parameters(), lr=1e-3)

        sampler1 = iter(q2_sampler.distribution1(0, 512))
        sampler2 = iter(q2_sampler.distribution1(theta1, 512))

        for i in range(500):
            x = torch.from_numpy(next(sampler1)).float()
            y = torch.from_numpy(next(sampler2)).float()

            modelsh.zero_grad()
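
The snippet ends mid-loop. A minimal sketch of how the training step presumably continues, following the pattern of Example 1 for the Hellinger critic and the penalized Wasserstein objective for modelwlp (lp_reg is the assumed regularizer sketched after Example 1):

            # Assumed continuation: one gradient step per critic.
            losssh = -vf_squared_hellinger(x, y, modelsh)
            losssh.backward()
            optimsh.step()

            modelwlp.zero_grad()
            losswlp = (-vf_wasserstein_distance(x, y, modelwlp)
                       + lambda_reg_lp * lp_reg(x, y, modelwlp))
            losswlp.backward()
            optimwlp.step()

        # Record the final estimates for this theta.
        sh.append(vf_squared_hellinger(x, y, modelsh).item())
        wlp.append(vf_wasserstein_distance(x, y, modelwlp).item())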
Example 3
    :param critic: (Module) - torch module used to compute the Wasserstein distance
    :return: (FloatTensor) - shape: (1,) - Estimate of the Wasserstein distance
    """
    return torch.mean(critic(x)) - torch.mean(critic(y))
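
This return value is the Kantorovich-Rubinstein dual form of the Wasserstein distance, with the critic standing in for the 1-Lipschitz witness function (the constraint that the lambda_reg_lp penalty enforces approximately):

W(P, Q) = \sup_{\lVert f \rVert_L \le 1} \; \mathbb{E}_{x \sim P}[f(x)] - \mathbb{E}_{y \sim Q}[f(y)]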


def vf_squared_hellinger(x, y, critic):
    """
    Variational estimator of the Squared Hellinger distance. DON'T MODIFY THE PARAMETERS OF THE FUNCTION. Otherwise, tests might fail.

    *** The notation used for the parameters follows the one from Nowozin et al.: https://arxiv.org/pdf/1606.00709.pdf
    In other words, x are samples from the distribution P and y are samples from the distribution Q. Please note that the Critic is unbounded. ***

    :param x: (FloatTensor) - shape: (batchsize x featuresize) - Samples from the distribution P.
    :param y: (FloatTensor) - shape: (batchsize x featuresize) - Samples from the distribution Q.
    :param critic: (Module) - torch module used to compute the Squared Hellinger distance.
    :return: (FloatTensor) - shape: (1,) - Estimate of the Squared Hellinger distance.
    """
    t_y = torch.exp(-critic(y))  # e^{-V(y)}, computed once
    return torch.mean(1 - torch.exp(-critic(x))) - torch.mean((1 - t_y) / t_y)
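
For reference, this implements the f-GAN variational lower bound of Nowozin et al. for the squared Hellinger divergence, whose output activation is g_f(v) = 1 - e^{-v} and whose convex conjugate is f^*(t) = t / (1 - t), so that f^*(g_f(v)) = (1 - e^{-v}) / e^{-v}:

D_{H^2}(P \,\|\, Q) \;\ge\; \mathbb{E}_{x \sim P}\!\left[1 - e^{-V(x)}\right] - \mathbb{E}_{y \sim Q}\!\left[\frac{1 - e^{-V(y)}}{e^{-V(y)}}\right]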


if __name__ == '__main__':
    # Example usage of the code provided for answering Q2.5, with the recommended hyperparameters.
    model = q2_model.Critic(2)
    optim = torch.optim.SGD(model.parameters(), lr=1e-3)
    sampler1 = iter(q2_sampler.distribution1(0, 512))
    theta = 0
    sampler2 = iter(q2_sampler.distribution1(theta, 512))
    lambda_reg_lp = 50  # Recommended hyperparameter for the Lipschitz regularizer.
Example 4
import numpy as np
import torch

import q2_model
import q2_sampler


if __name__ == '__main__':
    # Example usage of the code provided for answering Q2.5, with the recommended hyperparameters.

    thetas = np.arange(0.0, 2.1, 0.1)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    print("Using ", device)
    torch.manual_seed(10)
    np.random.seed(10)

    from matplotlib import pyplot as plt
    hellinger_dict = {}
    wd_dict = {}
    for theta in thetas:
        model1 = q2_model.Critic(2).to(device)
        optim1 = torch.optim.SGD(model1.parameters(), lr=1e-3)
        model2 = q2_model.Critic(2).to(device)
        optim2 = torch.optim.SGD(model2.parameters(), lr=1e-3)

        lambda_reg_lp = 50  # Recommended hyperparameter for the Lipschitz regularizer.
        iterations = 2500

        for i in range(iterations):

            # print("iteration and theta is: ", i, "  ", theta)

            # The same fresh batches are drawn for both models at every step.
            sampler1 = iter(q2_sampler.distribution1(0, 512))
            sampler2 = iter(q2_sampler.distribution1(theta, 512))
            data1 = torch.from_numpy(next(sampler1)).float().to(device)
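
The snippet is truncated here. A minimal sketch of how the loop body and the bookkeeping into hellinger_dict and wd_dict presumably continue (assumed, following the earlier examples; lp_reg is the regularizer sketched after Example 1):

            # Assumed continuation: train both critics on the same batches.
            data2 = torch.from_numpy(next(sampler2)).float().to(device)

            optim1.zero_grad()
            loss1 = -vf_squared_hellinger(data1, data2, model1)
            loss1.backward()
            optim1.step()

            optim2.zero_grad()
            loss2 = (-vf_wasserstein_distance(data1, data2, model2)
                     + lambda_reg_lp * lp_reg(data1, data2, model2))
            loss2.backward()
            optim2.step()

        # Record the final estimates for this theta.
        hellinger_dict[theta] = vf_squared_hellinger(data1, data2, model1).item()
        wd_dict[theta] = vf_wasserstein_distance(data1, data2, model2).item()

The matplotlib import above suggests the two dictionaries are then plotted against thetas.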