Example #1
def test_translationalinvariance_1d():
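    # Check that the 1D MERA model assigns the same log-probability to samples rolled by half the chain (4 of 8 sites) in either direction.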
    Nlayers = 2
    Hs = 10
    Ht = 10
    sList = [MLP(2, Hs, activation=ScalableTanh([2])) for _ in range(Nlayers)]
    tList = [MLP(2, Ht, activation=ScalableTanh([2])) for _ in range(Nlayers)]
    masktypelist = ['evenodd', 'evenodd'] * (Nlayers // 2)
    # assemble RNVP blocks into a MERA network
    prior = Gaussian([8])
    layers = [
        RealNVP([2], sList, tList, Gaussian([2]), masktypelist)
        for _ in range(6)
    ]
    model = MERA(1, 2, 8, layers, prior)

    x = model.sample(10)
    xright = Roll(4, 1).forward(x)
    xleft = Roll(-4, 1).forward(x)

    logp = model.logProbability(x)
    assert_array_almost_equal(logp.data.numpy(),
                              model.logProbability(xleft).data.numpy(),
                              decimal=6)
    assert_array_almost_equal(logp.data.numpy(),
                              model.logProbability(xright).data.numpy(),
                              decimal=6)
Example #2
def test_translationalinvariance():
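    # Check that the TEBD model's log-probability is invariant under rolling samples by 2 sites in either direction.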

    #RNVP block
    Depth = 8
    Nlayers = 2
    Hs = 10
    Ht = 10
    sList = [MLP(2, Hs) for _ in range(Nlayers)]
    tList = [MLP(2, Ht) for _ in range(Nlayers)]
    masktypelist = ['channel', 'channel'] * (Nlayers // 2)

    # assemble RNVP blocks into a TEBD layer
    prior = Gaussian([8])
    layers = [
        RealNVP([2], sList, tList, Gaussian([2]), masktypelist)
        for _ in range(Depth)
    ]

    model = TEBD(1, 2, Depth, layers, prior)

    x = model.sample(10)
    xright = Roll(2, 1).forward(x)
    xleft = Roll(-2, 1).forward(x)

    logp = model.logProbability(x)
    assert_array_almost_equal(logp.data.numpy(),
                              model.logProbability(xleft).data.numpy(),
                              decimal=6)
    assert_array_almost_equal(logp.data.numpy(),
                              model.logProbability(xright).data.numpy(),
                              decimal=6)
Example #3
def test_invertible_2d():
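    # Round trip for a 2D TEBD model: inference should undo generate, and a model reloaded from disk should reproduce the same output.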
    #RNVP block
    Nlayers = 4
    Hs = 10
    Ht = 10

    sList = [MLPreshape(4, Hs) for _ in range(Nlayers)]
    tList = [MLPreshape(4, Ht) for _ in range(Nlayers)]
    masktypelist = ['channel', 'channel'] * (Nlayers // 2)

    # assemble RNVP blocks into a TEBD layer
    prior = Gaussian([4, 4])
    layers = [
        RealNVP([2, 2], sList, tList, Gaussian([2, 2]), masktypelist)
        for _ in range(4)
    ]
    print(layers[0])
    print(layers[0].generate(
        Variable(torch.FloatTensor([1, 2, 3, 4]).view(1, 2, 2))))

    model = TEBD(2, [2, 2], 4, layers, prior)

    z = model.prior(1)

    print("original")

    x = model.generate(z)
    print("Forward")

    zp = model.inference(x)

    print("Backward")
    assert_array_almost_equal(z.data.numpy(), zp.data.numpy())

    saveDict = model.saveModel({})
    torch.save(saveDict, './saveNet.testSave')

    sListp = [MLPreshape(4, Hs) for _ in range(Nlayers)]
    tListp = [MLPreshape(4, Ht) for _ in range(Nlayers)]
    masktypelistp = ['channel', 'channel'] * (Nlayers // 2)

    # assemble RNVP blocks into a TEBD layer
    priorp = Gaussian([4, 4])
    layersp = [
        RealNVP([2, 2], sListp, tListp, Gaussian([2, 2]), masktypelistp)
        for _ in range(4)
    ]

    modelp = TEBD(2, [2, 2], 4, layersp, priorp)
    saveDictp = torch.load('./saveNet.testSave')
    modelp.loadModel(saveDictp)

    xp = modelp.generate(z)

    assert_array_almost_equal(xp.data.numpy(), x.data.numpy())
Example #4
def test_invertible():
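    # Round trip for a 1D TEBD model: inference should undo generate, the log-Jacobians should cancel, and a reloaded model should reproduce x.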

    #RNVP block
    Nlayers = 4
    Hs = 10
    Ht = 10
    sList = [MLP(2, Hs) for _ in range(Nlayers)]
    tList = [MLP(2, Ht) for _ in range(Nlayers)]
    masktypelist = ['channel', 'channel'] * (Nlayers // 2)

    # assemble RNVP blocks into a TEBD layer
    prior = Gaussian([8])
    layers = [
        RealNVP([2], sList, tList, Gaussian([2]), masktypelist)
        for _ in range(4)
    ]

    model = TEBD(1, 2, 4, layers, prior)

    z = model.prior(10)

    print("original")

    x = model.generate(z, ifLogjac=True)
    print("Forward")

    zp = model.inference(x, ifLogjac=True)

    print("Backward")
    assert_array_almost_equal(z.data.numpy(), zp.data.numpy())
    assert_array_almost_equal(model._generateLogjac.data.numpy(),
                              -model._inferenceLogjac.data.numpy())

    saveDict = model.saveModel({})
    torch.save(saveDict, './saveNet.testSave')

    sListp = [MLP(2, Hs) for _ in range(Nlayers)]
    tListp = [MLP(2, Ht) for _ in range(Nlayers)]
    masktypelistp = ['channel', 'channel'] * (Nlayers // 2)

    # assemble RNVP blocks into a TEBD layer
    priorp = Gaussian([8])
    layersp = [
        RealNVP([2], sListp, tListp, Gaussian([2]), masktypelistp)
        for _ in range(4)
    ]

    modelp = TEBD(1, 2, 4, layersp, priorp)
    saveDictp = torch.load('./saveNet.testSave')
    modelp.loadModel(saveDictp)

    xp = modelp.generate(z)

    assert_array_almost_equal(xp.data.numpy(), x.data.numpy())
Example #5
def test_gaussian_double():
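    # A double-precision Gaussian prior: samples have shape (5, 2, 4, 4) and logProbability returns one value per sample.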
    prior = Gaussian([2, 4, 4]).double()
    a = prior.sample(5)
    assert a.data.shape[0] == 5
    assert a.data.shape[1] == 2
    assert a.data.shape[2] == 4
    assert a.data.shape[3] == 4
    b = prior.logProbability(a)
    assert b.data.shape[0] == 5
    assert len(b.data.shape) == 1
    prior(5)
Example #6
def test_invertible_2d():
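    # Invertibility and save/load test for a 2D MERA built from RealNVP blocks with channel/vchannel masks.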

    Nlayers = 4
    Hs = 10
    Ht = 10
    sList = [MLPreshape(4, Hs) for _ in range(Nlayers)]
    tList = [MLPreshape(4, Ht) for _ in range(Nlayers)]
    masktypelist = ['channel', 'vchannel'] * (Nlayers // 2)
    # assemble RNVP blocks into a MERA network
    prior = Gaussian([8, 8])
    layers = [
        RealNVP([2, 2], sList, tList, Gaussian([2, 2]), masktypelist)
        for _ in range(6)
    ]
    print(layers[0].mask)
    #layers = [debugRealNVP() for _ in range(6)]
    model = MERA(2, [2, 2], 64, layers, prior)
    #z = prior(1)
    z = Variable(torch.from_numpy(np.arange(64)).float().view(1, 8, 8))
    x = model.generate(z)
    zz = model.inference(x)

    print(zz)
    print(z)

    assert_array_almost_equal(
        z.data.numpy(), zz.data.numpy(),
        decimal=4)  # fails for decimal >= 5, likely due to single-precision floats

    saveDict = model.saveModel({})
    torch.save(saveDict, './saveNet.testSave')

    Nlayersp = 4
    Hsp = 10
    Htp = 10
    sListp = [MLPreshape(4, Hsp) for _ in range(Nlayersp)]
    tListp = [MLPreshape(4, Htp) for _ in range(Nlayersp)]
    masktypelistp = ['channel', 'vchannel'] * (Nlayersp // 2)
    # assemble RNVP blocks into a MERA network
    priorp = Gaussian([8, 8])
    layersp = [
        RealNVP([2, 2], sListp, tListp, Gaussian([2, 2]), masktypelistp)
        for _ in range(6)
    ]
    modelp = MERA(2, [2, 2], 64, layersp, priorp)

    saveDictp = torch.load('./saveNet.testSave')
    modelp.loadModel(saveDictp)

    xp = modelp.generate(z)

    assert_array_almost_equal(xp.data.numpy(), x.data.numpy())
Example #7
def test_workmode2():
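    # RealNVP in mode=2: generate/inference with an explicit sliceDim should be inverses, and the weights should survive a save/load round trip.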
    gaussian = Gaussian([2])

    sList = [MLP(1, 10), MLP(1, 10), MLP(1, 10), MLP(1, 10)]
    tList = [MLP(1, 10), MLP(1, 10), MLP(1, 10), MLP(1, 10)]

    realNVP = RealNVP([2], sList, tList, gaussian, mode=2)

    z = realNVP.prior(10)

    x = realNVP.generate(z, sliceDim=0)

    zp = realNVP.inference(x, sliceDim=0)

    assert_array_almost_equal(z.data.numpy(), zp.data.numpy())

    saveDict = realNVP.saveModel({})
    torch.save(saveDict, './saveNet.testSave')
    # realNVP.loadModel({})
    sListp = [MLP(1, 10), MLP(1, 10), MLP(1, 10), MLP(1, 10)]
    tListp = [MLP(1, 10), MLP(1, 10), MLP(1, 10), MLP(1, 10)]

    realNVPp = RealNVP([2], sListp, tListp, gaussian, mode=2)
    saveDictp = torch.load('./saveNet.testSave')
    realNVPp.loadModel(saveDictp)

    xx = realNVPp.generate(z, sliceDim=0)
    print("Forward after restore")

    assert_array_almost_equal(xx.data.numpy(), x.data.numpy())
Example #8
def test_logProbabilityWithInference_cuda():
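    # GPU round trip with checkerboard masks: inference should undo generate; also exercises logProbabilityWithInference.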
    gaussian3d = Gaussian([2, 4, 4])
    x3d = gaussian3d(3).cuda()
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0], [1, 2, 1, 0]]
    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP3d = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d).cuda()
    mask3d = realNVP3d.createMask(["checkerboard"] * 4, cuda=0)

    z3d = realNVP3d.generate(x3d)
    zp3d = realNVP3d.inference(z3d)

    print(realNVP3d.logProbabilityWithInference(z3d)[1])

    assert_array_almost_equal(x3d.cpu().data.numpy(), zp3d.cpu().data.numpy())
Example #9
def test_sample():
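    # Smoke test: draw samples from a checkerboard-masked RealNVP and evaluate their log-probability.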
    gaussian3d = Gaussian([2, 4, 4])
    x3d = gaussian3d(3)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0], [1, 2, 1, 0]]
    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP3d = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d,
                        "checkerboard")

    z3d = realNVP3d.sample(100, True)

    zp3d = realNVP3d.sample(100, False)

    print(realNVP3d.logProbability(z3d))
Example #10
def test_tempalte_invertibleMLP():
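    # Low-level check: _generate/_inference with channel masks are inverses and their log-Jacobians cancel.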

    print("test mlp")

    gaussian = Gaussian([2])

    sList = [MLP(2, 10), MLP(2, 10), MLP(2, 10), MLP(2, 10)]
    tList = [MLP(2, 10), MLP(2, 10), MLP(2, 10), MLP(2, 10)]

    realNVP = RealNVP([2], sList, tList, gaussian)
    x = realNVP.prior(10)
    mask = realNVP.createMask(["channel"] * 4, ifByte=0)
    print("original")
    #print(x)

    z = realNVP._generate(x, realNVP.mask, realNVP.mask_, True)

    print("Forward")
    #print(z)

    zp = realNVP._inference(z, realNVP.mask, realNVP.mask_, True)

    print("Backward")
    #print(zp)

    assert_array_almost_equal(realNVP._generateLogjac.data.numpy(),
                              -realNVP._inferenceLogjac.data.numpy())

    print("logProbability")
    print(realNVP._logProbability(z, realNVP.mask, realNVP.mask_))

    assert_array_almost_equal(x.data.numpy(), zp.data.numpy())
Example #11
def test_posInvariance_mera():
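    # With debugRealNVP layers acting as identities, the tensors MERA saves at each RG step during generate/inference should match the reference grids t1, t2, z.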
    prior = Gaussian([8, 8])
    layers = [debugRealNVP() for _ in range(9)]
    #layers = [debugRealNVP() for _ in range(6)]
    t1 = torch.FloatTensor([0, 4, 32, 36]).view(1, 2, 2)
    t2 = torch.FloatTensor(
        [0, 2, 4, 6, 16, 18, 20, 22, 32, 34, 36, 38, 48, 50, 52,
         54]).view(1, 4, 4)

    model = MERA(2, [2, 2], 64, layers, prior, metaDepth=3)
    z = Variable(torch.from_numpy(np.arange(64)).float().view(1, 8, 8))
    t = [t1, t2, z.data]
    print(z)
    x = model.generate(z, save=True)
    tmp = 0
    for i, p in enumerate(model.saving):
        if i % 3 == 0 and i != 0:
            tmp += 1
        assert_array_almost_equal(p.data.numpy(), t[tmp].numpy())
    print(x)
    zz = model.inference(x, save=True)
    tmp = 0
    for i, p in enumerate(reversed(model.saving)):
        if i % 3 == 0 and i != 0:
            tmp += 1
        assert_array_almost_equal(p.data.numpy(), t[tmp].numpy())

    print(zz)

    assert_array_almost_equal(
        z.data.numpy(), zz.data.numpy(),
        decimal=4)  # fails for decimal >= 5, likely due to single-precision floats
Example #12
def test_tempalte_contraction_mlp():
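    # Contraction variant: _generateWithContraction/_inferenceWithContraction with channel byte masks should have cancelling log-Jacobians.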
    gaussian = Gaussian([2])

    sList = [MLP(1, 10), MLP(1, 10), MLP(1, 10), MLP(1, 10)]
    tList = [MLP(1, 10), MLP(1, 10), MLP(1, 10), MLP(1, 10)]

    realNVP = RealNVP([2], sList, tList, gaussian)

    x = realNVP.prior(10)
    mask = realNVP.createMask(["channel"] * 4, ifByte=1)
    print("original")
    #print(x)

    z = realNVP._generateWithContraction(x, realNVP.mask, realNVP.mask_, 0,
                                         True)

    print("Forward")
    #print(z)

    zp = realNVP._inferenceWithContraction(z, realNVP.mask, realNVP.mask_, 0,
                                           True)

    print("Backward")
    #print(zp)
    assert_array_almost_equal(realNVP._generateLogjac.data.numpy(),
                              -realNVP._inferenceLogjac.data.numpy())

    x_data = realNVP.prior(10)
    y_data = realNVP.prior.logProbability(x_data)
    print("logProbability")
Example #13
def test_parallel():
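    # A DataParallel forward pass on two GPUs should match the single-process CPU result to about 5 decimal places.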
    gaussian3d = Gaussian([2, 4, 4])
    x = gaussian3d(3)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0], [1, 2, 1, 0]]
    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    z = realNVP(x)
    print(z)
    net = torch.nn.DataParallel(realNVP.cuda(0), device_ids=[0, 1])
    output = net(x.cuda())
    print(output)

    assert_array_almost_equal(z.data.numpy(),
                              output.cpu().data.numpy(),
                              decimal=5)
Example #14
def test_slice_cudaNo0():
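    # Slice-based generate/inference round trip on a non-default CUDA device (cuda:2); the log-Jacobians should cancel.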
    gaussian3d = Gaussian([2, 4, 4])
    x = gaussian3d(3).cuda(2)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0], [1, 2, 1, 0]]
    sList3d = [
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure)
    ]
    tList3d = [
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure)
    ]

    realNVP = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    realNVP = realNVP.cuda(2)
    z = realNVP._generateWithSlice(x, 0, True)
    print(realNVP._logProbabilityWithSlice(z, 0))
    zz = realNVP._inferenceWithSlice(z, 0, True)
    assert_array_almost_equal(x.cpu().data.numpy(), zz.cpu().data.numpy())
    assert_array_almost_equal(realNVP._generateLogjac.data.cpu().numpy(),
                              -realNVP._inferenceLogjac.data.cpu().numpy())
Example #15
def test_tempalte_contractionCNN_checkerboard_cuda():
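    # GPU round trip of _generate/_inference with checkerboard masks; the log-Jacobians should cancel.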
    gaussian3d = Gaussian([2, 4, 4])
    x3d = gaussian3d(3).cuda()
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0], [2, 2, 1, 0]]
    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP3d = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    realNVP3d = realNVP3d.cuda()
    mask3d = realNVP3d.createMask(["checkerboard"] * 4, ifByte=0, cuda=0)
    z3d = realNVP3d._generate(x3d, realNVP3d.mask, realNVP3d.mask_, True)
    zp3d = realNVP3d._inference(z3d, realNVP3d.mask, realNVP3d.mask_, True)
    print(realNVP3d._logProbability(z3d, realNVP3d.mask, realNVP3d.mask_))
    assert_array_almost_equal(x3d.cpu().data.numpy(), zp3d.cpu().data.numpy())
    assert_array_almost_equal(realNVP3d._generateLogjac.data.cpu().numpy(),
                              -realNVP3d._inferenceLogjac.data.cpu().numpy())
Example #16
def test_invertible_1d():
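    # Invertibility, log-Jacobian, and save/load test for a 1D MERA model.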
    Nlayers = 4
    Hs = 10
    Ht = 10
    sList = [MLP(2, Hs) for _ in range(Nlayers)]
    tList = [MLP(2, Ht) for _ in range(Nlayers)]
    masktypelist = ['channel', 'channel'] * (Nlayers // 2)
    # assemble RNVP blocks into a MERA network
    prior = Gaussian([8])
    layers = [
        RealNVP([2], sList, tList, Gaussian([2]), masktypelist)
        for _ in range(6)
    ]
    model = MERA(1, 2, 8, layers, prior)
    z = prior(4)
    x = model.generate(z, ifLogjac=True)
    zz = model.inference(x, ifLogjac=True)

    assert_array_almost_equal(z.data.numpy(), zz.data.numpy())
    print(model._generateLogjac)
    print(model._inferenceLogjac)
    assert_array_almost_equal(model._generateLogjac.data.numpy(),
                              -model._inferenceLogjac.data.numpy())

    saveDict = model.saveModel({})
    torch.save(saveDict, './saveNet.testSave')

    Nlayersp = 4
    Hsp = 10
    Htp = 10
    sListp = [MLP(2, Hsp) for _ in range(Nlayersp)]
    tListp = [MLP(2, Htp) for _ in range(Nlayersp)]
    masktypelistp = ['channel', 'channel'] * (Nlayersp // 2)
    # assemble RNVP blocks into a MERA network
    priorp = Gaussian([8])
    layersp = [
        RealNVP([2], sListp, tListp, Gaussian([2]), masktypelistp)
        for _ in range(6)
    ]
    modelp = MERA(1, 2, 8, layersp, priorp)

    saveDictp = torch.load('./saveNet.testSave')
    modelp.loadModel(saveDictp)

    xp = modelp.generate(z)

    assert_array_almost_equal(xp.data.numpy(), x.data.numpy())
Example #17
def test_invertible_2d_metaDepth3():
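    # Invertibility check for a 2D MERA with metaDepth=3.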
    Nlayers = 4
    Hs = 10
    Ht = 10
    sList = [MLPreshape(4, Hs) for _ in range(Nlayers)]
    tList = [MLPreshape(4, Ht) for _ in range(Nlayers)]
    masktypelist = ['channel', 'channel'] * (Nlayers // 2)
    # assemble RNVP blocks into a MERA network
    prior = Gaussian([8, 8])
    layers = [
        RealNVP([2, 2], sList, tList, Gaussian([2, 2]), masktypelist)
        for _ in range(9)
    ]
    #layers = [debugRealNVP() for _ in range(6)]
    model = MERA(2, [2, 2], 64, layers, prior, metaDepth=3)
    z = Variable(torch.from_numpy(np.arange(64)).float().view(1, 8, 8))
    x = model.generate(z)
    zz = model.inference(x)

    assert_array_almost_equal(
        z.data.numpy(), zz.data.numpy(),
        decimal=4)  # fails for decimal >= 5, likely due to single-precision floats
Example #18
def test_invertible():
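    # Basic RealNVP test: check the default mask shape, that inference undoes generate, and that a reloaded model reproduces the same output.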

    print("test realNVP")
    gaussian = Gaussian([2])

    sList = [MLP(2, 10), MLP(2, 10), MLP(2, 10), MLP(2, 10)]
    tList = [MLP(2, 10), MLP(2, 10), MLP(2, 10), MLP(2, 10)]

    realNVP = RealNVP([2], sList, tList, gaussian)

    print(realNVP.mask)
    print(realNVP.mask_)
    z = realNVP.prior(10)
    #mask = realNVP.createMask()
    assert realNVP.mask.shape[0] == 4
    assert realNVP.mask.shape[1] == 2

    print("original")
    #print(x)

    x = realNVP.generate(z)

    print("Forward")
    #print(z)

    zp = realNVP.inference(x)

    print("Backward")
    #print(zp)

    assert_array_almost_equal(z.data.numpy(), zp.data.numpy())

    saveDict = realNVP.saveModel({})
    torch.save(saveDict, './saveNet.testSave')
    # realNVP.loadModel({})
    sListp = [MLP(2, 10), MLP(2, 10), MLP(2, 10), MLP(2, 10)]
    tListp = [MLP(2, 10), MLP(2, 10), MLP(2, 10), MLP(2, 10)]

    realNVPp = RealNVP([2], sListp, tListp, gaussian)
    saveDictp = torch.load('./saveNet.testSave')
    realNVPp.loadModel(saveDictp)

    xx = realNVPp.generate(z)
    print("Forward after restore")

    assert_array_almost_equal(xx.data.numpy(), x.data.numpy())
Example #19
def test_posInvariance_tebd():
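    # With debugRealNVP layers acting as identities, TEBD's generate/inference should return the input unchanged, as should every saved intermediate.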
    prior = Gaussian([4, 4])
    layers = [debugRealNVP() for _ in range(4)]

    model = TEBD(2, [2, 2], 4, layers, prior)

    #z = model.prior(1)
    z = Variable(torch.from_numpy(np.arange(16)).view(1, 4, 4).float())

    x = model.generate(z, save=True)
    assert_array_almost_equal(z.data.numpy(), x.data.numpy())

    for p in model.saving:
        assert_array_almost_equal(z.data.numpy(), p.data.numpy())

    zp = model.inference(x, save=True)

    for p in model.saving:
        assert_array_almost_equal(z.data.numpy(), p.data.numpy())

    assert_array_almost_equal(z.data.numpy(), zp.data.numpy())
Example #20
def test_contraction_cuda_withDifferentMasks():
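    # GPU contraction round trip with mixed channel/checkerboard masks; the log-Jacobians should cancel.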
    gaussian3d = Gaussian([2, 4, 4])
    x = gaussian3d(3).cuda()
    #z3dp = z3d[:,0,:,:].view(10,-1,4,4)
    #print(z3dp)

    #print(x)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0],
                    [1, 2, 1, 0]]  # [channel, filter_size, stride, padding]

    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    realNVP = realNVP.cuda()
    mask = realNVP.createMask(
        ["channel", "checkerboard", "channel", "checkerboard"], 1, cuda=0)

    z = realNVP._generateWithContraction(x, realNVP.mask, realNVP.mask_, 2,
                                         True)
    print(
        realNVP._logProbabilityWithContraction(z, realNVP.mask, realNVP.mask_,
                                               2))
    zz = realNVP._inferenceWithContraction(z, realNVP.mask, realNVP.mask_, 2,
                                           True)

    assert_array_almost_equal(x.cpu().data.numpy(), zz.cpu().data.numpy())
    assert_array_almost_equal(realNVP._generateLogjac.data.cpu().numpy(),
                              -realNVP._inferenceLogjac.data.cpu().numpy())
Example #21
def test_multiplyMask_generateWithSlice_CNN():
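    # Slice-based round trip after creating mixed channel/checkerboard byte masks; the log-Jacobians should cancel.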
    gaussian3d = Gaussian([2, 4, 4])
    x = gaussian3d(3)
    #z3dp = z3d[:,0,:,:].view(10,-1,4,4)
    #print(z3dp)

    #print(x)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0],
                    [1, 2, 1, 0]]  # [channel, filter_size, stride, padding]

    sList3d = [
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure)
    ]
    tList3d = [
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure)
    ]

    realNVP = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    mask = realNVP.createMask(
        ["channel", "checkerboard", "channel", "checkerboard"], ifByte=1)

    z = realNVP._generateWithSlice(x, 0, True)
    #print(z)
    zz = realNVP._inferenceWithSlice(z, 0, True)

    #print(zz)

    assert_array_almost_equal(x.data.numpy(), zz.data.numpy())
    #print(realNVP._generateLogjac.data.numpy())
    #print(realNVP._inferenceLogjac.data.numpy())
    assert_array_almost_equal(realNVP._generateLogjac.data.numpy(),
                              -realNVP._inferenceLogjac.data.numpy())
Example #22
def test_template_contraction_function_with_channel():
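    # Contraction round trip with channel byte masks: inference should undo generate and the log-Jacobians should cancel.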
    gaussian3d = Gaussian([2, 4, 4])
    x = gaussian3d(3)
    #z3dp = z3d[:,0,:,:].view(10,-1,4,4)
    #print(z3dp)

    #print(x)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0],
                    [1, 2, 1, 0]]  # [channel, filter_size, stride, padding]

    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    mask = realNVP.createMask(["channel"] * 4, 1)

    z = realNVP._generateWithContraction(x, realNVP.mask, realNVP.mask_, 2,
                                         True)
    #print(z)

    zz = realNVP._inferenceWithContraction(z, realNVP.mask, realNVP.mask_, 2,
                                           True)
    #print(zz)

    assert_array_almost_equal(x.data.numpy(), zz.data.numpy())
    assert_array_almost_equal(realNVP._generateLogjac.data.numpy(),
                              -realNVP._inferenceLogjac.data.numpy())
Example #23
def test_forward():
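    # forward() returns per-sample log-probabilities (shape [3]); after setting pointer to "generate" it returns configurations of shape [3, 2, 4, 4].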
    gaussian3d = Gaussian([2, 4, 4])
    x = gaussian3d(3)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0], [1, 2, 1, 0]]
    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    z = realNVP(x)
    assert (list(z.data.shape) == [3])
    #assert(z.shape ==)
    realNVP.pointer = "generate"
    z = realNVP(x)
    assert (list(z.data.shape) == [3, 2, 4, 4])
Example #24
def test_template_slice_function():
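    # Slice-based round trip with the model's default masks; the log-Jacobians should cancel.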
    gaussian3d = Gaussian([2, 4, 4])
    x = gaussian3d(3)
    #z3dp = z3d[:,0,:,:].view(10,-1,4,4)
    #print(z3dp)

    #print(x)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0],
                    [1, 2, 1, 0]]  # [channel, filter_size, stride, padding]

    sList3d = [
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure)
    ]
    tList3d = [
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure),
        CNN(netStructure)
    ]

    realNVP = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)

    z = realNVP._generateWithSlice(x, 0, True)
    #print(z)
    zz = realNVP._inferenceWithSlice(z, 0, True)

    #print(zz)

    assert_array_almost_equal(x.data.numpy(), zz.data.numpy())
    #print(realNVP._generateLogjac.data.numpy())
    #print(realNVP._inferenceLogjac.data.numpy())
    assert_array_almost_equal(realNVP._generateLogjac.data.numpy(),
                              -realNVP._inferenceLogjac.data.numpy())
Example #25
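# Script fragment: rebuild a MERA model from saved weights and generate samples while recording the intermediate tensors at each RG step.
# Assumes kernel_size, Nvars, Nlayers, Hs, Ht, L, d, Ndisentangler and args are defined earlier in the script.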
mlpsize = int(np.prod(np.array(kernel_size)))
nperdepth = Ndisentangler + 1  # number of disentanglers plus one decimator at each RG step
depth = int(math.log(Nvars, mlpsize))

print('depth of the mera network', depth)
sList = [[
    MLPreshape(mlpsize, Hs, activation=ScalableTanh([mlpsize]))
    for _ in range(Nlayers)
] for l in range(nperdepth * depth)]

tList = [[MLPreshape(mlpsize, Ht) for _ in range(Nlayers)]
         for l in range(nperdepth * depth)]

masktypelist = ['channel', 'channel'] * (Nlayers // 2)
prior = Gaussian([L, L])
# assemble RNVP blocks into a MERA
layers = [
    RealNVP(kernel_size, sList[l], tList[l], None, masktypelist)
    for l in range(nperdepth * depth)
]

model = MERA(d, kernel_size, Nvars, layers, prior, metaDepth=Ndisentangler + 1)

model.loadModel(torch.load(args.modelname))

z = prior(args.batch)

x = model.generate(z, save=True)

N = len(model.saving) // (Ndisentangler + 1)
Example #26
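    # Script fragment: choose the target distribution and the prior from command-line arguments.
    # Assumes args and the target/prior classes (Mog2, Phi4, Ising, GMM, Gaussian) are defined or imported earlier in the script.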
    elif args.target == 'mog2':
        target = Mog2(args.offset)
    elif args.target == 'phi4':
        target = Phi4(4,2,0.15,1.145)
    elif args.target == 'ising':
        target = Ising(args.L, args.d, args.T, cuda, args.double)
    else:
        print('what target?', args.target)
        sys.exit(1)

    Nvars = target.nvars 

    if args.prior == 'gaussian':
        if args.d == 2:
            input_size = [args.L, args.L]
            prior = Gaussian([args.L, args.L], requires_grad=args.train_prior)
        else:
            input_size = [Nvars]
            prior = Gaussian([Nvars], requires_grad=args.train_prior)
    elif args.prior == 'gmm':
        prior = GMM([Nvars])
    else:
        print('what prior?', args.prior)
        sys.exit(1)
    print('prior:', prior.name)

    key = args.folder \
          + args.target 

    if (args.target=='ising'):
        key += '_L' + str(args.L)\
Example #27
def test_tempalte_invertibleCNN():
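    # CNN-based 3D RealNVP: _generate/_inference with channel masks should be inverses with cancelling log-Jacobians, and a reloaded model should reproduce the output.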

    gaussian3d = Gaussian([2, 4, 4])
    x3d = gaussian3d(3)
    #z3dp = z3d[:,0,:,:].view(10,-1,4,4)
    #print(z3dp)

    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0],
                    [2, 2, 1, 0]]  # [channel, filter_size, stride, padding]

    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP3d = RealNVP([2, 4, 4], sList3d, tList3d, gaussian3d)
    mask3d = realNVP3d.createMask(["channel"] * 4, ifByte=0)

    print("Testing 3d")
    print("3d original:")
    #print(x3d)

    z3d = realNVP3d._generate(x3d, realNVP3d.mask, realNVP3d.mask_, True)
    print("3d forward:")
    #print(z3d)

    zp3d = realNVP3d._inference(z3d, realNVP3d.mask, realNVP3d.mask_, True)
    print("Backward")
    #print(zp3d)

    assert_array_almost_equal(realNVP3d._generateLogjac.data.numpy(),
                              -realNVP3d._inferenceLogjac.data.numpy())

    print("3d logProbability")
    print(realNVP3d._logProbability(z3d, realNVP3d.mask, realNVP3d.mask_))

    saveDict3d = realNVP3d.saveModel({})
    torch.save(saveDict3d, './saveNet3d.testSave')
    # realNVP.loadModel({})
    sListp3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tListp3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVPp3d = RealNVP([2, 4, 4], sListp3d, tListp3d, gaussian3d)
    saveDictp3d = torch.load('./saveNet3d.testSave')
    realNVPp3d.loadModel(saveDictp3d)

    zz3d = realNVPp3d._generate(x3d, realNVPp3d.mask, realNVPp3d.mask_)
    print("3d Forward after restore")
    #print(zz3d)

    assert_array_almost_equal(x3d.data.numpy(), zp3d.data.numpy())
    assert_array_almost_equal(zz3d.data.numpy(), z3d.data.numpy())
Example #28
def test_3d():
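    # High-level generate/inference round trip for a CNN-based 3D RealNVP with its default masks, plus a save/load check.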

    gaussian3d = Gaussian([2, 4, 4])
    x3d = gaussian3d(3)
    #z3dp = z3d[:,0,:,:].view(10,-1,4,4)
    #print(z3dp)

    #print(x)
    netStructure = [[3, 2, 1, 1], [4, 2, 1, 1], [3, 2, 1, 0],
                    [1, 2, 1, 0]]  # [channel, filter_size, stride, padding]

    sList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tList3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVP3d = RealNVP([2, 4, 4], sList3d, tList3d,
                        gaussian3d)  #,maskType = "checkerboard")
    print(realNVP3d.mask)
    #mask3d = realNVP3d.createMask()

    assert realNVP3d.mask.shape[0] == 4
    assert realNVP3d.mask.shape[1] == 2
    assert realNVP3d.mask.shape[2] == 4
    assert realNVP3d.mask.shape[3] == 4

    print("test high dims")

    print("Testing 3d")
    print("3d original:")
    #print(x3d)

    z3d = realNVP3d.generate(x3d)
    print("3d forward:")
    #print(z3d)

    zp3d = realNVP3d.inference(z3d)
    print("Backward")
    #print(zp3d)

    print("3d logProbability")
    print(realNVP3d.logProbability(z3d))

    saveDict3d = realNVP3d.saveModel({})
    torch.save(saveDict3d, './saveNet3d.testSave')
    # realNVP.loadModel({})
    sListp3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]
    tListp3d = [
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2),
        CNN(netStructure, inchannel=2)
    ]

    realNVPp3d = RealNVP([2, 4, 4], sListp3d, tListp3d, gaussian3d)
    saveDictp3d = torch.load('./saveNet3d.testSave')
    realNVPp3d.loadModel(saveDictp3d)

    zz3d = realNVPp3d.generate(x3d)
    print("3d Forward after restore")
    #print(zz3d)

    assert_array_almost_equal(x3d.data.numpy(), zp3d.data.numpy())
    assert_array_almost_equal(zz3d.data.numpy(), z3d.data.numpy())
Example #29
def test_mera_1d():
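    # Build the 1D MERA wiring by hand as a HierarchyBijector: inference/generate should be inverses, the log-Jacobians should cancel, and a reloaded model should reproduce the result.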
    masks = [
        Variable(torch.ByteTensor([1, 0, 1, 0, 1, 0, 1, 0])),
        Variable(torch.ByteTensor([1, 0, 0, 0, 1, 0, 0, 0]))
    ]
    masks_ = [
        Variable(torch.ByteTensor([0, 1, 0, 1, 0, 1, 0, 1])),
        Variable(torch.ByteTensor([0, 1, 1, 1, 0, 1, 1, 1]))
    ]

    rollList = [
        Placeholder(),
        Roll(1, 1),
        Placeholder(),
        Roll(1, 1),
        Placeholder(),
        Roll(1, 1)
    ]
    maskList = [
        Placeholder(2),
        Placeholder(2),
        Mask(masks[0], masks_[0]),
        Mask(masks[0], masks_[0]),
        Mask(masks[1], masks_[1]),
        Mask(masks[1], masks_[1])
    ]
    Nlayers = 4
    Hs = 10
    Ht = 10
    sList = [MLP(2, Hs) for _ in range(Nlayers)]
    tList = [MLP(2, Ht) for _ in range(Nlayers)]
    masktypelist = ['channel', 'channel'] * (Nlayers // 2)
    # assemble RNVP blocks into a HierarchyBijector
    prior = Gaussian([8])
    layers = [
        RealNVP([2], sList, tList, Gaussian([2]), masktypelist)
        for _ in range(6)
    ]
    model = HierarchyBijector(1, [2 for _ in range(6)], rollList, layers,
                              maskList, Gaussian([8]))
    z = prior(4)
    print(z)
    x = model.inference(z, True)
    print(x)
    fLog = model._inferenceLogjac
    print(model._inferenceLogjac)
    zz = model.generate(x, True)
    print(zz)
    bLog = model._generateLogjac
    print(model._generateLogjac)

    print(model.sample(10))

    saveDict = model.saveModel({})
    torch.save(saveDict, './savetest.testSave')

    masksp = [
        Variable(torch.ByteTensor([1, 0, 1, 0, 1, 0, 1, 0])),
        Variable(torch.ByteTensor([1, 0, 0, 0, 1, 0, 0, 0]))
    ]
    masks_p = [
        Variable(torch.ByteTensor([0, 1, 0, 1, 0, 1, 0, 1])),
        Variable(torch.ByteTensor([0, 1, 1, 1, 0, 1, 1, 1]))
    ]

    rollListp = [
        Placeholder(),
        Roll(1, 1),
        Placeholder(),
        Roll(1, 1),
        Placeholder(),
        Roll(1, 1)
    ]
    maskListp = [
        Placeholder(2),
        Placeholder(2),
        Mask(masksp[0], masks_p[0]),
        Mask(masksp[0], masks_p[0]),
        Mask(masksp[1], masks_p[1]),
        Mask(masksp[1], masks_p[1])
    ]
    Nlayersp = 4
    Hsp = 10
    Htp = 10
    sListp = [MLP(2, Hsp) for _ in range(Nlayersp)]
    tListp = [MLP(2, Htp) for _ in range(Nlayersp)]
    masktypelistp = ['channel', 'channel'] * (Nlayersp // 2)
    # assemble RNVP blocks into a HierarchyBijector
    priorp = Gaussian([8])
    layersp = [
        RealNVP([2], sListp, tListp, Gaussian([2]), masktypelistp)
        for _ in range(6)
    ]
    modelp = HierarchyBijector(1, [2 for _ in range(6)], rollListp, layersp,
                               maskListp, Gaussian([8]))

    saveDictp = torch.load('./savetest.testSave')
    modelp.loadModel(saveDictp)

    xp = modelp.inference(z)

    print(xp)

    assert_array_almost_equal(z.data.numpy(), zz.data.numpy())
    assert_array_almost_equal(fLog.data.numpy(), -bLog.data.numpy())
    assert_array_almost_equal(xp.data.numpy(), x.data.numpy())