Example #1
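Common imports assumed by all of the tests below; the standard-library and third-party modules are listed explicitly, while the svGPFA-specific classes are only named in a comment because their exact module paths depend on the package layout.
import os
import math

import numpy as np
import torch
from scipy.io import loadmat

# The following svGPFA classes are also required and are assumed to be
# importable from the package (exact module paths not reproduced here):
# ExponentialQuadraticKernel, PeriodicKernel, IndPointsLocsKMS,
# IndPointsLocsAndAllTimesKMS, IndPointsLocsAndAssocTimesKMS,
# SVPosteriorOnIndPoints, SVPosteriorOnLatentsAllTimes,
# SVPosteriorOnLatentsAssocTimes, LinearSVEmbeddingAllTimes,
# LinearSVEmbeddingAssocTimes, PointProcessELLExpLink, KLDivergence,
# SVLowerBound, SVEM.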
def test_exponentialQuadraticKernelDiag():
    tol = 1e-6
    dataFilename = os.path.join(os.path.dirname(__file__),
                                "data/Kdiag_rbfKernel.mat")

    mat = loadmat(dataFilename)
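    # The MATLAB arrays keep the trial dimension last; permute(2, 0, 1) moves
    # it to the front so trials become the leading dimension.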
    t = torch.from_numpy(mat['X1']).type(torch.DoubleTensor).permute(2, 0, 1)
    leasKDiag = torch.from_numpy(mat['Gdiag']).type(
        torch.DoubleTensor).permute(2, 0, 1)
    lengthScale = float(mat['lengthscale'][0, 0])
    scale = float(mat['variance'][0, 0])
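    # Only the lengthscale is passed through setParams below; the scale is
    # fixed when the kernel is constructed.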
    params = torch.tensor([lengthScale])

    kernel = ExponentialQuadraticKernel(scale=scale)
    kernel.setParams(params=params)

    KDiag = kernel.buildKernelMatrixDiag(X=t)

    error = math.sqrt(((KDiag - leasKDiag)**2).flatten().mean())

    assert (error < tol)
Example #2
def test_eval_IndPointsLocsAndAllTimesKMS():
    tol = 1e-5
    tolKzzi = 6e-2
    dataFilename = os.path.join(os.path.dirname(__file__),
                                "data/BuildKernelMatrices.mat")

    mat = loadmat(dataFilename)
    nLatents = mat['Z'].shape[0]
    nTrials = mat['Z'][0, 0].shape[2]
    t = torch.from_numpy(mat['tt']).type(torch.DoubleTensor).permute(2, 0, 1)
    Z0 = [
        torch.from_numpy(mat['Z'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    leasKzz = [
        torch.from_numpy(mat['Kzz'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    leasKzzi = [
        torch.from_numpy(mat['Kzzi'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    leasKtz = [
        torch.from_numpy(mat['Ktz'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    leasKttDiag = torch.from_numpy(mat['Ktt']).type(
        torch.DoubleTensor).permute(2, 0, 1)
    kernelNames = mat["kernelNames"]
    hprs = mat["hprs"]

    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0, k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor(
                [float(hprs[k, 0][0]),
                 float(hprs[k, 0][1])],
                dtype=torch.double)
        elif np.char.equal(kernelNames[0, k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k, 0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s" % (kernelNames[k]))

    dataFilename = os.path.join(os.path.dirname(__file__),
                                "data/BuildKernelMatrices_fromSpikes.mat")
    mat = loadmat(dataFilename)
    Y = [
        torch.from_numpy(mat['Y'][tr, 0]).type(torch.DoubleTensor)
        for tr in range(nTrials)
    ]
    leasKtz_spikes = [[
        torch.from_numpy(mat['Ktz'][i, j]).type(torch.DoubleTensor)
        for j in range(nTrials)
    ] for i in range(nLatents)]
    leasKttDiag_spikes = [[
        torch.from_numpy(mat['Ktt'][i, j]).type(torch.DoubleTensor)
        for j in range(nTrials)
    ] for i in range(nLatents)]

    kmsParams0 = {"kernelsParams0": kernelsParams0, "inducingPointsLocs0": Z0}

    indPointsLocsAndAllTimesKMS = IndPointsLocsAndAllTimesKMS()
    indPointsLocsAndAllTimesKMS.setKernels(kernels=kernels)
    indPointsLocsAndAllTimesKMS.setTimes(times=t)
    indPointsLocsAndAllTimesKMS.setInitialParams(initialParams=kmsParams0)
    indPointsLocsAndAllTimesKMS.buildKernelsMatrices()

    Ktz_allTimes = indPointsLocsAndAllTimesKMS.getKtz()
    for k in range(len(Ktz_allTimes)):
        error = math.sqrt(((Ktz_allTimes[k] - leasKtz[k])**2).flatten().mean())
        assert (error < tol)

    KttDiag_allTimes = indPointsLocsAndAllTimesKMS.getKttDiag()
    error = math.sqrt(((KttDiag_allTimes - leasKttDiag)**2).flatten().mean())
    assert (error < tol)
Example #3
def test_computeMeansAndVars_assocTimes():
    tol = 5e-6
    dataFilename = os.path.join(
        os.path.dirname(__file__),
        "data/Estep_Objective_PointProcess_svGPFA.mat")

    mat = loadmat(dataFilename)
    nLatents = mat["Z"].shape[0]
    nTrials = mat["Z"][0,0].shape[2]
    qMu0 = [torch.from_numpy(mat["q_mu"][(0,i)]).type(torch.DoubleTensor).permute(2,0,1) for i in range(nLatents)]
    qSVec0 = [torch.from_numpy(mat["q_sqrt"][(0,i)]).type(torch.DoubleTensor).permute(2,0,1) for i in range(nLatents)]
    qSDiag0 = [torch.from_numpy(mat["q_diag"][(0,i)]).type(torch.DoubleTensor).permute(2,0,1) for i in range(nLatents)]
    Z0 = [torch.from_numpy(mat["Z"][(i,0)]).type(torch.DoubleTensor).permute(2,0,1) for i in range(nLatents)]
    Y = [torch.from_numpy(mat["Y"][tr,0]).type(torch.DoubleTensor) for tr in range(nTrials)]
    mu_k = [torch.from_numpy(mat["mu_k_Spikes"][0,tr]).type(torch.DoubleTensor) for tr in range(nTrials)]
    var_k = [torch.from_numpy(mat["var_k_Spikes"][0,tr]).type(torch.DoubleTensor) for tr in range(nTrials)]
    kernelNames = mat["kernelNames"]
    hprs = mat["hprs"]

    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0,k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k,0][0]),
                                              float(hprs[k,0][1])],
                                             dtype=torch.double)
        elif np.char.equal(kernelNames[0,k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k,0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s"%(kernelNames[k]))

    qU = SVPosteriorOnIndPoints()
    indPointsLocsKMS = IndPointsLocsKMS()
    indPointsLocsAndTimesKMS = IndPointsLocsAndAssocTimesKMS()
    qK = SVPosteriorOnLatentsAssocTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndTimesKMS)

    quParams0 = {"qMu0": qMu0, "qSVec0": qSVec0, "qSDiag0": qSDiag0}
    kmsParams0 = {"kernelsParams0": kernelsParams0,
                  "inducingPointsLocs0": Z0}
    qU.setInitialParams(initialParams=quParams0)

    indPointsLocsKMS.setKernels(kernels=kernels)
    indPointsLocsKMS.setInitialParams(initialParams=kmsParams0)
    indPointsLocsKMS.buildKernelsMatrices()

    indPointsLocsAndTimesKMS.setKernels(kernels=kernels)
    indPointsLocsAndTimesKMS.setInitialParams(initialParams=kmsParams0)
    indPointsLocsAndTimesKMS.setTimes(times=Y)
    indPointsLocsAndTimesKMS.buildKernelsMatrices()

    qKMu, qKVar = qK.computeMeansAndVars()

    for tr in range(nTrials):
        qKMuError = math.sqrt(((mu_k[tr] - qKMu[tr])**2).mean())
        assert (qKMuError < tol)
        qKVarError = math.sqrt(((var_k[tr] - qKVar[tr])**2).mean())
        assert (qKVarError < tol)
Example #4
def test_evalSumAcrossTrialsAndNeurons_pointProcessExpLink():
    tol = 3e-4
    yNonStackedFilename = os.path.join(os.path.dirname(__file__),
                                       "data/YNonStacked.mat")
    dataFilename = os.path.join(
        os.path.dirname(__file__),
        "data/Estep_Objective_PointProcess_svGPFA.mat")

    mat = loadmat(dataFilename)
    nLatents = len(mat['Z'])
    nTrials = mat['Z'][0, 0].shape[2]
    qMu0 = [
        torch.from_numpy(mat['q_mu'][(0, i)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    qSVec0 = [
        torch.from_numpy(mat['q_sqrt'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    qSDiag0 = [
        torch.from_numpy(mat['q_diag'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    t = torch.from_numpy(mat['ttQuad']).type(torch.DoubleTensor).permute(
        2, 0, 1)
    Z0 = [
        torch.from_numpy(mat['Z'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    C0 = torch.from_numpy(mat["C"]).type(torch.DoubleTensor)
    b0 = torch.from_numpy(mat["b"]).type(torch.DoubleTensor).squeeze()
    hermQuadPoints = torch.from_numpy(mat['xxHerm']).type(torch.DoubleTensor)
    hermQuadWeights = torch.from_numpy(mat['wwHerm']).type(torch.DoubleTensor)
    legQuadPoints = torch.from_numpy(mat['ttQuad']).type(
        torch.DoubleTensor).permute(2, 0, 1)
    legQuadWeights = torch.from_numpy(mat['wwQuad']).type(
        torch.DoubleTensor).permute(2, 0, 1)
    Elik = torch.from_numpy(mat['Elik'])
    kernelNames = mat["kernelNames"]
    hprs = mat["hprs"]

    yMat = loadmat(yNonStackedFilename)
    YNonStacked_tmp = yMat['YNonStacked']
    nNeurons = YNonStacked_tmp[0, 0].shape[0]
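    # YNonStacked[r][n]: 1D tensor with the spike times of neuron n in trial r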
    YNonStacked = [[[] for n in range(nNeurons)] for r in range(nTrials)]
    for r in range(nTrials):
        for n in range(nNeurons):
            YNonStacked[r][n] = torch.from_numpy(
                YNonStacked_tmp[r, 0][n, 0][:, 0]).type(torch.DoubleTensor)

    linkFunction = torch.exp

    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0, k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor(
                [float(hprs[k, 0][0]),
                 float(hprs[k, 0][1])],
                dtype=torch.double)
        elif np.char.equal(kernelNames[0, k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k, 0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s" % (kernelNames[k]))

    qUParams0 = {"qMu0": qMu0, "qSVec0": qSVec0, "qSDiag0": qSDiag0}
    qHParams0 = {"C0": C0, "d0": b0}
    kmsParams0 = {"kernelsParams0": kernelsParams0, "inducingPointsLocs0": Z0}
    initialParams = {
        "svPosteriorOnIndPoints": qUParams0,
        "kernelsMatricesStore": kmsParams0,
        "svEmbedding": qHParams0
    }
    quadParams = {
        "legQuadPoints": legQuadPoints,
        "legQuadWeights": legQuadWeights
    }

    qU = SVPosteriorOnIndPoints()
    indPointsLocsKMS = IndPointsLocsKMS()
    indPointsLocsAndAllTimesKMS = IndPointsLocsAndAllTimesKMS()
    indPointsLocsAndAssocTimesKMS = IndPointsLocsAndAssocTimesKMS()
    qKAllTimes = SVPosteriorOnLatentsAllTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAllTimesKMS)
    qKAssocTimes = SVPosteriorOnLatentsAssocTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAssocTimesKMS)
    qHAllTimes = LinearSVEmbeddingAllTimes(svPosteriorOnLatents=qKAllTimes)
    qHAssocTimes = LinearSVEmbeddingAssocTimes(
        svPosteriorOnLatents=qKAssocTimes)
    eLL = PointProcessELLExpLink(svEmbeddingAllTimes=qHAllTimes,
                                 svEmbeddingAssocTimes=qHAssocTimes)

    eLL.setKernels(kernels=kernels)
    eLL.setInitialParams(initialParams=initialParams)
    eLL.setMeasurements(measurements=YNonStacked)
    eLL.setQuadParams(quadParams=quadParams)
    eLL.buildKernelsMatrices()
    sELL = eLL.evalSumAcrossTrialsAndNeurons()

    sELLerror = abs(sELL - Elik)

    assert (sELLerror < tol)
Example #5
def test_evalSumAcrossLatentsTrials():
    tol = 1e-5
    dataFilename = os.path.join(
        os.path.dirname(__file__),
        "data/Estep_Objective_PointProcess_svGPFA.mat")

    mat = loadmat(dataFilename)
    nLatents = mat['q_sqrt'].shape[1]
    nTrials = mat['Z'][0, 0].shape[2]
    qMu0 = [
        torch.from_numpy(mat['q_mu'][(0, i)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    qSVec0 = [
        torch.from_numpy(mat['q_sqrt'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    qSDiag0 = [
        torch.from_numpy(mat['q_diag'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    Z0 = [
        torch.from_numpy(mat['Z'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    matKLDiv = torch.from_numpy(mat['KLd'])
    kernelNames = mat["kernelNames"]
    hprs = mat["hprs"]

    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0, k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor(
                [float(hprs[k, 0][0]),
                 float(hprs[k, 0][1])],
                dtype=torch.double)
        elif np.char.equal(kernelNames[0, k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k, 0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s" % (kernelNames[k]))

    qUParams0 = {"qMu0": qMu0, "qSVec0": qSVec0, "qSDiag0": qSDiag0}
    kmsParams0 = {"kernelsParams0": kernelsParams0, "inducingPointsLocs0": Z0}

    indPointsLocsKMS = IndPointsLocsKMS()
    qU = SVPosteriorOnIndPoints()
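    # KL divergence between the variational posterior on the inducing points
    # and their GP prior, to be summed across latents and trials.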
    klDiv = KLDivergence(indPointsLocsKMS=indPointsLocsKMS,
                         svPosteriorOnIndPoints=qU)

    qU.setInitialParams(initialParams=qUParams0)
    indPointsLocsKMS.setKernels(kernels=kernels)
    indPointsLocsKMS.setInitialParams(initialParams=kmsParams0)
    indPointsLocsKMS.buildKernelsMatrices()
    klDivEval = klDiv.evalSumAcrossLatentsAndTrials()

    klError = abs(matKLDiv - klDivEval)

    assert (klError < tol)
Example #6
def test_computeMeansAndVars_allTimes():
    tol = 5e-6
    dataFilename = os.path.join(
        os.path.dirname(__file__),
        "data/Estep_Objective_PointProcess_svGPFA.mat")

    mat = loadmat(dataFilename)
    nLatents = mat["Z"].shape[0]
    nTrials = mat["Z"][0, 0].shape[2]
    qMu0 = [
        torch.from_numpy(mat["q_mu"][(0, i)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    qSVec0 = [
        torch.from_numpy(mat["q_sqrt"][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    qSDiag0 = [
        torch.from_numpy(mat["q_diag"][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    t = torch.from_numpy(mat["ttQuad"]).type(torch.DoubleTensor).permute(
        2, 0, 1)
    Z0 = [
        torch.from_numpy(mat["Z"][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    Y = [
        torch.from_numpy(mat["Y"][tr, 0]).type(torch.DoubleTensor)
        for tr in range(nTrials)
    ]
    C0 = torch.from_numpy(mat["C"]).type(torch.DoubleTensor)
    b0 = torch.from_numpy(mat["b"]).type(torch.DoubleTensor)
    mu_h = torch.from_numpy(mat["mu_h_Quad"]).type(torch.DoubleTensor).permute(
        2, 0, 1)
    var_h = torch.from_numpy(mat["var_h_Quad"]).type(
        torch.DoubleTensor).permute(2, 0, 1)
    kernelNames = mat["kernelNames"]
    hprs = mat["hprs"]

    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0, k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor(
                [float(hprs[k, 0][0]),
                 float(hprs[k, 0][1])],
                dtype=torch.double)
        elif np.char.equal(kernelNames[0, k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k, 0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s" % (kernelNames[k]))

    qU = SVPosteriorOnIndPoints()
    indPointsLocsKMS = IndPointsLocsKMS()
    indPointsLocsAndAllTimesKMS = IndPointsLocsAndAllTimesKMS()
    qK = SVPosteriorOnLatentsAllTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAllTimesKMS)
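    # Linear embedding of the latents: the mixing matrix C0 and offset d0 map
    # the latent posterior means/variances to the embedding means/variances.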
    qH = LinearSVEmbeddingAllTimes(svPosteriorOnLatents=qK)
    qH.setKernels(kernels=kernels)

    qUParams0 = {"qMu0": qMu0, "qSVec0": qSVec0, "qSDiag0": qSDiag0}
    qHParams0 = {"C0": C0, "d0": b0}
    kmsParams0 = {"kernelsParams0": kernelsParams0, "inducingPointsLocs0": Z0}
    initialParams = {
        "svPosteriorOnIndPoints": qUParams0,
        "kernelsMatricesStore": kmsParams0,
        "svEmbedding": qHParams0
    }
    qH.setInitialParams(initialParams=initialParams)
    qH.setTimes(times=t)
    qH.buildKernelsMatrices()
    qHMu, qHVar = qH.computeMeansAndVars()

    qHMuError = math.sqrt(((mu_h - qHMu)**2).mean())
    assert (qHMuError < tol)
    qHVarError = math.sqrt(((var_h - qHVar)**2).mean())
    assert (qHVarError < tol)
Example #7
def test_computeMeansAndVars_assocTimes():
    tol = 5e-6
    dataFilename = os.path.join(
        os.path.dirname(__file__),
        "data/Estep_Objective_PointProcess_svGPFA.mat")

    mat = loadmat(dataFilename)
    nLatents = mat["Z"].shape[0]
    nTrials = mat["Z"][0, 0].shape[2]
    qMu0 = [
        torch.from_numpy(mat["q_mu"][(0, i)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    qSVec0 = [
        torch.from_numpy(mat["q_sqrt"][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    qSDiag0 = [
        torch.from_numpy(mat["q_diag"][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    t = torch.from_numpy(mat["ttQuad"]).type(torch.DoubleTensor).permute(
        2, 0, 1)
    Z0 = [
        torch.from_numpy(mat["Z"][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    Y = [
        torch.from_numpy(mat["Y"][tr, 0]).type(torch.DoubleTensor)
        for tr in range(nTrials)
    ]
    C0 = torch.from_numpy(mat["C"]).type(torch.DoubleTensor)
    b0 = torch.from_numpy(mat["b"]).type(torch.DoubleTensor)
    mu_h = [
        torch.from_numpy(mat["mu_h_Spikes"][0, i]).type(
            torch.DoubleTensor).squeeze() for i in range(nTrials)
    ]
    var_h = [
        torch.from_numpy(mat["var_h_Spikes"][0, i]).type(
            torch.DoubleTensor).squeeze() for i in range(nTrials)
    ]
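    # index[tr] maps each spike in trial tr to the neuron it belongs to,
    # converted from MATLAB's 1-based indexing to Python's 0-based indexing.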
    index = [
        torch.from_numpy(mat["index"][i, 0][:, 0]).type(torch.ByteTensor) - 1
        for i in range(nTrials)
    ]

    kernelNames = mat["kernelNames"]
    hprs = mat["hprs"]
    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0, k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor(
                [float(hprs[k, 0][0]),
                 float(hprs[k, 0][1])],
                dtype=torch.double)
        elif np.char.equal(kernelNames[0, k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k, 0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s" % (kernelNames[k]))

    qU = SVPosteriorOnIndPoints()
    indPointsLocsKMS = IndPointsLocsKMS()
    indPointsLocsAndAssocTimesKMS = IndPointsLocsAndAssocTimesKMS()
    qK = SVPosteriorOnLatentsAssocTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAssocTimesKMS)
    qH = LinearSVEmbeddingAssocTimes(svPosteriorOnLatents=qK)
    qH.setKernels(kernels=kernels)

    qUParams0 = {"qMu0": qMu0, "qSVec0": qSVec0, "qSDiag0": qSDiag0}
    qHParams0 = {"C0": C0, "d0": b0}
    kmsParams0 = {"kernelsParams0": kernelsParams0, "inducingPointsLocs0": Z0}
    initialParams = {
        "svPosteriorOnIndPoints": qUParams0,
        "kernelsMatricesStore": kmsParams0,
        "svEmbedding": qHParams0
    }
    qH.setInitialParams(initialParams=initialParams)
    qH.setTimes(times=Y)
    qH.setNeuronForSpikeIndex(neuronForSpikeIndex=index)

    # begin patches because we are not using SVPosteriorOnLatentsAssocTimes in
    # conjunction with SVPosteriorOnLatentsAllTimes
    qU.setInitialParams(initialParams=qUParams0)
    indPointsLocsKMS.setKernels(kernels=kernels)
    indPointsLocsKMS.setInitialParams(initialParams=kmsParams0)
    indPointsLocsKMS.buildKernelsMatrices()
    # end patches because we are not using SVPosteriorOnLatentsAssocTimes in
    # conjunction with SVPosteriorOnLatentsAllTimes

    qH.buildKernelsMatrices()
    qHMu, qHVar = qH.computeMeansAndVars()

    for i in range(len(mu_h)):
        qHMuError = math.sqrt(torch.sum(
            (mu_h[i] - qHMu[i])**2)) / mu_h[i].shape[0]
        assert (qHMuError < tol)
        qHVarError = math.sqrt(torch.sum(
            (var_h[i] - qHVar[i])**2)) / var_h[i].shape[0]
        assert (qHVarError < tol)
Example #8
def test_maximize_pointProcess():
    tol = 1e-5
    yNonStackedFilename = os.path.join(os.path.dirname(__file__),
                                       "data/YNonStacked.mat")
    dataFilename = os.path.join(os.path.dirname(__file__),
                                "data/variationalEM.mat")
    # yNonStackedFilename = os.path.expanduser("~/tmp/svGPFA/ci/data/YNonStacked.mat")
    # dataFilename = os.path.expanduser("~/tmp/svGPFA/ci/data/variationalEM.mat")

    mat = loadmat(dataFilename)
    nLatents = len(mat['Z0'])
    nTrials = mat['Z0'][0, 0].shape[2]
    qMu0 = [
        torch.from_numpy(mat['q_mu0'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    qSVec0 = [
        torch.from_numpy(mat['q_sqrt0'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    qSDiag0 = [
        torch.from_numpy(mat['q_diag0'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    Z0 = [
        torch.from_numpy(mat['Z0'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    C0 = torch.from_numpy(mat["C0"]).type(torch.DoubleTensor)
    b0 = torch.from_numpy(mat["b0"]).type(torch.DoubleTensor).squeeze()
    legQuadPoints = torch.from_numpy(mat['ttQuad']).type(
        torch.DoubleTensor).permute(2, 0, 1)
    legQuadWeights = torch.from_numpy(mat['wwQuad']).type(
        torch.DoubleTensor).permute(2, 0, 1)

    yMat = loadmat(yNonStackedFilename)
    YNonStacked_tmp = yMat['YNonStacked']
    nNeurons = YNonStacked_tmp[0, 0].shape[0]
    YNonStacked = [[[] for n in range(nNeurons)] for r in range(nTrials)]
    for r in range(nTrials):
        for n in range(nNeurons):
            YNonStacked[r][n] = torch.from_numpy(
                YNonStacked_tmp[r, 0][n, 0][:, 0]).type(torch.DoubleTensor)

    linkFunction = torch.exp

    kernelNames = mat["kernelNames"]
    hprs = mat["hprs0"]
    leasLowerBound = mat['lowerBound'][0, 0]
    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0, k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor(
                [float(hprs[k, 0][0]),
                 float(hprs[k, 0][1])],
                dtype=torch.double)
        elif np.char.equal(kernelNames[0, k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k, 0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s" % (kernelNames[k]))

    indPointsLocsKMSEpsilon = 1e-2
    qU = SVPosteriorOnIndPoints()
    indPointsLocsKMS = IndPointsLocsKMS(epsilon=indPointsLocsKMSEpsilon)
    indPointsLocsAndAllTimesKMS = IndPointsLocsAndAllTimesKMS()
    indPointsLocsAndAssocTimesKMS = IndPointsLocsAndAssocTimesKMS()
    qKAllTimes = SVPosteriorOnLatentsAllTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAllTimesKMS)
    qKAssocTimes = SVPosteriorOnLatentsAssocTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAssocTimesKMS)
    qHAllTimes = LinearSVEmbeddingAllTimes(svPosteriorOnLatents=qKAllTimes)
    qHAssocTimes = LinearSVEmbeddingAssocTimes(
        svPosteriorOnLatents=qKAssocTimes)
    eLL = PointProcessELLExpLink(svEmbeddingAllTimes=qHAllTimes,
                                 svEmbeddingAssocTimes=qHAssocTimes)
    klDiv = KLDivergence(indPointsLocsKMS=indPointsLocsKMS,
                         svPosteriorOnIndPoints=qU)
    svlb = SVLowerBound(eLL=eLL, klDiv=klDiv)
    svlb.setKernels(kernels=kernels)
    svEM = SVEM()

    qUParams0 = {"qMu0": qMu0, "qSVec0": qSVec0, "qSDiag0": qSDiag0}
    qHParams0 = {"C0": C0, "d0": b0}
    kmsParams0 = {"kernelsParams0": kernelsParams0, "inducingPointsLocs0": Z0}
    initialParams = {
        "svPosteriorOnIndPoints": qUParams0,
        "kernelsMatricesStore": kmsParams0,
        "svEmbedding": qHParams0
    }
    quadParams = {
        "legQuadPoints": legQuadPoints,
        "legQuadWeights": legQuadWeights
    }
    '''
    optimParams = {"emMaxNIter":20, 
                   #
                   "eStepMaxNIter":100,
                   "eStepTol":1e-3,
                   "eStepLR":1e-3,
                   "eStepNIterDisplay":10,
                   #
                   "mStepModelParamsMaxNIter":100,
                   "mStepModelParamsTol":1e-3,
                   "mStepModelParamsLR":1e-3,
                   "mStepModelParamsNIterDisplay":10,
                   #
                   "mStepKernelParamsMaxNIter":100, 
                   "mStepKernelParamsTol":1e-3,
                   "mStepKernelParamsLR":1e-5,
                   "mStepKernelParamsNIterDisplay":10,
                   #
                   "mStepIndPointsMaxNIter":100,
                   "mStepIndPointsParamsTol":1e-3,
                   "mStepIndPointsLR":1e-3, 
                   "mStepIndPointsNIterDisplay":10}
    '''
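    # Small iteration counts keep this test quick; each optimization block
    # (e-step, model params, kernel params, inducing points) can be switched
    # off via its *Estimate flag.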
    optimParams = {
        "emMaxNIter": 3,
        #
        "eStepEstimate": True,
        "eStepMaxNIter": 20,
        "eStepTol": 1e-2,
        "eStepLR": 1e-2,
        "eStepLineSearchFn": "strong_wolfe",
        "eStepNIterDisplay": 1,
        #
        "mStepModelParamsEstimate": True,
        "mStepModelParamsMaxNIter": 20,
        "mStepModelParamsTol": 1e-2,
        "mStepModelParamsLR": 1e-3,
        "mStepModelParamsLineSearchFn": "strong_wolfe",
        "mStepModelParamsNIterDisplay": 1,
        #
        "mStepKernelParamsEstimate": True,
        "mStepKernelParamsMaxNIter": 20,
        "mStepKernelParamsTol": 1e-2,
        "mStepKernelParamsLR": 1e-4,
        "mStepKernelParamsLineSearchFn": "strong_wolfe",
        "mStepKernelParamsNIterDisplay": 1,
        #
        "mStepIndPointsEstimate": True,
        "mStepIndPointsMaxNIter": 20,
        "mStepIndPointsTol": 1e-2,
        "mStepIndPointsLR": 1e-3,
        "mStepIndPointsLineSearchFn": "strong_wolfe",
        "mStepIndPointsNIterDisplay": 1,
        #
        "verbose": True
    }
    lowerBoundHist, elapsedTimeHist = svEM.maximize(
        model=svlb,
        measurements=YNonStacked,
        initialParams=initialParams,
        quadParams=quadParams,
        optimParams=optimParams,
        plotLatentsEstimates=False)
    assert (lowerBoundHist[-1] > leasLowerBound)
Example #9
def test_mStepIndPoints_pointProcess():
    tol = 1e-5
    yNonStackedFilename = os.path.join(os.path.dirname(__file__),
                                       "data/YNonStacked.mat")
    dataFilename = os.path.join(os.path.dirname(__file__),
                                "data/inducingPointsMstep_all.mat")

    mat = loadmat(dataFilename)
    nLatents = len(mat['Z0'])
    nTrials = mat['Z0'][0, 0].shape[2]
    qMu0 = [
        torch.from_numpy(mat['q_mu'][(0, i)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    qSVec0 = [
        torch.from_numpy(mat['q_sqrt'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    qSDiag0 = [
        torch.from_numpy(mat['q_diag'][(0, i)]).type(
            torch.DoubleTensor).permute(2, 0, 1) for i in range(nLatents)
    ]
    Z0 = [
        torch.from_numpy(mat['Z0'][(i, 0)]).type(torch.DoubleTensor).permute(
            2, 0, 1) for i in range(nLatents)
    ]
    C0 = torch.from_numpy(mat["C"]).type(torch.DoubleTensor)
    b0 = torch.from_numpy(mat["b"]).type(torch.DoubleTensor).squeeze()
    nLowerBound = mat['nLowerBound'][0, 0]
    legQuadPoints = torch.from_numpy(mat['ttQuad']).type(
        torch.DoubleTensor).permute(2, 0, 1)
    legQuadWeights = torch.from_numpy(mat['wwQuad']).type(
        torch.DoubleTensor).permute(2, 0, 1)

    yMat = loadmat(yNonStackedFilename)
    YNonStacked_tmp = yMat['YNonStacked']
    nNeurons = YNonStacked_tmp[0, 0].shape[0]
    YNonStacked = [[[] for n in range(nNeurons)] for r in range(nTrials)]
    for r in range(nTrials):
        for n in range(nNeurons):
            YNonStacked[r][n] = torch.from_numpy(
                YNonStacked_tmp[r, 0][n, 0][:, 0]).type(torch.DoubleTensor)

    linkFunction = torch.exp

    kernelNames = mat["kernelNames"]
    hprs = mat["hprs"]
    kernels = [[None] for k in range(nLatents)]
    kernelsParams0 = [[None] for k in range(nLatents)]
    for k in range(nLatents):
        if np.char.equal(kernelNames[0, k][0], "PeriodicKernel"):
            kernels[k] = PeriodicKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor(
                [float(hprs[k, 0][0]),
                 float(hprs[k, 0][1])],
                dtype=torch.double)
        elif np.char.equal(kernelNames[0, k][0], "rbfKernel"):
            kernels[k] = ExponentialQuadraticKernel(scale=1.0)
            kernelsParams0[k] = torch.tensor([float(hprs[k, 0][0])],
                                             dtype=torch.double)
        else:
            raise ValueError("Invalid kernel name: %s" % (kernelNames[k]))

    indPointsLocsKMSEpsilon = 1e-5
    qU = SVPosteriorOnIndPoints()
    indPointsLocsKMS = IndPointsLocsKMS(epsilon=indPointsLocsKMSEpsilon)
    indPointsLocsAndAllTimesKMS = IndPointsLocsAndAllTimesKMS()
    indPointsLocsAndAssocTimesKMS = IndPointsLocsAndAssocTimesKMS()
    qKAllTimes = SVPosteriorOnLatentsAllTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAllTimesKMS)
    qKAssocTimes = SVPosteriorOnLatentsAssocTimes(
        svPosteriorOnIndPoints=qU,
        indPointsLocsKMS=indPointsLocsKMS,
        indPointsLocsAndTimesKMS=indPointsLocsAndAssocTimesKMS)
    qHAllTimes = LinearSVEmbeddingAllTimes(svPosteriorOnLatents=qKAllTimes)
    qHAssocTimes = LinearSVEmbeddingAssocTimes(
        svPosteriorOnLatents=qKAssocTimes)
    eLL = PointProcessELLExpLink(svEmbeddingAllTimes=qHAllTimes,
                                 svEmbeddingAssocTimes=qHAssocTimes)
    klDiv = KLDivergence(indPointsLocsKMS=indPointsLocsKMS,
                         svPosteriorOnIndPoints=qU)
    svlb = SVLowerBound(eLL=eLL, klDiv=klDiv)
    svEM = SVEM()

    qUParams0 = {"qMu0": qMu0, "qSVec0": qSVec0, "qSDiag0": qSDiag0}
    qHParams0 = {"C0": C0, "d0": b0}
    kmsParams0 = {"kernelsParams0": kernelsParams0, "inducingPointsLocs0": Z0}
    initialParams = {
        "svPosteriorOnIndPoints": qUParams0,
        "kernelsMatricesStore": kmsParams0,
        "svEmbedding": qHParams0
    }
    quadParams = {
        "legQuadPoints": legQuadPoints,
        "legQuadWeights": legQuadWeights
    }

    svlb.setKernels(kernels=kernels)
    svlb.setInitialParams(initialParams=initialParams)
    svlb.setMeasurements(measurements=YNonStacked)
    svlb.setQuadParams(quadParams=quadParams)
    svlb.buildKernelsMatrices()

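    # Run only the inducing-points m-step (a private SVEM helper) for a few
    # iterations and check that the attained lower bound beats the MATLAB
    # reference value.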
    res = svEM._mStepIndPoints(model=svlb,
                               maxNIter=10,
                               tol=1e-3,
                               lr=1e-3,
                               lineSearchFn="strong_wolfe",
                               verbose=True,
                               nIterDisplay=1)

    assert (res["lowerBound"] > (-nLowerBound))