Example 1
0
 def _logDerivFactorSigma(self, sample, x, invSigma, factorSigma):
     """Log-derivative of the Gaussian log-density w.r.t. the factorized
     covariance matrix, returned in flattened upper-triangular form."""
     # deviation of the sample from the distribution center
     delta = sample - x
     # gradient w.r.t. the full covariance matrix
     grad = 0.5 * dot(dot(invSigma, outer(delta, delta)), invSigma) - 0.5 * invSigma
     if self.vanillaScale:
         # rescale entry-wise by the outer product of |diag(factorSigma)|
         scale = diag(abs(self.factorSigma))
         grad = multiply(outer(scale, scale), grad)
     symmetrized = grad + grad.T
     return triu2flat(dot(factorSigma, symmetrized))
Example 2
0
    def _logDerivsFactorSigma(self, samples, mu, invSigma, factorSigma):
        """Compute the log-derivatives w.r.t. the factorized covariance matrix
        components, one row per sample.

        This implementation should be faster than the one in Vanilla.
        """
        invFactor = inv(factorSigma)
        diagPart = diag(diag(invFactor))
        nSigmaParams = self.numDistrParams - self.numParameters
        derivs = zeros((len(samples), nSigmaParams))
        for row, sample in enumerate(samples):
            # whiten the deviation from the mean with the inverse factor
            whitened = dot(invFactor.T, sample - mu)
            derivs[row] = triu2flat(outer(whitened, dot(invFactor, whitened)) - diagPart)
        return derivs
Example 3
0
    def _logDerivsFactorSigma(self, samples, mu, invSigma, factorSigma):
        """Log-derivatives w.r.t. the components of the factorized covariance.

        Faster than the generic implementation in Vanilla; returns one
        flattened upper-triangular gradient per sample.
        """
        A_inv = inv(factorSigma)
        A_inv_diag = diag(diag(A_inv))
        out = zeros((len(samples), self.numDistrParams - self.numParameters))
        for idx, smp in enumerate(samples):
            centered = smp - mu
            z = dot(A_inv.T, centered)
            gradient = outer(z, dot(A_inv, z)) - A_inv_diag
            out[idx] = triu2flat(gradient)
        return out
Example 4
0
File: ves.py Project: avain/pybrain
 def _logDerivFactorSigma(self, sample, x, invSigma, factorSigma):
     """Flattened upper-triangular log-derivative w.r.t. the factorized
     covariance matrix for a single sample."""
     diff = sample - x
     dSigma = 0.5 * dot(dot(invSigma, outer(diff, diff)), invSigma) - 0.5 * invSigma
     if self.vanillaScale:
         # element-wise rescaling by |diag(factorSigma)| outer itself
         d = diag(abs(self.factorSigma))
         dSigma = multiply(outer(d, d), dSigma)
     return triu2flat(dot(factorSigma, dSigma + dSigma.T))
Example 5
0
    def _calcBatchUpdate(self, fitnesses):
        """Compute the parameter update vector from the most recent batch.

        :param fitnesses: fitness values, one per sample of the current
            batch (length ``self.batchSize``).
        :return: update vector of length ``self.numDistrParams`` (mean
            parameters first, then the flattened upper-triangular covariance
            parameters), averaged over the batch.
        """
        samples = self.allSamples[-self.batchSize:]
        d = self.numParameters
        invA = inv(self.factorSigma)
        invSigma = inv(self.sigma)
        diagInvA = diag(diag(invA))

        # efficient computation of V, which corresponds to inv(Fisher)*logDerivs
        V = zeros((self.numDistrParams, self.batchSize))
        # u is used to compute the uniform baseline
        u = zeros(self.numDistrParams)
        for i in range(self.batchSize):
            # log-derivatives: first d entries w.r.t. the mean, the rest
            # w.r.t. the flattened factorized covariance
            s = dot(invA.T, (samples[i] - self.x))
            R = outer(s, dot(invA, s)) - diagInvA
            flatR = triu2flat(R)
            u[:d] += fitnesses[i] * (samples[i] - self.x)
            u[d:] += fitnesses[i] * flatR
            V[:d, i] += samples[i] - self.x
            V[d:, i] += flatR

        # Apply the inverse-Fisher blocks to the covariance part of u and V,
        # walking backwards from the last covariance parameter; j tracks the
        # index of the last entry of the current block.
        j = self.numDistrParams - 1
        D = 1 / invSigma[-1, -1]
        # G corresponds to the blocks of the inv(Fisher)
        G = 1 / (invSigma[-1, -1] + invA[-1, -1]**2)

        u[j] = dot(G, u[j])
        V[j, :] = dot(G, V[j, :])
        j -= 1
        for k in reversed(range(d - 1)):
            # NOTE(review): D and G are grown by one row/column per iteration;
            # the r/t (and rg/tg) scalars appear to follow the block-matrix
            # inversion identities — confirm against the VES paper.
            p = invSigma[k + 1:, k]
            w = invSigma[k, k]
            wg = w + invA[k, k]**2
            q = dot(D, p)
            c = dot(p, q)
            r = 1 / (w - c)
            rg = 1 / (wg - c)
            t = -(1 + r * c) / w
            tg = -(1 + rg * c) / wg

            G = blockCombine([[rg, tg * q],
                              [mat(tg * q).T, D + rg * outer(q, q)]])
            D = blockCombine([[r, t * q], [mat(t * q).T, D + r * outer(q, q)]])
            u[j - (d - k - 1):j + 1] = dot(G, u[j - (d - k - 1):j + 1])
            V[j - (d - k - 1):j + 1, :] = dot(G, V[j - (d - k - 1):j + 1, :])
            j -= d - k

        # determine the update vector, according to different baselines.
        if self.baselineType == self.BLOCKBASELINE:
            # one scalar baseline per block of covariance parameters ...
            update = zeros(self.numDistrParams)
            vsquare = multiply(V, V)
            j = self.numDistrParams - 1
            for k in reversed(range(self.numParameters)):
                b0 = sum(vsquare[j - (d - k - 1):j + 1, :], 0)
                b = dot(b0, fitnesses) / sum(b0)
                update[j - (d - k - 1):j + 1] = dot(
                    V[j - (d - k - 1):j + 1, :], (fitnesses - b))
                j -= d - k
            # ... and one baseline for the mean parameters
            b0 = sum(vsquare[:j + 1, :], 0)
            b = dot(b0, fitnesses) / sum(b0)
            update[:j + 1] = dot(V[:j + 1, :], (fitnesses - b))

        elif self.baselineType == self.SPECIFICBASELINE:
            # an individual baseline for every single distribution parameter
            update = zeros(self.numDistrParams)
            vsquare = multiply(V, V)
            for j in range(self.numDistrParams):
                b = dot(vsquare[j, :], fitnesses) / sum(vsquare[j, :])
                update[j] = dot(V[j, :], (fitnesses - b))

        elif self.baselineType == self.UNIFORMBASELINE:
            # a single baseline shared by all parameters
            v = sum(V, 1)
            update = u - dot(v, u) / dot(v, v) * v

        elif self.baselineType == self.NOBASELINE:
            update = dot(V, fitnesses)

        else:
            raise NotImplementedError('No such baseline implemented')

        return update / self.batchSize
Example 6
0
    def _calcBatchUpdate(self, fitnesses):
        """Compute the parameter update vector from the most recent batch.

        :param fitnesses: fitness values, one per sample of the current
            batch (length ``self.batchSize``).
        :return: update vector of length ``self.numDistrParams`` (mean
            parameters first, then the flattened upper-triangular covariance
            parameters), averaged over the batch.
        """
        samples = self.allSamples[-self.batchSize:]
        d = self.numParameters
        invA = inv(self.factorSigma)
        invSigma = inv(self.sigma)
        diagInvA = diag(diag(invA))

        # efficient computation of V, which corresponds to inv(Fisher)*logDerivs
        V = zeros((self.numDistrParams, self.batchSize))
        # u is used to compute the uniform baseline
        u = zeros(self.numDistrParams)
        for i in range(self.batchSize):
            # log-derivatives: first d entries w.r.t. the mean, the rest
            # w.r.t. the flattened factorized covariance
            s = dot(invA.T, (samples[i] - self.x))
            R = outer(s, dot(invA, s)) - diagInvA
            flatR = triu2flat(R)
            u[:d] += fitnesses[i] * (samples[i] - self.x)
            u[d:] += fitnesses[i] * flatR
            V[:d, i] += samples[i] - self.x
            V[d:, i] += flatR

        # Apply the inverse-Fisher blocks to the covariance part of u and V,
        # walking backwards from the last covariance parameter; j tracks the
        # index of the last entry of the current block.
        j = self.numDistrParams - 1
        D = 1 / invSigma[-1, -1]
        # G corresponds to the blocks of the inv(Fisher)
        G = 1 / (invSigma[-1, -1] + invA[-1, -1] ** 2)

        u[j] = dot(G, u[j])
        V[j, :] = dot(G, V[j, :])
        j -= 1
        for k in reversed(range(d - 1)):
            # NOTE(review): D and G are grown by one row/column per iteration;
            # the r/t (and rg/tg) scalars appear to follow the block-matrix
            # inversion identities — confirm against the VES paper.
            p = invSigma[k + 1:, k]
            w = invSigma[k, k]
            wg = w + invA[k, k] ** 2
            q = dot(D, p)
            c = dot(p, q)
            r = 1 / (w - c)
            rg = 1 / (wg - c)
            t = -(1 + r * c) / w
            tg = -(1 + rg * c) / wg

            G = blockCombine([[rg, tg * q],
                              [mat(tg * q).T, D + rg * outer(q, q)]])
            D = blockCombine([[r , t * q],
                              [mat(t * q).T, D + r * outer(q, q)]])
            u[j - (d - k - 1):j + 1] = dot(G, u[j - (d - k - 1):j + 1])
            V[j - (d - k - 1):j + 1, :] = dot(G, V[j - (d - k - 1):j + 1, :])
            j -= d - k


        # determine the update vector, according to different baselines.
        if self.baselineType == self.BLOCKBASELINE:
            # one scalar baseline per block of covariance parameters ...
            update = zeros(self.numDistrParams)
            vsquare = multiply(V, V)
            j = self.numDistrParams - 1
            for k in reversed(range(self.numParameters)):
                b0 = sum(vsquare[j - (d - k - 1):j + 1, :], 0)
                b = dot(b0, fitnesses) / sum(b0)
                update[j - (d - k - 1):j + 1] = dot(V[j - (d - k - 1):j + 1, :], (fitnesses - b))
                j -= d - k
            # ... and one baseline for the mean parameters
            b0 = sum(vsquare[:j + 1, :], 0)
            b = dot(b0, fitnesses) / sum(b0)
            update[:j + 1] = dot(V[:j + 1, :], (fitnesses - b))

        elif self.baselineType == self.SPECIFICBASELINE:
            # an individual baseline for every single distribution parameter
            update = zeros(self.numDistrParams)
            vsquare = multiply(V, V)
            for j in range(self.numDistrParams):
                b = dot(vsquare[j, :], fitnesses) / sum(vsquare[j, :])
                update[j] = dot(V[j, :], (fitnesses - b))

        elif self.baselineType == self.UNIFORMBASELINE:
            # a single baseline shared by all parameters
            v = sum(V, 1)
            update = u - dot(v, u) / dot(v, v) * v

        elif self.baselineType == self.NOBASELINE:
            update = dot(V, fitnesses)

        else:
            raise NotImplementedError('No such baseline implemented')

        return update / self.batchSize