Example 1
# Assumed imports: numpy, autograd, and the mbi package providing Factor and
# graphical_model.CliqueVector. log_likelihood is expected to be an
# autograd-differentiable function defined in the enclosing module.
import numpy as np
from autograd import grad
from mbi import Factor, graphical_model

def marginal_loss(marginals, workload, cache):
    # Answer every workload query (proj, W) from a clique marginal that
    # supports it: project the marginal onto proj and apply W.
    answers = []
    for proj, W in workload:
        for cl in marginals:
            if set(proj) <= set(cl):
                mu = marginals[cl].project(proj)
                x = mu.values.flatten()
                answers.append(W.dot(x))
                break
    total = x.sum()  # total count, taken from the last projected marginal
    answers = np.concatenate(answers) / total

    # Loss and its gradient with respect to the normalized answers.
    gradient = grad(log_likelihood, argnum=0)
    loss = log_likelihood(answers, cache)
    danswers = gradient(answers, cache)

    # Back-propagate the answer gradients onto each clique marginal: the
    # slice of danswers belonging to (proj, W) maps back through W.T.
    i = 0
    gradients = {cl: Factor.zeros(marginals[cl].domain) for cl in marginals}
    for proj, W in workload:
        for cl in marginals:
            if set(proj) <= set(cl):
                m = W.shape[0]
                dmu = W.T.dot(danswers[i:i + m]) / total
                dom = gradients[cl].domain.project(proj)
                gradients[cl] += Factor(dom, dmu)
                i += m
                break

    print(loss)
    return loss, graphical_model.CliqueVector(gradients)
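For reference, here is a minimal, self-contained sketch of the same loss-and-gradient pattern using plain numpy arrays in place of Factor objects: autograd's grad turns a scalar loss over the flattened answer vector into a gradient that is then mapped back onto the marginal through W.T. The function toy_log_likelihood and the toy workload below are illustrative stand-ins, not part of the original code.

# Minimal sketch of the forward/backward pattern above (illustrative names).
import autograd.numpy as np
from autograd import grad

def toy_log_likelihood(answers, target):
    # simple squared-error surrogate for the real log-likelihood
    return -np.sum((answers - target) ** 2)

mu = np.array([10.0, 20.0, 30.0, 40.0])        # one marginal as unnormalized counts
W = np.eye(4)                                  # identity workload matrix
target = np.array([0.1, 0.2, 0.3, 0.4])        # observed (noisy) answers

total = mu.sum()
answers = W.dot(mu) / total                    # forward pass: workload answers
loss = toy_log_likelihood(answers, target)
danswers = grad(toy_log_likelihood, argnum=0)(answers, target)
dmu = W.T.dot(danswers) / total                # back-map onto the marginal
print(loss, dmu)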
Example 2
    # Assumed context: numpy as np, Factor, ProductDist and the _cluster
    # helper are available in the enclosing module.
    def multWeightsFast(self, measurements, total):
        # Cluster the measurements so each group is handled on a single
        # projected sub-domain, producing one factor per group.
        groups, projections = _cluster(measurements)
        factors = []
        for group, proj in zip(groups, projections):
            dom = self.domain.project(proj)
            fact = Factor.uniform(dom)
            for i in range(self.iters):
                # Accumulate the multiplicative-weights update from every
                # measurement (Q, y, noise_scale, p) in the group.
                update = Factor.zeros(dom)
                for Q, y, noise_scale, p in group:
                    dom2 = dom.project(p)
                    hatx = fact.project(p).values.flatten() * total
                    error = y - Q.dot(hatx)
                    update += Factor(dom2, Q.T.dot(error).reshape(dom2.shape))
                # Exponential update followed by renormalization.
                fact *= np.exp(update / (2 * total))
                fact /= fact.sum()
            factors.append(fact)

        # The final model is a product of the independent group factors.
        self.model = ProductDist(factors, self.domain, total)
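As a rough, standalone illustration of the update inside multWeightsFast, the sketch below runs the same multiplicative-weights step on a flat numpy histogram for a single identity-query measurement. The measurement (Q, y), the total, and the iteration count are made up for the example and are not part of the original class.

# Standalone sketch of the multiplicative-weights step (illustrative data).
import numpy as np

total = 100.0
Q = np.eye(4)                                  # identity query workload
y = np.array([30.0, 10.0, 40.0, 20.0])         # noisy measured answers
fact = np.ones(4) / 4                          # uniform starting distribution

for _ in range(50):
    hatx = fact * total                        # current synthetic answers
    error = y - Q.dot(hatx)                    # residual on the measurement
    fact = fact * np.exp(Q.T.dot(error) / (2 * total))  # multiplicative update
    fact = fact / fact.sum()                   # renormalize to a distribution

print(fact * total)                            # approaches the measured counts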