Example #1
import torch

# Logger, count_accuracy, adjacency_error and print_format are helpers from
# the surrounding project, assumed importable here.


class Validator:
    """Scores a learned graph against the ground-truth structure."""

    params = ['shd', 'tpr', 'fpr', 'err']

    def __init__(self, st_truth):
        self.log = Logger(*Validator.params)
        self.st_truth = st_truth
        self.n_nodes = self.st_truth.A.shape[-1]
        self.n_edges = len(self.st_truth.A.nonzero())  # nonzero entries = edges
        self.l1 = torch.sum(torch.abs(self.st_truth.A)).item()

    def validate(self, st_learned):
        # Threshold the learned graph to the true edge count before scoring.
        shd, tpr, fpr, _, _ = count_accuracy(self.st_truth.A.numpy(),
                                             st_learned.topk(self.n_edges).numpy())
        err = adjacency_error(self.st_truth.A.numpy(),
                              st_learned.A.numpy())
        self.log.append(self.logger(shd, tpr, fpr, err))
    
    def logger(self, shd, tpr, fpr, err):
        return dict(zip(Validator.params, (shd, tpr, fpr, err)))
    
    def print_summary(self):
        print(3 * " " + print_format([self.log]))
        
    def get_logger(self, var):
        if var in Validator.params:
            return self.log
        else:
            raise ValueError("invalid parameter %s" % var)
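A hedged usage sketch for Validator: truth and learned stand in for the structure objects this class expects (each exposing an adjacency tensor A, the learned one also the topk used in validate); the loop and every name in it are hypothetical, not from the source.

validator = Validator(truth)        # truth: ground-truth structure (stand-in)
for epoch in range(n_epochs):       # n_epochs, train_step: hypothetical
    learned = train_step()          # yields the current learned structure
    validator.validate(learned)     # logs shd / tpr / fpr / err
validator.print_summary()
shd_curve = validator.get_logger('shd')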
Example #2
import torch.nn as nn

# Logger, Losses, OptimModule and print_format are helpers from the
# surrounding project, assumed importable here.


class ELBOLoss(nn.Module):
    """Negative ELBO: KL divergence plus reconstruction NLL."""

    params = ['elbo', 'kld', 'nll']

    def __init__(self, opt=None):
        super(ELBOLoss, self).__init__()
        self.log = Logger(*ELBOLoss.params)   # per-epoch history
        self._log = Logger(*ELBOLoss.params)  # per-batch scratch buffer
        self.kld = None
        self.nll = None
        self.opt = opt

    def __call__(self, x, z, xx):
        self.kld = Losses.kld(z)
        self.nll = Losses.nll(x, xx)
        self._log.append(self.logger())

        # With an optimizer attached, take a gradient step here; otherwise
        # hand the loss terms back to the caller.
        if self.opt is not None:
            loss = self.kld + self.nll
            loss.backward()
            self.opt.step()
            self.opt.optimizer.zero_grad()
        else:
            return self.kld, self.nll

    def evolve(self):
        # Fold per-batch means into the epoch log and reset the buffer.
        self.log.append(self._log.mean())
        self._log.clear()

        if self.opt is not None:
            self.opt.evolve()

    def logger(self):
        # The 'elbo' slot records the full objective kld + nll
        # (i.e. the negative ELBO).
        kld, nll = self.kld.item(), self.nll.item()
        return dict(zip(ELBOLoss.params, (kld + nll, kld, nll)))

    def print_summary(self):
        print(3 * " " + print_format([self.log, self.opt.log], log10=True))

    def get_logger(self, var):
        if var in ELBOLoss.params:
            return self.log
        elif var in OptimModule.params:
            return self.opt.log
        else:
            raise ValueError("invalid parameter %s" % var)
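A hedged sketch of how the two-logger pattern is meant to be driven: __call__ accumulates per-batch values in _log, and evolve() folds their mean into log once per epoch. model, loader, opt and n_epochs below are hypothetical stand-ins, not part of the source.

criterion = ELBOLoss(opt=opt)      # opt: the project's optimizer wrapper
for epoch in range(n_epochs):
    for x in loader:
        z, xx = model(x)           # encode / decode (hypothetical API)
        criterion(x, z, xx)        # per batch: log, backward, step
    criterion.evolve()             # fold batch means into the epoch log
criterion.print_summary()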
Example #3
import torch
import torch.nn as nn

# Logger, Coefficients and OptimModule are helpers from the surrounding
# project, assumed importable here.


class InteractionLoss(nn.Module):
    """Acyclicity penalty plus sparsity regularization on an adjacency matrix."""

    params = ['h', 'l1']

    def __init__(self, acyc, coeffs, opt=None):
        super(InteractionLoss, self).__init__()
        self.log = Logger(*InteractionLoss.params)
        self._log = Logger(*InteractionLoss.params)
        self.norm = lambda x: torch.sum(torch.abs(x))  # elementwise L1 norm
        self.acyc = acyc
        self.coeffs = coeffs

        self.adj = None
        self.quad = None
        self.reg = None
        self.opt = opt

    @classmethod
    def poly(cls, n_nodes, **coeffs):
        # Polynomial (trace-based) acyclicity measure, see h_poly below.
        acyc = lambda x: InteractionLoss.h_poly(x, n_nodes)
        return cls(acyc, Coefficients(**coeffs))

    @classmethod
    def ordered(cls, node_dict, **coeffs):
        # Ordering-based measure for blocked nodes, see h_ordered below.
        acyc = lambda x: InteractionLoss.h_ordered(x, node_dict)
        return cls(acyc, Coefficients(**coeffs))

    def __call__(self, adj):
        # Augmented-Lagrangian penalty l*h + 0.5*c*h^2 on the acyclicity
        # measure h(adj), computed once instead of twice.
        h = self.acyc(adj)
        self.quad = self.coeffs.l * h + 0.5 * self.coeffs.c * h**2
        self.reg = (self.coeffs.tau * self.norm(adj) +
                    self.coeffs.tr * torch.trace(adj**2))
        self.adj = adj.detach()
        self._log.append(self.logger())

        if self.opt is not None:
            loss = self.quad + self.reg
            loss.backward()
            self.opt.step()
            self.opt.optimizer.zero_grad()
        else:
            return self.quad, self.reg

    def evolve(self):
        # Keep the last batch value (not a mean) for the epoch log.
        self.log.append(self._log.last())
        self._log.clear()

        self.coeffs.log.append(self.coeffs.logger())

        if self.opt is not None:
            self.opt.evolve()

    def logger(self):
        return {
            'h': self.acyc(self.adj).item(),
            'l1': self.norm(self.adj).item()
        }

    def get_logger(self, var):
        if var in InteractionLoss.params:
            return self.log
        elif var in OptimModule.params:
            return self.opt.log
        else:
            raise ValueError("invalid parameter %s" % var)

    @staticmethod
    def h_poly(adj, n_nodes):
        # NOTEARS-style polynomial constraint:
        # h(A) = tr((I + (1/d) * A∘A)^d) - d, zero iff A is acyclic,
        # where A∘A is the elementwise square and d the node count.
        alpha = (1 / n_nodes)
        x = torch.eye(n_nodes) + alpha * (adj**2)
        return torch.trace(torch.matrix_power(x, n_nodes)) - n_nodes

    @staticmethod
    def h_ordered(adj, node_dict):
        # Penalize edges into each block from the same or any later block
        # (assuming adj[i, j] encodes an edge i -> j), enforcing the
        # ordering given by node_dict.
        h, step = 0.0, 0
        for t, X in node_dict.items():
            h += torch.sum(adj[step:, step:step + len(X)]**2)
            step += len(X)
        return h
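A quick self-contained check of the polynomial measure (needs only torch and the class above): h_poly is zero for a DAG and strictly positive once a cycle appears.

dag = torch.tensor([[0., 1.], [0., 0.]])  # acyclic: 0 -> 1
cyc = torch.tensor([[0., 1.], [1., 0.]])  # two-cycle: 0 <-> 1
print(InteractionLoss.h_poly(dag, 2))     # tensor(0.)
print(InteractionLoss.h_poly(cyc, 2))     # tensor(0.5000)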
Example #4
import torch.nn as nn

# Logger, Losses, OptimModule, print_format and rand_norm are helpers from
# the surrounding project, assumed importable here.


class InfoLoss(nn.Module):
    """MMD-based latent matching plus reconstruction NLL."""

    params = ['elbo', 'mmd', 'nll']

    def __init__(self, mask, chain, opt, beta=1.0, gamma=500.0, reg=0.2):
        super(InfoLoss, self).__init__()
        self.log = Logger(*InfoLoss.params)   # per-epoch history
        self._log = Logger(*InfoLoss.params)  # per-batch scratch buffer

        self.mask = mask
        self.chain = tuple(chain)
        self.opt = opt

        self.beta = beta    # weight on the prior-matching MMD term
        self.gamma = gamma  # weight on the chain-matching MMD terms
        self.reg = reg      # regularization passed to Losses.cmmd as l

        self.mmd = None
        self.nll = None

    def __call__(self, x, z, zz, xx):
        # Fresh standard-normal sample for the prior-matching MMD term.
        std_norm = rand_norm(0.0, 1.0, z.shape[0],
                             z.shape[1]).unsqueeze(-1).double()

        self.mmd = self.beta * Losses.cmmd(z, std_norm)
        # Pairwise conditional-MMD terms over the three chain blocks.
        self.mmd += self.gamma * (
            Losses.cmmd(
                zz, z, endo=self.chain[0:1], exo=self.chain[1:2], l=self.reg) +
            Losses.cmmd(
                zz, z, endo=self.chain[1:2], exo=self.chain[2:3], l=self.reg) +
            Losses.cmmd(
                zz, z, endo=self.chain[0:1], exo=self.chain[2:3], l=self.reg))
        self.nll = Losses.nll(x, xx)

        self._log.append(self.logger())

        if self.opt is not None:
            loss = self.mmd + self.nll
            loss.backward()
            self.opt.step()
            self.opt.optimizer.zero_grad()
        else:
            return self.mmd, self.nll

    def evolve(self):
        self.log.append(self._log.mean())
        self._log.clear()

        if self.opt is not None:
            self.opt.evolve()

    def logger(self):
        # As in ELBOLoss, the 'elbo' slot records the full objective.
        mmd, nll = self.mmd.item(), self.nll.item()
        return dict(zip(InfoLoss.params, (mmd + nll, mmd, nll)))

    def print_summary(self):
        print(3 * " " + print_format([self.log, self.opt.log], log10=True))

    def get_logger(self, var):
        if var in InfoLoss.params:
            return self.log
        elif var in OptimModule.params:
            return self.opt.log
        else:
            raise ValueError("invalid parameter %s" % var)
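One hedged note on the pattern above: get_logger serves loss parameters from the criterion's own log and routes anything in OptimModule.params to the wrapped optimizer's log; any other name raises ValueError. In the sketch below, mask, opt and the chain entries are stand-ins; note the chain needs at least three entries, since __call__ slices chain[0:1], chain[1:2] and chain[2:3].

criterion = InfoLoss(mask, chain=(0, 1, 2), opt=opt)  # all args: stand-ins
mmd_curve = criterion.get_logger('mmd')  # served from criterion.log
# A name from OptimModule.params would be served from criterion.opt.log.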