class Model2(Model):
    """Model using the factorization p(x_A, x_B) = p(x_B) * p(x_A | x_B)."""

    def __init__(self, N):
        super(Model2, self).__init__(N=N)
        # Marginal over B and conditional of A given B.
        self.p_B = Marginal(N)
        self.p_A_B = Conditional(N)

    def forward(self, inputs):
        """Return the (negative) log-likelihood of `inputs` under the
        decomposition p(x_A, x_B) = p(x_B) p(x_A | x_B).

        Assumes `inputs` has the A and B variables stacked along dim 1
        (two slices of width 1) — TODO confirm against callers.
        """
        x_A, x_B = (part.squeeze(1) for part in torch.split(inputs, 1, dim=1))
        return self.p_B(x_B) + self.p_A_B(x_B, x_A)

    def set_analytical_maximum_likelihood(self, pi_A, pi_B_A):
        """Set the weights to the exact maximum-likelihood solution derived
        from the true distributions `pi_A` (numpy, p(A)) and `pi_B_A`
        (numpy, p(B | A) indexed [A, B]).
        """
        log_pi_A = torch.log(torch.from_numpy(pi_A))
        log_pi_B_A = torch.log(torch.from_numpy(pi_B_A))
        # log p(A, B), indexed [A, B].
        log_joint = log_pi_A.unsqueeze(1) + log_pi_B_A
        # Marginalize A out (dim 0) to get log p(B).
        log_p_B = torch.logsumexp(log_joint, dim=0)
        self.p_B.w.data = log_p_B
        # log p(A | B) = log p(A, B) - log p(B), re-indexed [B, A].
        self.p_A_B.w.data = log_joint.t() - log_p_B.unsqueeze(1)

    def init_parameters(self):
        """Re-initialize the parameters of both factor modules."""
        self.p_B.init_parameters()
        self.p_A_B.init_parameters()
class Model1(Model):
    """Model using the factorization p(x_A, x_B) = p(x_A) * p(x_B | x_A)."""

    def __init__(self, N):
        super(Model1, self).__init__(N=N)
        # Marginal over A and conditional of B given A.
        self.p_A = Marginal(N)
        self.p_B_A = Conditional(N)

    def forward(self, inputs):
        """Return the (negative) log-likelihood of `inputs` under the
        decomposition p(x_A, x_B) = p(x_A) p(x_B | x_A).

        Assumes `inputs` has the A and B variables stacked along dim 1
        (two slices of width 1) — TODO confirm against callers.
        """
        x_A, x_B = (part.squeeze(1) for part in torch.split(inputs, 1, dim=1))
        return self.p_A(x_A) + self.p_B_A(x_A, x_B)

    def set_analytical_maximum_likelihood(self, pi_A, pi_B_A):
        """Set the weights to the exact maximum-likelihood solution derived
        from the true distributions `pi_A` (numpy, p(A)) and `pi_B_A`
        (numpy, p(B | A)).

        This factorization matches the data-generating order, so the MLE
        is simply the log of each given distribution.
        """
        self.p_A.w.data = torch.log(torch.from_numpy(pi_A))
        self.p_B_A.w.data = torch.log(torch.from_numpy(pi_B_A))

    def init_parameters(self):
        """Re-initialize the parameters of both factor modules."""
        self.p_A.init_parameters()
        self.p_B_A.init_parameters()
def __init__(self, N, dtype=None):
    """Initialize both bases explicitly (non-cooperative): `Model` first,
    then `ModelB2A` with freshly constructed distribution modules.

    `dtype` is forwarded to the Marginal/Conditional factories; `None`
    presumably selects their default — verify against those classes.
    """
    Model.__init__(self, N)
    ModelB2A.__init__(
        self,
        Marginal(N, dtype=dtype),
        Conditional(N, dtype=dtype),
    )
def __init__(self, N):
    """Build Model2's factor modules: conditional p(A | B) and marginal p(B)."""
    super(Model2, self).__init__(N=N)
    # The two submodules are independent, so construction order is free.
    self.p_A_B = Conditional(N)
    self.p_B = Marginal(N)
def __init__(self, N, dtype=None):
    """Initialize both bases explicitly (non-cooperative): `Model` first,
    then `ModelA2B` with a mixture marginal and a conditional module.

    `dtype` is forwarded to the MixtureMarginal/Conditional factories;
    `None` presumably selects their default — verify against those classes.
    """
    Model.__init__(self, N)
    ModelA2B.__init__(
        self,
        MixtureMarginal(N, dtype=dtype),
        Conditional(N, dtype=dtype),
    )