Example #1
def trainD(self, x_label, y, x_unlabel):
    x_label, x_unlabel = Variable(x_label), Variable(x_unlabel)
    y = Variable(y, requires_grad=False)
    if self.args.cuda:
        x_label, x_unlabel, y = x_label.cuda(), x_unlabel.cuda(), y.cuda()
    # Discriminator logits for the labelled, unlabelled and generated batches
    output_label = self.D(x_label, cuda=self.args.cuda)
    output_unlabel = self.D(x_unlabel, cuda=self.args.cuda)
    fake = self.G(x_unlabel.size()[0],
                  cuda=self.args.cuda).view(x_unlabel.size()).detach()
    output_fake = self.D(fake, cuda=self.args.cuda)
    # log sum_i e^{x_i} over the class logits
    logz_label = log_sum_exp(output_label)
    logz_unlabel = log_sum_exp(output_unlabel)
    logz_fake = log_sum_exp(output_fake)
    # log e^{x_label} = x_label (the logit of the true class)
    prob_label = torch.gather(output_label, 1, y.unsqueeze(1))
    loss_supervised = -torch.mean(prob_label) + torch.mean(logz_label)
    loss_unsupervised = 0.5 * (
        -torch.mean(logz_unlabel) +
        torch.mean(F.softplus(logz_unlabel)) +  # real data: -log Z/(1+Z)
        torch.mean(F.softplus(logz_fake)))      # fake data: -log 1/(1+Z)
    loss = loss_supervised + self.args.unlabel_weight * loss_unsupervised
    acc = torch.mean((output_label.max(1)[1] == y).float())
    self.Doptim.zero_grad()
    loss.backward()
    self.Doptim.step()
    return (loss_supervised.data.cpu().numpy(),
            loss_unsupervised.data.cpu().numpy(), acc)
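Each snippet on this page imports its own log_sum_exp helper, which the listing does not show. For reference, a minimal numerically stable sketch in PyTorch; the exact signature and dim handling vary between the source repositories, so treat this as an assumption:

import torch

def log_sum_exp(x, dim=-1):
    # stable log(sum(exp(x))) along `dim`: shift by the max so that
    # exp() cannot overflow, then add the shift back
    m, _ = x.max(dim=dim, keepdim=True)
    return (m + (x - m).exp().sum(dim=dim, keepdim=True).log()).squeeze(dim)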
Example #2
    def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        init_alphas = torch.full((1, self.tagset_size), -10000.)
        # START_TAG has all of the score.
        init_alphas[0][self.tag_to_ix[self.START_TAG]] = 0.

        # Wrap in a variable so that we will get automatic backprop
        forward_var = init_alphas

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward tensors at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].view(1, -1).expand(
                    1, self.tagset_size)
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions[next_tag].view(1, -1)
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = forward_var + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var).view(1))
            forward_var = torch.cat(alphas_t).view(1, -1)
        terminal_var = forward_var + self.transitions[self.tag_to_ix[
            self.STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha
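The inner loop over next_tag can be collapsed into a single broadcast, which is the same shifted exponentiation Example #8 below spells out by hand. A sketch of the loop body, assuming the tutorial's transitions[next_tag, prev_tag] layout and using the built-in torch.logsumexp:

for feat in feats:
    # mat[next, prev] = forward_var[prev] + transitions[next, prev] + feat[next]
    mat = forward_var + self.transitions + feat.view(-1, 1)
    forward_var = torch.logsumexp(mat, dim=1).view(1, -1)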
Example #3
def array_transform_ab_initio(dG0, nH, z, nMg, pH, pMg, I, T, mu=mu_proton):
    """
        dG0, nH, z and nMg are the species parameters (can be vectors);
        pH, pMg, I and T are the conditions and must be scalars.
        Returns the transformed Gibbs energy dG0'.
    """
    from util import log_sum_exp
    ddG0 = correction_function_ab_initio(nH, z, nMg, pH, pMg, I, T, mu)
    dG0_tag = dG0 + ddG0
    # combine the corrected species energies via log-sum-exp
    # (R_kCal: gas constant in kcal units)
    return -R_kCal * T * log_sum_exp(dG0_tag / (-R_kCal * T))
Example #4
def array_transform(dG0, nH, z, nMg, pH, pMg, I, T):
    """
        dG0, nH, z and nMg are the species parameters (can be vectors);
        pH, pMg, I and T are the conditions and must be scalars.
        Returns the transformed Gibbs energy dG0'.
    """
    from util import log_sum_exp
    ddG0 = correction_function(nH, z, nMg, pH, pMg, I, T)
    print("correction term original")
    print(ddG0)
    dG0_tag = dG0 + ddG0
    return -R_kCal * T * log_sum_exp(dG0_tag / (-R_kCal * T))
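Both transforms evaluate dG0' = -R*T*ln(sum_i exp(-dG0'_i/(R*T))), a Boltzmann-weighted combination of the corrected species energies. The imported util.log_sum_exp is not shown; a plausible NumPy sketch of what it must do (an assumption, not the repository's code):

import numpy as np

def log_sum_exp(x):
    # stable scalar log(sum(exp(x))) over the species vector
    x = np.asarray(x, dtype=float)
    m = x.max()
    return m + np.log(np.exp(x - m).sum())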
Example #5
def calc_mutual_info(self, input, step=0, alpha=-1):
    # code referenced from https://github.com/jxhe/vae-lagging-encoder/blob/master/modules/encoders/encoder.py
    # assumes: from math import log, pi
    mu, logvar = self.encode(input, step, alpha)
    x_batch, code_size = mu.size()
    # E[log q(z|x)] for a diagonal Gaussian, in closed form
    neg_entropy = (-0.5 * code_size * log(2 * pi) -
                   0.5 * (1 + logvar).sum(-1)).mean()

    z = self.reparameterisation(mu, logvar)
    mu, logvar = mu.unsqueeze(0), logvar.unsqueeze(0)
    var = logvar.exp()
    # log q(z_i | x_j) for every (i, j) pair
    dev = z - mu
    log_density = (-0.5 * ((dev ** 2) / var).sum(dim=-1) -
                   0.5 * (code_size * log(2 * pi) + logvar.sum(-1)))
    # log q(z) ~= log (1/N) sum_j q(z | x_j)
    log_qz = log_sum_exp(log_density, dim=1) - log(x_batch)

    return (neg_entropy - log_qz.mean(-1)).item()
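The estimator is MI(x, z) ~= E[log q(z|x)] - E[log q(z)]: neg_entropy gives the first term in closed form, and log_qz Monte-Carlo-estimates the aggregate posterior with log_sum_exp over the batch. For the pairwise broadcast (z - mu) to work, z needs a singleton middle dimension; a sketch of the sampling step this assumes (reparameterisation is not shown in the snippet):

def reparameterisation(self, mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I); the unsqueeze gives z
    # shape (x_batch, 1, code_size), so z - mu.unsqueeze(0) yields all
    # pairwise deviations of shape (x_batch, x_batch, code_size)
    std = (0.5 * logvar).exp()
    eps = torch.randn_like(std)
    return (mu + eps * std).unsqueeze(1)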
Example #6
    def _norm(self, feats):
        seq_len, batch_size, tag_size = feats.size()

        alpha = feats.data.new(batch_size, tag_size).fill_(-10000)
        alpha[:, START_TAG] = 0
        alpha = Variable(alpha)

        for feat in feats:  # batch_size x tag_size
            # Build mat[b, next, prev] = transitions[next, prev]
            #   + alpha[b, prev] + feat[b, next], then log-sum-exp over prev
            feat_exp = feat.unsqueeze(-1).expand(batch_size, tag_size,
                                                 tag_size)
            alpha_exp = alpha.unsqueeze(1).expand(batch_size, tag_size,
                                                  tag_size)
            trans_exp = self.transitions.unsqueeze(0).expand(
                batch_size, tag_size, tag_size)
            mat = trans_exp + alpha_exp + feat_exp
            alpha = log_sum_exp(mat, 2)

        # add the transition into END_TAG and reduce over the final tag
        trn_exp = self.transitions[END_TAG].unsqueeze(0).expand(
            batch_size, tag_size)
        alpha = alpha + trn_exp
        norm = log_sum_exp(alpha, 1).squeeze(-1)

        return norm
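Since the three operands already broadcast against each other, the explicit .expand calls are unnecessary in current PyTorch; an equivalent, behaviour-preserving sketch of the loop body:

for feat in feats:  # batch_size x tag_size
    mat = (self.transitions.unsqueeze(0)   # (1, tag, tag)
           + alpha.unsqueeze(1)            # (batch, 1, tag): prev scores
           + feat.unsqueeze(-1))           # (batch, tag, 1): emissions
    alpha = log_sum_exp(mat, 2)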
Example #7
def get_probs_and_accuracy(preds, O):
    # P(class 1) = exp(logit_1 - log sum_j exp(logit_j)), i.e. the softmax
    all_probs = torch.exp(preds[:, 1] - log_sum_exp(preds, dim=1))
    # preds holds n_mc_smps Monte Carlo samples per example
    N = preds.shape[0] // n_mc_smps
    probs = torch.zeros([0], device=globals.device)
    i = 0
    while i < N:
        # average the MC samples belonging to example i
        probs = torch.cat([
            probs,
            torch.tensor([
                torch.mean(all_probs[i * n_mc_smps:i * n_mc_smps + n_mc_smps])
            ],
                         device=globals.device)
        ], 0)
        i += 1

    correct_pred = torch.eq(torch.gt(probs, 0.5).type(torch.uint8), O)
    accuracy = torch.mean(correct_pred.type(torch.float32))
    return probs, accuracy
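all_probs is simply the softmax probability of class 1, and because the Monte Carlo samples are stored contiguously, the while loop can be replaced by a reshape; an equivalent sketch:

all_probs = torch.softmax(preds, dim=1)[:, 1]
probs = all_probs.view(-1, n_mc_smps).mean(dim=1)  # one mean per example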
Example #8
    def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        init_alphas = torch.full((1, self.tagset_size), -10000.)
        # '<start>' has all of the score.
        init_alphas[0][self.tag_to_ix['<start>']] = 0.

        # Wrap in a variable so that we will get automatic backprop
        forward_var = init_alphas

        # Iterate through the sentence
        for feat in feats:
            emit_score = feat.view(-1, 1)
            tag_var = forward_var + self.transitions + emit_score
            max_tag_var, _ = torch.max(tag_var, dim=1)
            tag_var = tag_var - max_tag_var.view(-1, 1)
            forward_var = max_tag_var + torch.log(
                torch.sum(torch.exp(tag_var), dim=1)).view(1, -1)

        terminal_var = forward_var + self.transitions[
            self.tag_to_ix['<stop>']].view(1, -1)
        alpha = log_sum_exp(terminal_var)
        return alpha
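The max subtraction inside the loop is the log-sum-exp trick itself: log sum_i exp(v_i) = m + log sum_i exp(v_i - m) with m = max_i v_i, which keeps exp() from overflowing. A quick check:

import torch

v = torch.tensor([1000.0, 1001.0, 1002.0])
torch.log(torch.exp(v).sum())          # inf: exp(1000.) overflows
m = v.max()
m + torch.log(torch.exp(v - m).sum())  # tensor(1002.4076)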