Code Example #1
    import numpy as np
    import torch
    from scipy.special import log_softmax  # assumption: the original may define its own log_softmax
    from torch import autograd

    def VI_loss_function_grad(yval, target):
        '''
        Score-function (REINFORCE) estimate of the gradient of the
        negative ELBO with respect to the logits ``yval``.

        TODOs:
        1. check dimensionality of q for replications in the while loop;
           proposed replacement for the sampling block:
        -            s = np.random.multinomial(1, q, 1)
        -            onehot = np.reshape(s, (-1,1))
        -            ELBO_grad += np.reshape(q-s, (1,-1))*(np.dot(logp, onehot) - np.dot(logq, onehot))
        +            onehot = np.zeros(2)
        +            s = np.reshape(np.random.multinomial(1, np.reshape(q, (-1))), (-1,1))
        +            onehot[s[0][0]] = 1
        +            ELBO_grad += np.reshape(q[0]-onehot, (1,-1))*(np.dot(logp, onehot) - np.dot(logq, onehot))
        '''
        nsamps = 50
        logq = log_softmax(yval.view(1, -1).data.numpy())  # log q, shape (1, L)
        logp = target.view(1, -1).data.numpy()             # log p, shape (1, L)
        q = np.exp(logq)[0]                                # sampling probabilities, shape (L,)
        count = 0
        ELBO_grad = 0
        while count < nsamps:
            s = np.random.multinomial(1, q, 1)             # one-hot sample from q, shape (1, L)
            onehot = np.reshape(s, (-1, 1))                # column vector, shape (L, 1)
            # score-function term: (q - s) * (log p(s) - log q(s))
            ELBO_grad += np.reshape(q - s, (1, -1)) * (np.dot(logp, onehot) - np.dot(logq, onehot))
            count += 1

        grad = ELBO_grad / count                           # Monte Carlo average over nsamps draws
        return autograd.Variable(torch.Tensor(grad).view(1, -1))
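A minimal call sketch, assuming `yval` holds the variational logits and `target` the target log-probabilities, both 1 x L tensors; the shapes and values are illustrative, not from the source:

    yval = autograd.Variable(torch.randn(1, 5))    # variational logits
    target = autograd.Variable(torch.randn(1, 5))  # stand-in for log p
    grad = VI_loss_function_grad(yval, target)
    print(grad.size())                             # torch.Size([1, 5])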
Code Example #2
    def predict_next(self, prefix=None):
        # Hash (src, consumed-or-prefix) to deterministically pick one of
        # the cached distributions, so repeated calls for the same state
        # return the same scores.
        hash_rep = str(
            self.src) + str(self.consumed if prefix is None else prefix)
        hash_key = int(
            hashlib.sha256(hash_rep.encode('utf-8')).hexdigest(), 16)
        dist_key = hash_key % self.num_dists
        unnorm_posterior = copy.copy(self.prob_dists[dist_key])
        # Boost EOS as the hypothesis grows longer than the source.
        unnorm_posterior[utils.EOS_ID] += (
            len(self.consumed) - len(self.src)) * unnorm_posterior.max() / 2
        return utils.log_softmax(unnorm_posterior,
                                 temperature=self.model_temperature)
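The hashing step can be isolated; a self-contained sketch of the same idea (the name `dist_index` is ours, not from the source):

    import hashlib

    def dist_index(src_tokens, prefix_tokens, num_dists):
        # The same (src, prefix) pair always maps to the same cached
        # distribution index, keeping the predictor deterministic.
        rep = str(src_tokens) + str(prefix_tokens)
        key = int(hashlib.sha256(rep.encode('utf-8')).hexdigest(), 16)
        return key % num_dists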
Code Example #3
    def gumbelify(self, hypo, posterior):
        # Shift scores by the per-token position bonus and renormalize.
        vf = np.vectorize(lambda x: self.get_pos_score(hypo, x) - self.get_adjusted_score(hypo))
        shifted_posterior = vf(posterior)
        shifted_posterior = utils.log_softmax(shifted_posterior)

        # Fixed seed: the same Gumbel noise is drawn on every call.
        np.random.seed(seed=0)
        gumbels = np.random.gumbel(loc=0, scale=1, size=shifted_posterior.shape)
        gumbel_posterior = shifted_posterior + gumbels + hypo.base_score
        Z = np.max(gumbel_posterior)

        # Truncate the perturbed scores so their maximum equals hypo.score
        # (numerically stable conditioning of the Gumbel noise on its max).
        v = hypo.score - gumbel_posterior + utils.logmexp(gumbel_posterior - Z)
        gumbel_full_posterior = hypo.score - np.maximum(0, v) - utils.logpexp(-np.abs(v))

        # make sure invalid tokens still have neg inf log probability
        gumbel_full_posterior[(posterior == utils.NEG_INF).nonzero()] = utils.NEG_INF
        return gumbel_full_posterior
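`utils.logmexp` and `utils.logpexp` are not shown in this excerpt; plausible definitions consistent with their use above, stated as assumptions about the source:

    import numpy as np

    def logmexp(x):
        # Assumed to compute log(1 - exp(x)) for x <= 0.
        return np.log1p(-np.exp(x))

    def logpexp(x):
        # Assumed to compute log(1 + exp(x)) (softplus); stable for the
        # non-positive inputs gumbelify passes in (-np.abs(v)).
        return np.log1p(np.exp(x))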
Code Example #4
    def apply_predictor(self, hypo=None, top_n=0):
        """Get the distribution over the next word by combining the
        predictor scores.

        Args:
            hypo: Current hypothesis; required when Gumbel perturbation
                (``self.gumbel``) is enabled.
            top_n (int): If positive, return only the best n words.

        Returns:
            ids, posterior, original_posterior: Word ids, their combined
            (possibly Gumbel-perturbed) scores, and the corresponding
            unperturbed scores.
        """
        assert hypo is not None or not self.gumbel
        self.apply_predictor_count += 1
        # Get posteriors
        posterior = self.predictor.predict_next()
        posterior = utils.log_softmax(posterior, temperature=self.temperature)
        # numerical stability check: at most one entry of the log
        # posterior may be exactly 0 (i.e. probability 1)
        assert len(posterior) - np.count_nonzero(posterior) <= 1

        non_zero_words = self._get_non_zero_words(self.predictor, posterior)
        if len(non_zero_words) == 0:  # Special case: no word is possible
            non_zero_words = set([utils.EOS_ID])

        if self.gumbel:
            gumbel_full_posterior = self.gumbelify(hypo, posterior)
            ids, posterior, original_posterior = self.combine_posteriors(
                non_zero_words,
                gumbel_full_posterior,
                self.predictor.get_unk_probability(posterior),
                top_n=top_n,
                original_posterior=posterior)
        else:
            ids, posterior, original_posterior = self.combine_posteriors(
                non_zero_words,
                posterior,
                self.predictor.get_unk_probability(posterior),
                top_n=top_n)

        assert self.allow_unk_in_output or utils.UNK_ID not in ids
        return ids, posterior, original_posterior
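A sketch of how a decoding loop might consume this method; the names `decoder`, `cur_hypo`, `beam_size`, and `expand_hypothesis` are illustrative, not from the source:

    ids, scores, orig_scores = decoder.apply_predictor(hypo=cur_hypo,
                                                       top_n=beam_size)
    for word_id, score in zip(ids, scores):
        expand_hypothesis(cur_hypo, word_id, score)  # hypothetical helper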
Code Example #5
    def get_normalized_probs(self, net_output, log_probs, sample):
        """Get normalized probabilities (or log probs) from a net's output."""

        if hasattr(self,
                   'adaptive_softmax') and self.adaptive_softmax is not None:
            if sample is not None:
                assert 'target' in sample
                target = sample['target']
            else:
                target = None
            out = self.adaptive_softmax.get_log_prob(net_output[0],
                                                     target=target)
            # get_log_prob returns log probabilities; exponentiate in
            # place if plain probabilities were requested.
            return out.exp_() if not log_probs else out

        logits = net_output[0]
        if log_probs:
            return utils.log_softmax(logits,
                                     dim=-1,
                                     onnx_trace=self.onnx_trace)
        else:
            return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
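On the non-adaptive-softmax path the two branches are an ordinary (log-)softmax over the vocabulary dimension; a minimal stand-in in plain PyTorch (the fairseq `utils` wrappers additionally handle ONNX tracing):

    import torch.nn.functional as F

    def normalized_probs(logits, log_probs):
        # logits: (..., vocab); mirrors the else branch above.
        return F.log_softmax(logits, dim=-1) if log_probs else F.softmax(logits, dim=-1)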
Code Example #6
    def get_initial_dist(self):
        # Temperature-scaled log-softmax over the predictor's initial scores.
        return utils.log_softmax(self.predictor.get_initial_dist(),
                                 temperature=self.temperature)
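`utils.log_softmax` itself is not shown in these excerpts; a plausible numpy definition consistent with the calls above, offered as an assumption rather than the source's code:

    import numpy as np

    def log_softmax(x, temperature=1.0):
        # Divide the scores by the temperature, then subtract the log
        # normalizer; higher temperatures flatten the distribution.
        x = np.asarray(x, dtype=float) / temperature
        return x - np.logaddexp.reduce(x)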