    def predictDistribution(self, in_data : TacticContext) \
        -> torch.FloatTensor:
        in_vec = Variable(FloatTensor(
            encode_bag_classify_input(in_data.goal, self.tokenizer))) \
            .view(1, -1)
        return self.lsoftmax(self.linear(in_vec))
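For context, the linear-plus-log-softmax scoring performed by self.linear and self.lsoftmax can be reproduced in isolation. The sketch below is a minimal stand-in with hypothetical feature and tactic-stem counts; it is not the project's actual module definition.

import torch
from torch import nn

# Hypothetical sizes: 1024 bag-of-tokens features, 50 tactic stems.
N_FEATURES, N_STEMS = 1024, 50

linear = nn.Linear(N_FEATURES, N_STEMS)
lsoftmax = nn.LogSoftmax(dim=1)

# A single encoded goal, shaped as a 1 x N_FEATURES batch.
in_vec = torch.zeros(1, N_FEATURES)
distribution = lsoftmax(linear(in_vec))  # log-probabilities over tactic stems
print(distribution.shape)                # torch.Size([1, 50])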
    def predictKTactics(self, in_data : TacticContext, k : int) -> \
        List[Prediction]:
        # Encode the goal and look up its k nearest neighbors in the search tree.
        input_vector = encode_bag_classify_input(in_data.goal, self.tokenizer)

        nearest = self.bst.findKNearest(input_vector, k)
        assert nearest is not None
        for pair in nearest:
            assert pair is not None
        # Decode each neighbor's tactic stem and assign geometrically decaying
        # certainties (1.0, 0.5, 0.25, ...) in rank order.
        predictions = [Prediction(self.embedding.decode_token(output) + ".", 0.5 ** i)
                       for i, (neighbor, output) in enumerate(nearest)]
        return predictions
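The certainties above do not come from the neighbor distances; the score simply halves at each rank. A self-contained sketch of that scoring idea, using a hypothetical stand-in for the project's Prediction pair and plain tactic names instead of the embedding, looks like this:

from typing import List, NamedTuple


# Hypothetical stand-in for the (tactic text, certainty) pair used above.
class Prediction(NamedTuple):
    prediction: str
    certainty: float


def rank_with_decay(tactics: List[str]) -> List[Prediction]:
    # Certainty halves at each rank: 1.0, 0.5, 0.25, ...
    return [Prediction(tactic + ".", 0.5 ** i) for i, tactic in enumerate(tactics)]


print(rank_with_decay(["intros", "simpl", "auto"]))
# [Prediction(prediction='intros.', certainty=1.0),
#  Prediction(prediction='simpl.', certainty=0.5),
#  Prediction(prediction='auto.', certainty=0.25)]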
Example #3
    def predictDistribution(self, in_data : TacticContext) \
        -> torch.FloatTensor:
        in_vec = maybe_cuda(Variable(torch.FloatTensor(
            encode_bag_classify_input(in_data.goal, self.tokenizer)))) \
            .view(1, -1)
        return self.network(in_vec)
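maybe_cuda is presumably a small helper that moves the tensor to the GPU when one is available; the project's exact definition is not shown here, but a typical version of such a helper looks like the following sketch.

import torch


def maybe_cuda(tensor: torch.Tensor) -> torch.Tensor:
    # Move the tensor to the GPU if CUDA is available; otherwise leave it on the CPU.
    if torch.cuda.is_available():
        return tensor.cuda()
    return tensor


x = maybe_cuda(torch.zeros(1, 8))
print(x.device)  # cuda:0 on a GPU machine, cpu otherwise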
    def predictDistribution(self, in_data : TacticContext) \
        -> torch.FloatTensor:
        feature_vector = encode_bag_classify_input(in_data.goal,
                                                   self.tokenizer)
        # predict_log_proba on a scikit-learn-style classifier returns one row of
        # log-probabilities per sample; take the row for this single goal.
        # Note that this is a NumPy array rather than a torch.FloatTensor.
        distribution = self.classifier.predict_log_proba([feature_vector])[0]
        return distribution
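Assuming self.classifier is a scikit-learn estimator (an assumption; the snippet only shows the predict_log_proba call), the same pattern can be reproduced standalone:

import numpy as np
from sklearn.linear_model import LogisticRegression

# Toy training data: 4-dimensional bag-of-tokens vectors, 3 tactic-stem classes.
X = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1], [1, 1, 0, 0]])
y = np.array([0, 1, 2, 0])

classifier = LogisticRegression().fit(X, y)

feature_vector = [1, 0, 0, 1]
# predict_log_proba expects a 2-D array of samples; [0] takes the row for the
# single sample, giving one log-probability per class.
distribution = classifier.predict_log_proba([feature_vector])[0]
print(distribution.shape)  # (3,)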