Example #1
    def __init__(self, variables, config, proof_step, train=False):
        ''' Builds the model for a single pass: processes the inputs,
        computes the loss, and performs a backpropagation step when
        train is set.
        '''

        # unpack the proof step, which is a (tree, hypotheses, label) triple
        (tree, hyps, correct_output) = proof_step

        DefaultModel.__init__(self, config, variables, train=train)

        # fix the random seed so that evaluation is deterministic per example
        if not self.train:
            np.random.seed(tree.size() + 100 * len(hyps) + 10000 * correct_output)

        correct_score = self.get_score(tree, hyps, None)

        # the "wrong" class gets a fixed score of 0, so the two-way softmax
        # reduces to a logistic classifier on correct_score
        wrong_score = nn.ConstantNode(np.array([0.0]), self.g)
        correct_output = 1 * correct_output  # convert the boolean label to an int index

        logits = nn.ConcatNode([wrong_score, correct_score], self.g)
        cross_entropy = nn.SoftmaxCrossEntropyLoss(correct_output, logits, self.g)
        self.loss = nn.AddNode([self.loss, cross_entropy], self.g)

        # argmax accuracy; the third output records whether the label was negative
        accuracy = 1 * (np.argmax(logits.value) == correct_output)
        self.outputs = [cross_entropy.value, accuracy, 1 - correct_output]
        self.output_counts = [1, 1, 1]

        # perform the backpropagation if we are training
        if train:
            self.g.backprop(self.loss)
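
The nn graph library and DefaultModel base class are project-specific, and np is presumably NumPy imported at module level. Because the "wrong" logit is pinned at zero, the two-way softmax above reduces to a logistic loss on correct_score; a minimal NumPy sketch of that equivalence (the function name is illustrative, not from the source):

import numpy as np

def two_logit_cross_entropy(correct_score, label):
    # logits are [wrong_score, correct_score] with wrong_score pinned at 0,
    # exactly as in the graph above; label is 0 (wrong) or 1 (correct)
    logits = np.array([0.0, float(correct_score)])
    shifted = logits - logits.max()  # numerically stable log-softmax
    log_probs = shifted - np.log(np.exp(shifted).sum())
    return -log_probs[label]

print(two_logit_cross_entropy(2.0, 1))  # ~0.1269, i.e. log(1 + exp(-2.0))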
Example #2
    def score(self, logits, correct):
        ''' Scores one predicted token: updates the accuracy counters
        and returns the cross-entropy loss node. '''
        correct_index = self.config.encode[correct]

        # check whether the argmax prediction matches the target token
        this_correct = np.argmax(logits.value) == correct_index
        if this_correct:
            self.num_correct += 1
        else:
            self.all_correct = False

        # cross-entropy against the target index, added to the graph
        loss = nn.SoftmaxCrossEntropyLoss(correct_index, logits, self.g)
        return loss
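
A hypothetical caller for this method, mirroring the nn.AddNode accumulation pattern from Example #1 (all_logits and target_tokens are illustrative names, not from the source):

for step_logits, token in zip(all_logits, target_tokens):
    step_loss = model.score(step_logits, token)
    model.loss = nn.AddNode([model.loss, step_loss], model.g)

token_accuracy = model.num_correct / len(target_tokens)
exact_match = model.all_correct  # True only if every token was predicted correctly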
Example #3
    def score(self, logits, correct):
        ''' Scores one predicted token: records the decoded prediction,
        updates the accuracy counters, and returns the cross-entropy
        loss node. '''
        correct_index = self.config.encode[correct]

        # record the decoded prediction, then check it against the target
        word_idx = np.argmax(logits.value)
        self.prediction.append(self.config.decode[word_idx])
        this_correct = word_idx == correct_index
        if this_correct:
            self.num_correct += 1
        else:
            self.all_correct = False

        loss = nn.SoftmaxCrossEntropyLoss(correct_index, logits, self.g)
        return loss
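
The only change from the previous example is the self.prediction list; after a full pass it can be inspected to see what the model actually generated (assuming config.decode inverts config.encode and yields string tokens):

print('predicted:', ' '.join(model.prediction))
print('correct tokens:', model.num_correct, '| exact match:', model.all_correct)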
Example #4
    def __init__(self, variables, config, proof_step, train=False):
        ''' Builds the model for a single pass: processes the inputs,
        computes the loss, and performs a backpropagation step when
        train is set.
        '''
        DefaultModel.__init__(self, config, variables, train=train)

        # fix the random seed so that evaluation is deterministic per example
        if not self.train:
            np.random.seed(proof_step.context.number +
                           proof_step.prop.number + proof_step.tree.size())

        main = self.main_get_vector(proof_step.tree, proof_step.context.hyps,
                                    proof_step.context.f)

        # project the main vector through the learned weight matrix W
        main = nn.DotNode(main, self.v.W, self.g)

        # get a list [right prop, wrong prop 0, ..., wrong prop n];
        # the correct proposition is always at index 0
        props = self.get_props(proof_step)

        # debugging hook:
        # if not self.train: print([p.label for p in props])
        out_vectors = [
            self.prop_get_vector(prop.tree, prop.hyps, prop.f)
            for prop in props
        ]
        # stack the candidate vectors and transpose, so a single dot
        # with the main vector scores every candidate at once
        stacked = nn.StackNode(out_vectors, self.g)
        stacked = nn.TransposeInPlaceNode(stacked, self.g)

        # softmax over the candidate scores; the correct prop sits at
        # index 0, so the target class is always 0
        logits = nn.DotNode(main, stacked, self.g)
        cross_entropy = nn.SoftmaxCrossEntropyLoss(0, logits, self.g)
        self.loss = nn.AddNode([self.loss, cross_entropy], self.g)

        # argmax accuracy; 1.0 / len(props) is the chance-level accuracy
        accuracy = 1 * (np.argmax(logits.value) == 0)
        self.outputs = [cross_entropy.value, accuracy, 1.0 / len(props)]
        self.output_counts = [1, 1, 1]

        # perform the backpropagation if we are training
        if train:
            self.g.backprop(self.loss)
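
Here the loss is a softmax over the dot products of the main vector with each candidate proposition vector, with the correct candidate always at index 0. A self-contained NumPy sketch of that ranking objective (function and variable names are illustrative, not from the source):

import numpy as np

def candidate_ranking_loss(main_vec, prop_vecs):
    # main_vec: shape (d,); prop_vecs: n vectors of shape (d,), with the
    # correct proposition's vector first, as in get_props above
    logits = main_vec @ np.stack(prop_vecs).T  # one score per candidate
    shifted = logits - logits.max()            # stable log-softmax
    log_probs = shifted - np.log(np.exp(shifted).sum())
    loss = -log_probs[0]                       # cross-entropy with label 0
    accuracy = 1 * (np.argmax(logits) == 0)
    return loss, accuracy, 1.0 / len(prop_vecs)  # last value: chance accuracy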