Code example #1
    def _test(self, ts):

        self.model.eval()
        total_correct = 0
        total_sum = 0
        total_gold_count = 0
        total_guess_count = 0
        total_overlap_count = 0
        metrics = {}
        steps = len(ts)
        pg = create_progress_bar(steps)
        for batch_dict in ts:

            x, xch, lengths, y, ids = self.model.make_input(batch_dict)
            inputs = (x, xch, lengths)
            pred = self.model(inputs)
            correct, count, overlaps, golds, guesses = self.process_output(
                pred, y.data, lengths, ids, None, None)
            total_correct += correct
            total_sum += count
            total_gold_count += golds
            total_guess_count += guesses
            total_overlap_count += overlaps
            pg.update()

        pg.done()
        total_acc = total_correct / float(total_sum)
        # Only show the fscore if requested
        metrics['f1'] = f_score(total_overlap_count, total_gold_count,
                                total_guess_count)
        metrics['acc'] = total_acc
        return metrics
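
Every variant on this page reports the same two metrics and funnels its counters through a shared `f_score(overlap, gold, guess)` helper that is not reproduced here. The sketch below is a minimal reconstruction, assuming the conventional micro-averaged chunk F-measure (precision = overlap/guess, recall = overlap/gold); the parameter names are guesses, not necessarily the repo's.

    def f_score(overlap_count, gold_count, guess_count, f=1):
        # Hypothetical reconstruction of the shared helper: micro-averaged
        # F-measure computed from raw chunk counts, not per-example scores.
        if gold_count == 0 or guess_count == 0 or overlap_count == 0:
            return 0.0
        precision = overlap_count / float(guess_count)
        recall = overlap_count / float(gold_count)
        beta_sq = f * f
        # Weighted harmonic mean of precision and recall (plain F1 when f == 1).
        return (1 + beta_sq) * precision * recall / (beta_sq * precision + recall)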
Code example #2
    def test(self, ts, conll_file=None, txts=None):

        total_correct = total_sum = 0
        total_gold_count = total_guess_count = total_overlap_count = 0

        steps = len(ts)
        pg = ProgressBar(steps)
        metrics = {}
        # Only if they provide a file and the raw txts, we can write CONLL file
        handle = None
        if conll_file is not None and txts is not None:
            handle = open(conll_file, "w")

        for x, xch, y, lengths, id in ts:
            correct, count, overlaps, golds, guesses = self.process_batch(
                x, xch, y, lengths, id, handle, txts)
            total_correct += correct
            total_sum += count
            total_gold_count += golds
            total_guess_count += guesses
            total_overlap_count += overlaps
            pg.update()
        pg.done()

        total_acc = total_correct / float(total_sum)
        # Only show the fscore if requested
        metrics['f1'] = f_score(total_overlap_count, total_gold_count,
                                total_guess_count)
        metrics['acc'] = total_acc

        if handle is not None:
            handle.close()

        return metrics
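
The CONLL dump is opt-in: a handle is opened only when both `conll_file` and the raw `txts` are supplied, and `process_batch` receives it per batch. A hypothetical call, where `trainer`, `test_stream`, and `raw_texts` stand in for objects this page does not define:

    metrics = trainer.test(test_stream, conll_file='dev.conll', txts=raw_texts)
    print(metrics['f1'], metrics['acc'])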
Code example #3
File: train.py  Project: dpressel/baseline
    def _test(self, ts, **kwargs):

        self.model.train = False
        total_correct = 0
        total_sum = 0
        total_gold_count = 0
        total_guess_count = 0
        total_overlap_count = 0
        metrics = {}
        steps = len(ts)
        conll_output = kwargs.get('conll_output', None)
        txts = kwargs.get('txts', None)
        handle = None
        if conll_output is not None and txts is not None:
            handle = open(conll_output, "w")
        pg = create_progress_bar(steps)
        for batch_dict in pg(ts):

            lengths = batch_dict[self.model.lengths_key]
            ids = batch_dict['ids']
            y = batch_dict['y']
            pred = self.model.predict(batch_dict)
            correct, count, overlaps, golds, guesses = self.process_output(pred, y, lengths, ids, handle, txts)
            total_correct += correct
            total_sum += count
            total_gold_count += golds
            total_guess_count += guesses
            total_overlap_count += overlaps

        total_acc = total_correct / float(total_sum)
        # Only show the fscore if requested
        metrics['f1'] = f_score(total_overlap_count, total_gold_count, total_guess_count)
        metrics['acc'] = total_acc
        return metrics
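
Two details separate this variant from the PyTorch ones above: evaluation mode is toggled by assigning a plain attribute (`self.model.train = False`) rather than by calling `self.model.eval()`, and the CONLL options arrive through `**kwargs`. A hypothetical call (surrounding names illustrative):

    metrics = trainer._test(test_stream, conll_output='dev.conll', txts=raw_texts)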
Code example #4
    def test(self, ts, conll_output=None, txts=None):

        total_correct = total_sum = 0
        total_gold_count = total_guess_count = total_overlap_count = 0

        steps = len(ts)
        pg = create_progress_bar(steps)
        metrics = {}
        # Only if they provide a file and the raw txts, we can write CONLL file
        handle = None
        if conll_output is not None and txts is not None:
            handle = open(conll_output, "w")

        try:
            for batch_dict in pg(ts):
                correct, count, overlaps, golds, guesses = self.process_batch(
                    batch_dict, handle, txts)
                total_correct += correct
                total_sum += count
                total_gold_count += golds
                total_guess_count += guesses
                total_overlap_count += overlaps

            total_acc = total_correct / float(total_sum)
            # Only show the fscore if requested
            metrics['f1'] = f_score(total_overlap_count, total_gold_count,
                                    total_guess_count)
            metrics['acc'] = total_acc
        finally:
            if handle is not None:
                handle.close()

        return metrics
Code example #5
File: train.py  Project: dpressel/baseline
    def test(self, ts, conll_output=None, txts=None):

        total_correct = total_sum = 0
        total_gold_count = total_guess_count = total_overlap_count = 0

        steps = len(ts)
        pg = create_progress_bar(steps)
        metrics = {}
        # Only if they provide a file and the raw txts, we can write CONLL file
        handle = None
        if conll_output is not None and txts is not None:
            handle = open(conll_output, "w")

        try:
            for batch_dict in pg(ts):
                correct, count, overlaps, golds, guesses = self.process_batch(batch_dict, handle, txts)
                total_correct += correct
                total_sum += count
                total_gold_count += golds
                total_guess_count += guesses
                total_overlap_count += overlaps

            total_acc = total_correct / float(total_sum)
            # Only show the fscore if requested
            metrics['f1'] = f_score(total_overlap_count, total_gold_count, total_guess_count)
            metrics['acc'] = total_acc
        finally:
            if handle is not None:
                handle.close()

        return metrics
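
Examples #4 and #5 are the only variants that guard the handle with try/finally, so the file is closed even when a batch raises. The same guarantee can be sketched with a context manager; `open_conll` is illustrative, not a function from the repo, and `contextlib.nullcontext` requires Python 3.7+:

    import contextlib

    def open_conll(conll_output, txts):
        # Real file handle only when both arguments are supplied; otherwise a
        # do-nothing context that yields None, so `with` works either way.
        if conll_output is not None and txts is not None:
            return open(conll_output, 'w')
        return contextlib.nullcontext(None)

    # with open_conll(conll_output, txts) as handle:
    #     ... run the batch loop, passing `handle` to process_batch ...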
Code example #6
    def _test(self, ts):

        self.model.eval()

        total_loss = 0
        total_correct = 0
        total_sum = 0
        total_gold_count = 0
        total_guess_count = 0
        total_overlap_count = 0
        metrics = {}
        steps = len(ts)
        pg = ProgressBar(steps)
        for x, xch, y, lengths, ids in ts:
            x, xch, y = self._wrap(x, xch, y)
            pred = self.model((x, xch))
            loss = self.crit(pred, y)
            total_loss += loss.data[0]
            correct, count, overlaps, golds, guesses = self.process_output(
                pred, y, lengths, ids, None, None)
            total_correct += correct
            total_sum += count
            total_gold_count += golds
            total_guess_count += guesses
            total_overlap_count += overlaps
            pg.update()

        pg.done()
        total_acc = total_correct / float(total_sum)
        # Only show the fscore if requested
        metrics['f1'] = f_score(total_overlap_count, total_gold_count,
                                total_guess_count)
        metrics['acc'] = total_acc
        metrics['avg_loss'] = float(total_loss) / total_sum
        return metrics
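
`loss.data[0]` is the pre-0.4 PyTorch idiom for extracting a Python scalar from a loss tensor; on PyTorch 0.4 and later, indexing a 0-dimensional tensor raises an error, and the equivalent is:

    total_loss += loss.item()  # modern replacement for loss.data[0]

This is also the only variant that reports `avg_loss` alongside `f1` and `acc`.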
Code example #7
    def _test(self, ts, **kwargs):

        self.model.eval()
        total_correct = 0
        total_sum = 0
        total_gold_count = 0
        total_guess_count = 0
        total_overlap_count = 0
        metrics = {}
        steps = len(ts)
        conll_output = kwargs.get('conll_output', None)
        txts = kwargs.get('txts', None)
        handle = None
        if conll_output is not None and txts is not None:
            handle = open(conll_output, "w")
        pg = create_progress_bar(steps)
        for batch_dict in pg(ts):

            inputs = self.model.make_input(batch_dict)
            y = inputs.pop('y')
            lengths = inputs['lengths']
            ids = inputs['ids']
            pred = self.model(inputs)
            correct, count, overlaps, golds, guesses = self.process_output(
                pred, y.data, lengths, ids, handle, txts)
            total_correct += correct
            total_sum += count
            total_gold_count += golds
            total_guess_count += guesses
            total_overlap_count += overlaps

        total_acc = total_correct / float(total_sum)
        # Only show the fscore if requested
        metrics['f1'] = f_score(total_overlap_count, total_gold_count,
                                total_guess_count)
        metrics['acc'] = total_acc
        return metrics
Code example #8
File: train.py  Project: switchfootsid/baseline
    def _test(self, ts, **kwargs):

        self.model.train = False
        total_correct = 0
        total_sum = 0
        total_gold_count = 0
        total_guess_count = 0
        total_overlap_count = 0
        metrics = {}
        steps = len(ts)
        conll_output = kwargs.get('conll_output', None)
        txts = kwargs.get('txts', None)
        handle = None
        if conll_output is not None and txts is not None:
            handle = open(conll_output, "w")
        pg = create_progress_bar(steps)
        for batch_dict in ts:

            x, xch, lengths, y, ids = self.model.make_input(batch_dict)
            pred = self.model.predict((x, xch), lengths)

            correct, count, overlaps, golds, guesses = self.process_output(
                pred, y, lengths, ids, handle, txts)
            total_correct += correct
            total_sum += count
            total_gold_count += golds
            total_guess_count += guesses
            total_overlap_count += overlaps
            pg.update()

        pg.done()
        total_acc = total_correct / float(total_sum)
        # Only show the fscore if requested
        metrics['f1'] = f_score(total_overlap_count, total_gold_count,
                                total_guess_count)
        metrics['acc'] = total_acc
        return metrics