Example No. 1
def test(model_file, test_file, filter_type=True, limit=-1, device=-1):
    context = utils.Saver.load_context(model_file)
    logger = logging.getLogger()
    logger.trace('# context: {}'.format(context))
    if context.seed is not None:
        utils.set_random_seed(context.seed, device)

    loader = context.builder.loader
    loader.filter_coord = filter_type
    encoder_input = context.encoder_input

    cont_embed_file_ext = _get_cont_embed_file_ext(encoder_input)
    use_cont_embed = cont_embed_file_ext is not None

    test_dataset = loader.load_with_external_resources(
        test_file, train=False, bucketing=False,
        size=None if limit < 0 else limit,
        use_external_postags=True,
        use_contextualized_embed=use_cont_embed,
        contextualized_embed_file_ext=cont_embed_file_ext,
        logger=logger)
    logger.info('{} samples loaded for test'.format(len(test_dataset)))

    model = context.builder.build()
    chainer.serializers.load_npz(model_file, model)
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu(device)

    parser = parsers.build_parser(loader, model)
    evaluator = eval_module.Evaluator(
        parser, logger=logger, report_details=True)
    reporter = training.listeners.Reporter(logger)

    logger.info('Start decoding')
    utils.chainer_train_off()
    evaluator.on_epoch_validate_begin({'epoch': -1})
    pbar = tqdm(total=len(test_dataset))
    for batch in test_dataset.batch(
            context.batch_size, colwise=True, shuffle=False):
        xs, ts = batch[:-1], batch[-1]
        ys = model.forward(*xs)
        loss = model.compute_loss(ys, ts)
        with reporter:
            values = dict(loss=float(chainer.cuda.to_cpu(loss.data)))
            model.compute_accuracy(ys, ts)
            for k, v in model.result.items():
                if 'loss' in k:
                    values[k] = float(chainer.cuda.to_cpu(v.data))
                elif 'accuracy' in k:
                    values[k] = v
            reporter.report(values)
        evaluator.on_batch_end({'train': False, 'xs': xs, 'ts': ts})
        pbar.update(len(ts))
    pbar.close()
    reporter._output_log("testing", reporter.get_summary(),
                         {'epoch': -1, 'size': len(test_dataset)})
    evaluator.on_epoch_validate_end({'epoch': -1})
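
A minimal invocation sketch for the entry point above; the paths are placeholders, and a non-negative device id selects a GPU just as the code does:

# Hypothetical paths: a model saved by utils.Saver and a test file in the
# loader's expected format; device=0 uses the first GPU, limit caps samples.
test("results/20200101-0001.npz", "data/test.txt", device=0, limit=1000)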
Example No. 2
def k_fold_cross_validation(data_set, k, pruning=False):
    accuracy = np.zeros(k)
    tree = cls.DecisionTreeClassifier()
    best_tree = cls.DecisionTreeClassifier()
    max_accuracy = 0
    trees = []
    prePruneConfMatrix = []
    postPruneConfMatrix = []

    for i in range(1, k + 1):
        # Split Data into training and testing data
        split = split_set(data_set, k, i, pruning)
        testing = split[0]
        training = split[1]
        training_x = training[:, :-1]
        training_y = [chr(i) for i in training.T[-1]]

        testing_y = [chr(i) for i in testing.T[-1]]

        # Train tree
        trees.append(cls.DecisionTreeClassifier())
        trees[i - 1].train(training_x, training_y)
        tree = trees[i - 1]

        if pruning:
            predictions = tree.predict(testing)
            confusion = ev.Evaluator.confusion_matrix(predictions, testing_y)
            prePruneConfMatrix.append(confusion)
            validation = split[2]
            tree.prune(
                (validation[:, :-1], [chr(i) for i in validation[:, -1]]))

        predictions = tree.predict(testing)

        # Evaluation metrics
        evaluator = ev.Evaluator()
        confusion = evaluator.confusion_matrix(predictions, testing_y)
        postPruneConfMatrix.append(confusion)
        accuracy[i - 1] = evaluator.accuracy(confusion)

        # Save tree with best accuracy
        if accuracy[i - 1] > max_accuracy:
            best_tree = trees[i - 1]
            max_accuracy = accuracy[i - 1]

    if pruning:
        print("Pre pruning metrics")
        analyseListOfConfMatrix(prePruneConfMatrix)
        print("Post pruning results")
        analyseListOfConfMatrix(postPruneConfMatrix)

    return accuracy, best_tree, trees
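
A usage sketch for the routine above, assuming data_set is a NumPy array whose last column holds integer class labels (as the chr(i) conversions imply); the file name is a placeholder:

import numpy as np

# Hypothetical dataset: feature columns followed by an integer label column.
data_set = np.loadtxt("train_full.txt", delimiter=",")

# 10-fold cross-validation with pruning of each fold's tree.
accuracy, best_tree, trees = k_fold_cross_validation(data_set, k=10, pruning=True)
print("Mean accuracy: {:.3f}".format(accuracy.mean()))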
Example No. 3
def prune(tree, node, validation, annotation, prev_node=None, left=None):
    """Visit every decision node and prune if removing its children does not affect the 
       accuracy, but stops trying removing a subtree when removing the bottom leaf nodes
       of the subtree hass been proven to decrease the accuracy. 
    """
    if isinstance(node, nd.LeafNode):
        return True

    if isinstance(node, nd.DecisionNode):
        # go to the bottom of decision tree
        true_branch = prune(tree, node.child_true, validation, annotation,
                            node, True)
        false_branch = prune(tree, node.child_false, validation, annotation,
                             node, False)
        if true_branch and false_branch:

            base_prediction = tree.predict(validation)
            evaluator = ev.Evaluator()
            base_confusion = evaluator.confusion_matrix(
                base_prediction, annotation)
            base_accuracy = evaluator.accuracy(base_confusion)

            cur_freq = hp.merge_freq(node.child_true.cur_freq,
                                     node.child_false.cur_freq)
            init_freq = node.child_true.init_freq

            # the root has no parent to rewrite, so it cannot be pruned here
            if prev_node is None:
                return False

            # back up the child that is about to be replaced by a leaf
            if left:
                saved = cp.deepcopy(prev_node.child_true)
                prev_node.child_true = nd.LeafNode(cur_freq, init_freq)
            else:
                saved = cp.deepcopy(prev_node.child_false)
                prev_node.child_false = nd.LeafNode(cur_freq, init_freq)

            # calculate the current accuracy
            cur_prediction = tree.predict(validation)
            cur_confusion = evaluator.confusion_matrix(cur_prediction,
                                                       annotation)
            cur_accuracy = evaluator.accuracy(cur_confusion)

            # compare the current and original accuracy
            if cur_accuracy >= base_accuracy:
                return True
            # pruning hurt accuracy, so restore the saved subtree
            if left:
                prev_node.child_true = saved
            else:
                prev_node.child_false = saved
            return False

    return False
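
A usage sketch, assuming the classifier exposes its root node as tree.root and that the validation split looks like the one in Example No. 2; both the attribute name and the data layout are assumptions:

# Hypothetical held-out split: a NumPy array with labels in the last column.
validation_x = validation[:, :-1]
validation_y = [chr(i) for i in validation[:, -1]]

# Walk the tree bottom-up, keeping any prune that does not hurt accuracy.
prune(tree, tree.root, validation_x, validation_y)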
Example No. 4
def prune_more(dt_classifier,
               node,
               validation,
               annotation,
               prev_node=None,
               node_class=None):
    """Visit every decision node and prune if removing its children does not affect the accuracy
    """
    if isinstance(node, nd.LeafNode):
        return node
    else:
        if isinstance(node.child_true, nd.DecisionNode):
            node.child_true = prune_more(dt_classifier, node.child_true,
                                         validation, annotation, node, True)
        if isinstance(node.child_false, nd.DecisionNode):
            node.child_false = prune_more(dt_classifier, node.child_false,
                                          validation, annotation, node, False)

        # save the current node
        node_backup = cp.deepcopy(node)

        # get original accuracy
        base_prediction = dt_classifier.predict(validation)
        evaluator = ev.Evaluator()
        base_confusion = evaluator.confusion_matrix(base_prediction,
                                                    annotation)
        base_accuracy = evaluator.accuracy(base_confusion)

        # the root node has no parent to rewrite, so return it unchanged
        if node_class is None:
            return node

        # prune the children by collapsing this node into a leaf
        cur_freq, init_freq = remove_children(node)
        node = nd.LeafNode(cur_freq, init_freq)
        if node_class:
            prev_node.child_true = node
        else:
            prev_node.child_false = node

        # get new accuracy
        cur_prediction = dt_classifier.predict(validation)
        cur_confusion = evaluator.confusion_matrix(cur_prediction, annotation)
        cur_accuracy = evaluator.accuracy(cur_confusion)

        # decide whether to keep the pruning
        if cur_accuracy >= base_accuracy:
            return node
        return node_backup
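
The same kind of sketch for this more aggressive variant; since prune_more returns the (possibly replaced) subtree, the root is reassigned. dt_classifier.root is again an assumed attribute, and validation_x/validation_y are the hypothetical split from the previous sketch:

# Collapse every decision node whose removal does not reduce accuracy.
dt_classifier.root = prune_more(dt_classifier, dt_classifier.root,
                                validation_x, validation_y)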
Example No. 5
def check_grammar(test_file, limit=-1, grammar_type=1):
    loader = dataset.DataLoader(filter_coord=True)
    test_dataset = loader.load(test_file,
                               train=True,
                               bucketing=False,
                               size=None if limit < 0 else limit)
    word_vocab = loader.get_processor('word').vocab
    from models.gold import GoldModel
    model = GoldModel()
    if grammar_type == 1:
        cfg = parsers.Grammar.CFG_COORD_1 + parsers.Grammar.CFG
    elif grammar_type == 2:
        cfg = parsers.Grammar.CFG_COORD_2 + parsers.Grammar.CFG
    else:
        raise ValueError("Invalid grammar type: {}".format(grammar_type))
    grammar = parsers.Grammar(word_vocab, cfg)
    parser = parsers.CkyParser(model, grammar)
    evaluator = eval_module.Evaluator(parser,
                                      logger=logging,
                                      report_details=False)
    n_corrects = 0
    pbar = tqdm(total=len(test_dataset))
    for batch in test_dataset.batch(size=20, colwise=True, shuffle=False):
        xs, ts = batch[:-1], batch[-1]
        true_coords_batch = ts
        model.set_gold(true_coords_batch)
        pred_coords_batch = evaluator._parser.parse(*xs, n_best=1)
        for i, (pred_coord_entries, true_coords) in \
                enumerate(zip(pred_coords_batch, true_coords_batch)):
            pred_coords, _score = pred_coord_entries[0]
            true_coords = {
                ckey: coord
                for ckey, coord in true_coords.items() if coord is not None
            }
            if pred_coords == true_coords:
                n_corrects += 1
            else:
                sentence = ' '.join(
                    [word_vocab.lookup(word_id) for word_id in xs[0][i]])
                print("SENTENCE: {}\nPRED: {}\nTRUE: {}\n-".format(
                    sentence, pred_coords, true_coords))
            evaluator.add(pred_coords, true_coords)
        pbar.update(len(ts))
    pbar.close()
    evaluator.report()
    logging.info("Number of correct tree: {}/{}".format(
        n_corrects, len(test_dataset)))
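
A hedged invocation sketch; the path is a placeholder:

# Hypothetical test file; grammar_type selects one of the two coordination CFGs.
check_grammar("data/test.txt", grammar_type=1)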
Example No. 6
def print_results(predictions, labels, name):
    evaluator = ev.Evaluator()

    confusion = evaluator.confusion_matrix(predictions, labels)
    accuracy = evaluator.accuracy(confusion)
    precision = evaluator.precision(confusion)
    recall = evaluator.recall(confusion)
    f1_score = evaluator.f1_score(confusion)

    print(" ")
    print(" ")
    print("Summary evaluation for " + str(name))
    print("____________________________________")
    print("Confusion Matrix: ")
    print(str(confusion))
    print("Accuracy: " + str(accuracy))
    print("Precision: " + str(precision))
    print("Recall: " + str(recall))
    print("F1 Score: " + str(f1_score))
    print("____________________________________")
Example No. 7
import utils
from datasets import CocoSingleKPS
from transform import get_single_kps_transforms

IMAGE_SIZE = 128, 128

args, remaining_args = utils.get_args()
engine = eng.Engine.command_line_init(args=remaining_args)
mean = CocoSingleKPS.MEAN
std = CocoSingleKPS.STD
coco_train = CocoSingleKPS.from_data_path(args.data_path,
                                          train=True,
                                          transforms=get_single_kps_transforms(
                                              True, IMAGE_SIZE, mean, std))
coco_val = CocoSingleKPS.from_data_path(args.data_path,
                                        train=False,
                                        transforms=get_single_kps_transforms(
                                            False, IMAGE_SIZE, mean, std))

num_instructions = len(coco_utils.KEYPOINTS)
model = models.resnet18(td_outplanes=64, num_instructions=num_instructions)
td_head = models.TDHead(num_channels=num_instructions)
model = models.SequentialInstructor(model,
                                    num_instructions,
                                    td_head=td_head,
                                    skip_lateral=args.skip_lateral)

evaluator = eval.Evaluator(original_size=IMAGE_SIZE, loss=args.loss)
plot = eval.Visualizer(CocoSingleKPS.MEAN, CocoSingleKPS.STD)
engine.run(model, coco_train, coco_val, evaluator, plot_fn=plot)
Example No. 8
def train(train_file,
          test_file=None,
          format='tree',
          embed_file=None,
          n_epoch=20,
          batch_size=20,
          lr=0.001,
          limit=-1,
          l2_lambda=0.0,
          grad_clip=5.0,
          encoder_input=('char', 'postag'),
          model_config=None,
          device=-1,
          save_dir=None,
          seed=None,
          cache_dir='',
          refresh_cache=False,
          bert_model=0,
          bert_dir=''):
    if seed is not None:
        utils.set_random_seed(seed, device)
    logger = logging.getLogger()
    # logger.configure(filename='log.txt', logdir=save_dir)
    assert isinstance(logger, logging.AppLogger)
    if model_config is None:
        model_config = {}
    model_config['bert_model'] = bert_model
    model_config['bert_dir'] = bert_dir

    os.makedirs(save_dir, exist_ok=True)

    read_genia = format == 'genia'
    loader = dataset.DataLoader.build(
        postag_embed_size=model_config.get('postag_embed_size', 50),
        char_embed_size=model_config.get('char_embed_size', 10),
        word_embed_file=embed_file,
        filter_coord=(not read_genia),
        refresh_cache=refresh_cache,
        format=format,
        cache_options=dict(dir=cache_dir, mkdir=True, logger=logger),
        extra_ids=(git.hash(), ))

    use_external_postags = not read_genia
    cont_embed_file_ext = _get_cont_embed_file_ext(encoder_input)
    use_cont_embed = cont_embed_file_ext is not None

    train_dataset = loader.load_with_external_resources(
        train_file,
        train=True,
        bucketing=False,
        size=None if limit < 0 else limit,
        refresh_cache=refresh_cache,
        use_external_postags=use_external_postags,
        use_contextualized_embed=use_cont_embed,
        contextualized_embed_file_ext=cont_embed_file_ext)
    logging.info('{} samples loaded for training'.format(len(train_dataset)))
    test_dataset = None
    if test_file is not None:
        test_dataset = loader.load_with_external_resources(
            test_file,
            train=False,
            bucketing=False,
            size=None if limit < 0 else limit // 10,
            refresh_cache=refresh_cache,
            use_external_postags=use_external_postags,
            use_contextualized_embed=use_cont_embed,
            contextualized_embed_file_ext=cont_embed_file_ext)
        logging.info('{} samples loaded for validation'.format(
            len(test_dataset)))

    builder = models.CoordSolverBuilder(loader,
                                        inputs=encoder_input,
                                        **model_config)
    logger.info("{}".format(builder))
    model = builder.build()
    logger.trace("Model: {}".format(model))
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu(device)

    if bert_model == 1:
        optimizer = chainer.optimizers.AdamW(alpha=lr)
        optimizer.setup(model)
        # optimizer.add_hook(chainer.optimizer.GradientClipping(1.))
    else:
        optimizer = chainer.optimizers.AdamW(alpha=lr,
                                             beta1=0.9,
                                             beta2=0.999,
                                             eps=1e-08)
        optimizer.setup(model)
        if l2_lambda > 0.0:
            optimizer.add_hook(chainer.optimizer.WeightDecay(l2_lambda))
        if grad_clip > 0.0:
            optimizer.add_hook(chainer.optimizer.GradientClipping(grad_clip))

    def _report(y, t):
        values = {}
        model.compute_accuracy(y, t)
        for k, v in model.result.items():
            if 'loss' in k:
                values[k] = float(chainer.cuda.to_cpu(v.data))
            elif 'accuracy' in k:
                values[k] = v
        training.report(values)

    trainer = training.Trainer(optimizer, model, loss_func=model.compute_loss)
    trainer.configure(utils.training_config)
    trainer.add_listener(
        training.listeners.ProgressBar(lambda n: tqdm(total=n)), priority=200)
    trainer.add_hook(training.BATCH_END,
                     lambda data: _report(data['ys'], data['ts']))
    if test_dataset:
        parser = parsers.build_parser(loader, model)
        evaluator = eval_module.Evaluator(parser,
                                          logger=logging,
                                          report_details=False)
        trainer.add_listener(evaluator)

    if bert_model == 2:
        num_train_steps = 20000 * 5 / 20
        num_warmup_steps = 10000 / 20
        learning_rate = 2e-5
        # learning rate (eta) scheduling in Adam
        lr_decay_init = learning_rate * \
            (num_train_steps - num_warmup_steps) / num_train_steps
        trainer.add_hook(
            training.BATCH_END,
            extensions.LinearShift(  # decay
                'eta', (lr_decay_init, 0.),
                (num_warmup_steps, num_train_steps),
                optimizer=optimizer))
        trainer.add_hook(
            training.BATCH_END,
            extensions.WarmupShift(  # warmup
                'eta',
                0.,
                num_warmup_steps,
                learning_rate,
                optimizer=optimizer))

    if save_dir is not None:
        accessid = logging.getLogger().accessid
        date = logging.getLogger().accesstime.strftime('%Y%m%d')
        # metric = 'whole' if isinstance(model, models.Teranishi17) else 'inner'
        metric = 'exact'
        trainer.add_listener(
            utils.Saver(
                model,
                basename="{}-{}".format(date, accessid),
                context=dict(App.context, builder=builder),
                directory=save_dir,
                logger=logger,
                save_best=True,
                evaluate=(lambda _: evaluator.get_overall_score(metric))))

    trainer.fit(train_dataset, test_dataset, n_epoch, batch_size)
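
A hedged invocation sketch mirroring the defaults above; all paths are placeholders:

# Hypothetical paths; device=0 trains on the first GPU, and save_dir receives
# the best checkpoint selected by the 'exact' metric.
train("data/train.txt", test_file="data/dev.txt",
      n_epoch=20, batch_size=20, device=0, save_dir="results/")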
Example No. 9
import torchvision.transforms as T  # assumed source of T.Normalize below

import transform
import utils
from datasets import CocoSingleKPS

IMAGE_SIZE = 256, 256

data_path, remaining_args = utils.get_args()
engine = eng.Engine.command_line_init(args=remaining_args)

data_transform = transform.Compose([
    transform.ResizeKPS(IMAGE_SIZE),
    transform.extract_keypoints,
    transform.ToTensor(),
    transform.ImageTargetWrapper(T.Normalize(CocoSingleKPS.MEAN, CocoSingleKPS.STD))
])

selected_kps = ['left_eye', 'right_eye']
coco_train = CocoSingleKPS.from_data_path(data_path, train=True, transforms=data_transform, keypoints=selected_kps)
coco_val = CocoSingleKPS.from_data_path(data_path, train=False, transforms=data_transform, keypoints=selected_kps)

num_instructions = len(selected_kps)
model = models.resnet50(td_outplanes=1, num_instructions=num_instructions)
if len(selected_kps) == 1:
    model.one_iteration()
model = models.SequentialInstructor(model, num_instructions)

train_eval = eval.Evaluator()
val_eval = eval.Evaluator()
plot = eval.Visualizer(CocoSingleKPS.MEAN, CocoSingleKPS.STD)
engine.run(model, coco_train, coco_val, train_eval, val_eval, plot_fn=plot)