Example #1
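    # scatter the matched target coordinates and annotate each point with its match index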
    plt.scatter([match.target[0] for match in matches],
                [match.target[1] for match in matches])
    for i, match in enumerate(matches):
        plt.annotate(str(i), match.target)

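    # optionally show a histogram of the match distances below the cutoff of 2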
    if hist:
        plt.figure(2)
        plt.hist([i for i in distances if i < 2], bins=20)

    plt.show()


if __name__ == "__main__":
    import syllabus

    print(putil.join("  ", render("feature", SM, BOLD, BR + BLUE)))

    main = syllabus.Task("feature",
                         desc="AKAZE Feature Matcher Test Engine").start()

    m = AkazeMatcher(cv2.imread("../hx/ref.PNG"), task=main, name="HeroX Gate")
    d = Dataset()

    test_plot(m, d.get(), main.subtask())

    main.done(join=True)

    # print("\nTrace:")
    # div.div("-")
    # print(main.json(pretty=True))
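
The snippet above depends on a project-specific AkazeMatcher wrapper whose implementation is not shown. For orientation only, here is a minimal sketch of AKAZE feature matching with OpenCV's public API (cv2.AKAZE_create, BFMatcher with Hamming distance); the file names and the 0.7 ratio threshold are illustrative assumptions, not taken from the example.

import cv2

# Minimal AKAZE matching sketch (not the AkazeMatcher class used above).
ref = cv2.imread("ref.png", cv2.IMREAD_GRAYSCALE)      # placeholder paths
query = cv2.imread("query.png", cv2.IMREAD_GRAYSCALE)

akaze = cv2.AKAZE_create()
kp_ref, desc_ref = akaze.detectAndCompute(ref, None)
kp_query, desc_query = akaze.detectAndCompute(query, None)

# AKAZE descriptors are binary, so match with Hamming distance.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
pairs = matcher.knnMatch(desc_ref, desc_query, k=2)

# Lowe-style ratio test keeps only distinctive matches.
good = [m for m, n in (p for p in pairs if len(p) == 2)
        if m.distance < 0.7 * n.distance]

targets = [kp_query[m.trainIdx].pt for m in good]    # analogous to match.target above
distances = [m.distance for m in good]               # analogous to the histogrammed distances
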
Example #2
def train_self(model: nn.Module, dataset: Dataset, transform: Augmentation,
               criterion: nn.Module, optimizer: optim.Optimizer,
               scheduler: optim.lr_scheduler._LRScheduler,
               device: torch.device = None, args: argparse.Namespace = None, **kwargs) \
        -> Iterator[dict]:
    loader = data.DataLoader(dataset,
                             args.batch,
                             num_workers=args.worker,
                             drop_last=True,
                             shuffle=True,
                             collate_fn=Dataset.collate,
                             pin_memory=True)

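    # unlabeled pool handed to generate_pseudo() below, which assigns it pseudo-labels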
    pseudo_dataset = Dataset.get(args.type)(args.unlabeled,
                                            transform=dataset.transform,
                                            eval_only=True)
    pseudo_loader = data.DataLoader(pseudo_dataset,
                                    args.batch,
                                    num_workers=args.worker,
                                    drop_last=True,
                                    shuffle=True,
                                    collate_fn=Dataset.collate,
                                    pin_memory=True)

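    # cadence, counted in completed passes over the data, for regenerating pseudo-labels:
    # pseudo_first_step delays the first round, pseudo_step spaces the later ones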
    pseudo_step = args.pseudo_step
    pseudo_first_step = args.pseudo_first_step

    postfix = {}
    train_labeled, train_labeled_count = True, 0
    iterator, losses = iter(loader), list()
    loss = None  # not defined until the first batch; keeps the scheduler guard below safe

    with tqdm(total=args.epoch, initial=args.start_epoch) as tq:
        for iteration in range(args.start_epoch, args.epoch + 1):
            try:
                images, targets = next(iterator)

            except StopIteration:
                # end of a pass over the current loader: decide whether to
                # regenerate pseudo-labels and which loader to draw from next
                train_labeled_count += 1

                if train_labeled:
                    reached = (train_labeled_count > pseudo_first_step if pseudo_first_step
                               else train_labeled_count >= pseudo_step)
                    if reached:
                        result = generate_pseudo(model,
                                                 pseudo_loader.dataset,
                                                 transform,
                                                 device,
                                                 args,
                                                 iteration=iteration)

                        pseudo_first_step = train_labeled_count = 0
                        postfix.update(result)
                        iterator = iter(pseudo_loader)
                    else:
                        iterator = iter(loader)
                else:
                    iterator = iter(loader)

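                # alternate between labeled and pseudo-labeled passes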
                train_labeled = not train_labeled

                images, targets = next(iterator)

                if loss is not None and scheduler is not None:
                    scheduler.step()

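            # Variable is a no-op wrapper in modern PyTorch; .to(device) alone is equivalent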
            images = Variable(images.to(device), requires_grad=False)
            targets = [
                Variable(target.to(device), requires_grad=False)
                for target in targets
            ]

            output = model(images)
            optimizer.zero_grad()

            loc_loss, conf_loss = criterion(output, targets)
            loss = loc_loss + conf_loss
            losses.append(loss.item())
            loss.backward()
            optimizer.step()

            if torch.isnan(loss):
                print(f'NaN detected in {iteration}')

            postfix['loss'] = loss.item()
            tq.set_postfix(**postfix)
            tq.update(1)

            if args.save_epoch and not (iteration % args.save_epoch):
                torch.save(
                    model.state_dict(),
                    str(
                        Path(args.dest).joinpath(
                            f'{args.name}-{iteration:06}.pth')))

                yield {"iteration": iteration, **postfix}
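
The generator above is consumed by iterating over it. A hedged sketch of how it might be driven follows; build_model, VOCDataset, Augment and MultiBoxLoss are hypothetical stand-ins for whatever the surrounding project provides, and the argument values are illustrative only.

import argparse
import torch
from torch import optim

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args = argparse.Namespace(batch=32, worker=4, type="voc", unlabeled="data/unlabeled",
                          epoch=120000, start_epoch=0, pseudo_step=5, pseudo_first_step=10,
                          save_epoch=5000, dest="checkpoints", name="ssd-self")

model = build_model().to(device)              # hypothetical model factory
dataset = VOCDataset("data/train")            # hypothetical labeled dataset
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[80000, 100000])

for snapshot in train_self(model, dataset, Augment(), MultiBoxLoss(),
                           optimizer, scheduler, device=device, args=args):
    # yielded once per checkpoint, right after the weights are written to args.dest
    print(snapshot["iteration"], snapshot.get("loss"))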