Example #1
def test_yolo_v3(
    key: str,
    pretrained: Union[bool, str],
    pretrained_backbone: Union[bool, str],
    test_input: bool,
    match_const: Callable,
):
    """
    Check that a registry-created YOLOv3 model matches or differs from a
    directly constructed one as expected for the given pretrained settings,
    and that a forward pass yields detection outputs of the expected shape.
    """
    model = ModelRegistry.create(key, pretrained)
    diff_model = match_const(pretrained_backbone=pretrained_backbone)

    if pretrained:
        compare_model(model, diff_model, same=False)
        match_model = ModelRegistry.create(key, pretrained)
        compare_model(model, match_model, same=True)

    if pretrained_backbone and pretrained_backbone is not True:
        compare_model(model.backbone, diff_model.backbone, same=False)
        match_model = ModelRegistry.create(
            key, pretrained_backbone=pretrained_backbone)
        compare_model(diff_model.backbone, match_model.backbone, same=True)

    if test_input:
        input_shape = ModelRegistry.input_shape(key)
        batch = torch.randn(1, *input_shape)
        model.eval()
        outputs = model(batch)
        assert isinstance(outputs, list)
        for output in outputs:
            assert isinstance(output, torch.Tensor)
            # 5-dim per-head output whose last axis packs 4 box coords,
            # 1 objectness score, and 80 COCO class scores (4 + 1 + 80 = 85)
            assert output.dim() == 5
            assert output.size(-1) == 85
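These tests lean on a compare_model helper that is not shown in the snippets. A minimal sketch of what such a helper could look like, assuming it simply accumulates parameter differences; the tolerance and exact logic are assumptions, not necessarily the test suite's real implementation:

import torch
from torch.nn import Module


def compare_model(model_one: Module, model_two: Module, same: bool):
    # Sum the absolute element-wise difference over corresponding parameters;
    # identical weights yield a (near) zero total difference.
    total_diff = 0.0
    for param_one, param_two in zip(model_one.parameters(), model_two.parameters()):
        total_diff += (param_one.data - param_two.data).abs().sum().item()

    if same:
        assert total_diff < 1e-5
    else:
        assert total_diff > 1e-5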
Example #2
def test_ssd_resnets(
    key: str,
    pretrained: Union[bool, str],
    pretrained_backbone: Union[bool, str],
    test_input: bool,
    match_const: Callable,
):
    """
    Check that a registry-created SSD-ResNet model matches or differs from a
    directly constructed one as expected for the given pretrained settings,
    and that a forward pass yields box and score tensors of the expected shape.
    """
    model = ModelRegistry.create(key, pretrained)
    diff_model = match_const(pretrained_backbone=pretrained_backbone)

    if pretrained:
        compare_model(model, diff_model, same=False)
        match_model = ModelRegistry.create(key, pretrained)
        compare_model(model, match_model, same=True)

    if pretrained_backbone and pretrained_backbone is not True:
        compare_model(model.feature_extractor, diff_model.feature_extractor, same=False)
        match_model = ModelRegistry.create(key, pretrained_backbone=pretrained_backbone)
        compare_model(
            diff_model.feature_extractor, match_model.feature_extractor, same=True
        )

    if test_input:
        input_shape = ModelRegistry.input_shape(key)
        batch = torch.randn(1, *input_shape)
        model.eval()
        boxes, scores = model(batch)
        assert isinstance(boxes, torch.Tensor)
        assert isinstance(scores, torch.Tensor)
        assert boxes.dim() == 3
        assert scores.dim() == 3
        assert boxes.size(0) == 1
        assert boxes.size(1) == 4  # four box coordinates per default box
        assert scores.size(0) == 1
        assert boxes.size(2) == scores.size(2)  # check same num default boxes
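The assertions above imply boxes of shape [batch, 4, num_default_boxes] and scores of shape [batch, num_classes, num_default_boxes]; treating dim 1 of scores as the class axis is an assumption, not something the test verifies. A small sketch of inspecting those raw outputs, reusing model, key, and ModelRegistry from the snippet above:

model.eval()
with torch.no_grad():
    boxes, scores = model(torch.randn(1, *ModelRegistry.input_shape(key)))

# Best score and class id per default box, assuming dim 1 of `scores` indexes classes
best_scores, best_classes = scores.max(dim=1)
print(boxes.shape, best_scores.shape, best_classes.shape)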
Example #3
def main():
    """
    Driver function for the script
    """
    _parser = NmArgumentParser(
        dataclass_types=LRAnalysisArguments,
        description="Utility script to Run a "
        "learning rate sensitivity analysis "
        "for a desired image classification architecture",
    )
    args_, _ = _parser.parse_args_into_dataclasses()
    save_dir, loggers = helpers.get_save_dir_and_loggers(
        args_,
        task=CURRENT_TASK,
    )

    input_shape = ModelRegistry.input_shape(args_.arch_key)
    # assume shape [C, S, S] where S is the image size
    image_size = input_shape[1]

    (
        train_dataset,
        train_loader,
        val_dataset,
        val_loader,
    ) = helpers.get_train_and_validation_loaders(
        args_,
        image_size,
        task=CURRENT_TASK,
    )

    num_classes = helpers.infer_num_classes(args_, train_dataset, val_dataset)
    model = helpers.create_model(args_, num_classes)
    lr_sensitivity(args_, model, train_loader, save_dir)
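NmArgumentParser parses command-line flags straight into dataclass instances, as parse_args_into_dataclasses above suggests. As a conceptual illustration only, a minimal standard-library version of that pattern might look like the following; ExampleArguments and its fields are hypothetical stand-ins, not the real LRAnalysisArguments:

import argparse
from dataclasses import dataclass, fields


@dataclass
class ExampleArguments:  # hypothetical stand-in for LRAnalysisArguments
    arch_key: str = "resnet50"
    dataset: str = "imagenette"
    batch_size: int = 64


def parse_into_dataclass(cls):
    # Build one CLI flag per dataclass field, then rehydrate the dataclass.
    parser = argparse.ArgumentParser()
    for field in fields(cls):
        parser.add_argument(
            f"--{field.name.replace('_', '-')}", type=field.type, default=field.default
        )
    return cls(**vars(parser.parse_args()))


args = parse_into_dataclass(ExampleArguments)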
Example #4
def main():
    """
    Driver function for the script
    """
    _parser = NmArgumentParser(dataclass_types=TrainingArguments)
    training_args, _ = _parser.parse_args_into_dataclasses()

    save_dir, loggers = helpers.get_save_dir_and_loggers(
        training_args, task=CURRENT_TASK
    )

    input_shape = ModelRegistry.input_shape(training_args.arch_key)
    # assume shape [C, S, S] where S is the image size
    image_size = input_shape[1]

    (
        train_dataset,
        train_loader,
        val_dataset,
        val_loader,
    ) = helpers.get_train_and_validation_loaders(
        training_args, image_size, task=CURRENT_TASK
    )

    num_classes = helpers.infer_num_classes(
        training_args, train_dataset, val_dataset
    )

    # model creation
    model = helpers.create_model(training_args, num_classes)
    train(
        training_args, model, train_loader, val_loader, input_shape, save_dir, loggers
    )
Example #5
def create_model(args: Any, num_classes: int) -> Module:
    """
    :param args: object with configuration for model classes
    :param num_classes: Integer representing the number of output classes
    :returns: A Module object representing the created model
    """
    with torch_distributed_zero_first(args.local_rank):  # only download once locally
        if args.checkpoint_path == "zoo":
            if args.recipe_path and args.recipe_path.startswith("zoo:"):
                args.checkpoint_path = Zoo.download_recipe_base_framework_files(
                    args.recipe_path, extensions=[".pth"])[0]
            else:
                raise ValueError(
                    "'zoo' provided as --checkpoint-path but a SparseZoo stub"
                    " prefixed by 'zoo:' not provided as --recipe-path")

        model = ModelRegistry.create(
            args.arch_key,
            args.pretrained,
            args.checkpoint_path,
            args.pretrained_dataset,
            num_classes=num_classes,
            **args.model_kwargs,
        )
    print(f"created model: {model}")
    return model
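A hedged usage sketch for create_model; the args object below uses SimpleNamespace with placeholder field values chosen only to satisfy the branches above, not the training script's real defaults, and num_classes=1000 is illustrative:

from types import SimpleNamespace

args = SimpleNamespace(
    local_rank=-1,           # single process, no distributed download guard needed
    arch_key="resnet50",     # hypothetical ModelRegistry key
    pretrained=True,
    checkpoint_path=None,    # or "zoo" together with a "zoo:" recipe stub
    recipe_path=None,
    pretrained_dataset=None,
    model_kwargs={},
)
model = create_model(args, num_classes=1000)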
Example #6
def test_mnist(key: str, pretrained: Union[bool, str], test_input: bool):
    model = ModelRegistry.create(key, pretrained)
    diff_model = mnist_net()

    if pretrained:
        compare_model(model, diff_model, same=False)
        match_model = ModelRegistry.create(key, pretrained)
        compare_model(model, match_model, same=True)

    if test_input:
        input_shape = ModelRegistry.input_shape(key)
        batch = torch.randn(1, *input_shape)
        out = model(batch)
        assert isinstance(out, tuple)
        for tens in out:
            assert tens.shape[0] == 1
            assert tens.shape[1] == 10  # ten MNIST digit classes
Example #7
def test_torchvision_registry_models(key: str, pretrained: Union[bool, str],
                                     constructor: Callable):
    model = ModelRegistry.create(key, pretrained)
    diff_model = constructor(pretrained=False)
    compare_model(model, diff_model, same=False)

    if pretrained is True:
        # check torchvision weights are properly loaded
        match_model = constructor(pretrained=pretrained)
        compare_model(model, match_model, same=True)
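In practice such a test is driven by pytest parametrization, with a decorator placed directly above the definition. A hedged sketch follows; the registry keys paired with each torchvision constructor are assumptions, not a verified list from ModelRegistry:

import pytest
from torchvision import models


@pytest.mark.parametrize(
    "key,pretrained,constructor",
    [
        ("resnet18", False, models.resnet18),          # hypothetical registry key
        ("mobilenet_v2", True, models.mobilenet_v2),   # hypothetical registry key
    ],
)
def test_torchvision_registry_models(key, pretrained, constructor):
    ...  # body as in the snippet above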
Example #8
def test_efficientnet(
    key: str,
    pretrained: Union[bool, str],
    test_input: bool,
    match_const: Callable,
    model_args: dict,
):
    model = ModelRegistry.create(key, pretrained, **model_args)
    diff_model = match_const(**model_args)

    if pretrained:
        compare_model(model, diff_model, same=False)
        match_model = ModelRegistry.create(key, pretrained, **model_args)
        compare_model(model, match_model, same=True)

    if test_input:
        input_shape = ModelRegistry.input_shape(key)
        batch = torch.randn(1, *input_shape)
        model = model.eval()
        out = model(batch)
        assert isinstance(out, tuple)
        for tens in out:
            assert tens.shape[0] == 1
            assert tens.shape[1] == 1000  # 1000-way (ImageNet-style) class dimension
Example #9
def export_setup(args_: ExportArgs) -> Tuple[Module, Optional[str], Any]:
    """
    Pre-export setup

    :param args_: An ExportArgs object containing config for the export task.
    :return: tuple of the created model, the save directory, and the
        validation data loader
    """
    save_dir, loggers = helpers.get_save_dir_and_loggers(args_, task=CURRENT_TASK)
    input_shape = ModelRegistry.input_shape(args_.arch_key)
    # assume shape [C, S, S] where S is the image size
    image_size = input_shape[1]
    (
        train_dataset,
        train_loader,
        val_dataset,
        val_loader,
    ) = helpers.get_train_and_validation_loaders(
        args_, image_size, task=CURRENT_TASK
    )

    # model creation
    num_classes = helpers.infer_num_classes(args_, train_dataset, val_dataset)
    model = helpers.create_model(args_, num_classes)
    return model, save_dir, val_loader
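After this setup the model is typically exported. A hedged sketch using plain torch.onnx.export rather than any SparseML-specific exporter; it assumes args_ was parsed elsewhere in the script, that save_dir is not None, and that val_loader yields (inputs, ...) tuples:

import torch

model, save_dir, val_loader = export_setup(args_)
model.eval()

sample_batch, *_ = next(iter(val_loader))  # first validation batch as example input
torch.onnx.export(model, sample_batch, f"{save_dir}/model.onnx")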