Example No. 1
def get_model(cfg, dataset, num_classes):
    """
    Builds and returns a model and its associated hyperparameters according to the config object passed.

    Args:
        cfg: A YACS config object.
        dataset: A multi-domain dataset consisting of source and target datasets.
        num_classes: The number of classes in the dataset.
    """

    # setup feature extractor
    feature_network, class_feature_dim, domain_feature_dim = get_video_feat_extractor(
        cfg.MODEL.METHOD.upper(), cfg.DATASET.IMAGE_MODALITY, cfg.MODEL.ATTENTION, num_classes
    )
    # setup classifier
    classifier_network = ClassNetVideo(input_size=class_feature_dim, n_class=num_classes)

    config_params = get_config(cfg)
    train_params = config_params["train_params"]
    train_params_local = deepcopy(train_params)
    method_params = {}

    method = domain_adapter.Method(cfg.DAN.METHOD)

    if method.is_mmd_method():
        model = video_domain_adapter.create_mmd_based_video(
            method=method,
            dataset=dataset,
            image_modality=cfg.DATASET.IMAGE_MODALITY,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            **method_params,
            **train_params_local,
        )
    else:
        critic_input_size = domain_feature_dim
        # setup critic network
        if method.is_cdan_method():
            if cfg.DAN.USERANDOM:
                critic_input_size = cfg.DAN.RANDOM_DIM
            else:
                critic_input_size = domain_feature_dim * num_classes
        critic_network = DomainNetVideo(input_size=critic_input_size)

        if cfg.DAN.METHOD == "CDAN":
            method_params["use_random"] = cfg.DAN.USERANDOM

        # The following calls kale.loaddata.dataset_access for the first time
        model = video_domain_adapter.create_dann_like_video(
            method=method,
            dataset=dataset,
            image_modality=cfg.DATASET.IMAGE_MODALITY,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            critic=critic_network,
            **method_params,
            **train_params_local,
        )

    return model, train_params
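
A minimal usage sketch for this get_model follows, assuming a YACS config and a video dataset built as in Example No. 4; the config helper, the config file name, and the PyTorch Lightning smoke run are assumptions, not part of the snippet above.

# Hedged usage sketch for Example No. 1. `get_cfg_defaults` is a hypothetical
# YACS config factory and the config file name is made up. `dataset` and
# `num_classes` are assumed to come from a video dataset built as in
# Example No. 4 (VideoDataset.get_source_target + VideoMultiDomainDatasets).
import pytorch_lightning as pl

from config import get_cfg_defaults  # hypothetical config module

cfg = get_cfg_defaults()
cfg.merge_from_file("configs/video_da_example.yaml")  # hypothetical file

model, train_params = get_model(cfg, dataset, num_classes)

# The returned model is assumed to be a LightningModule (as in PyKale's
# domain_adapter pipeline), so a short smoke run can look like this.
trainer = pl.Trainer(max_epochs=1)
trainer.fit(model)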
Example No. 2
def get_model(cfg, dataset, num_channels):
    """
    Builds and returns a model and its associated hyperparameters according to the config object passed.

    Args:
        cfg: A YACS config object.
        dataset: A multi-domain dataset consisting of source and target datasets.
        num_channels: The number of image channels.
    """

    # setup feature extractor
    feature_network, feature_dim = get_feat_extractor(cfg.MODEL.METHOD.upper(),
                                                      cfg.DATASET.NUM_CLASSES,
                                                      num_channels)
    # setup classifier
    classifier_network = ClassNetSmallImage(feature_dim,
                                            cfg.DATASET.NUM_CLASSES)

    config_params = get_config(cfg)
    train_params = config_params["train_params"]
    train_params_local = deepcopy(train_params)
    method_params = {}

    method = domain_adapter.Method(cfg.DAN.METHOD)

    if method.is_mmd_method():
        model = domain_adapter.create_mmd_based(
            method=method,
            dataset=dataset,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            **method_params,
            **train_params_local,
        )
    else:
        critic_input_size = feature_dim
        # setup critic network
        if method.is_cdan_method():
            if cfg.DAN.USERANDOM:
                critic_input_size = cfg.DAN.RANDOM_DIM
            else:
                critic_input_size = feature_dim * cfg.DATASET.NUM_CLASSES
        critic_network = DomainNetSmallImage(critic_input_size)

        if cfg.DAN.METHOD == "CDAN":
            method_params["use_random"] = cfg.DAN.USERANDOM

        # The following calls kale.loaddata.dataset_access for the first time
        model = domain_adapter.create_dann_like(
            method=method,
            dataset=dataset,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            critic=critic_network,
            **method_params,
            **train_params_local,
        )

    return model, train_params
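
To make the critic input-size rule in the non-MMD branch concrete, a small worked illustration follows; the numbers are assumptions chosen only for the arithmetic, not values from any config.

# Worked illustration of the critic input-size rule used above.
# The concrete numbers are assumptions, not taken from a real config.
feature_dim = 128     # e.g. output size of the small-image feature extractor
num_classes = 10      # e.g. digit classification
random_dim = 1024     # plays the role of cfg.DAN.RANDOM_DIM

# Plain DANN-like critics see the feature vector directly.
critic_in_dann = feature_dim                    # 128
# CDAN without random projection conditions the critic on the outer product
# of features and class predictions, hence feature_dim * num_classes.
critic_in_cdan = feature_dim * num_classes      # 1280
# CDAN with the randomized multilinear map uses a fixed projection size.
critic_in_cdan_random = random_dim              # 1024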
Example No. 3
def test_domain_adaptor(da_method, n_fewshot, download_path, testing_cfg):
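    """Smoke-test a domain adaptation method on the digits source/target pair."""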
    if n_fewshot is None:
        if da_method in ["FSDANN", "MME", "Source"]:
            return
    else:
        if da_method in [
                "DANN", "CDAN", "CDAN-E", "WDGRL", "WDGRLMod", "DAN", "JAN"
        ]:
            return

    source, target, num_channels = DigitDataset.get_source_target(
        DigitDataset(SOURCE), DigitDataset(TARGET), download_path)
    dataset = MultiDomainDatasets(source,
                                  target,
                                  config_weight_type=WEIGHT_TYPE,
                                  config_size_type=DATASIZE_TYPE,
                                  n_fewshot=n_fewshot)

    # setup feature extractor
    feature_network = SmallCNNFeature(num_channels)
    # setup classifier
    feature_dim = feature_network.output_size()
    classifier_network = ClassNetSmallImage(feature_dim, NUM_CLASSES)
    train_params = testing_cfg["train_params"]
    method_params = {}
    da_method = domain_adapter.Method(da_method)

    if da_method.is_mmd_method():
        model = domain_adapter.create_mmd_based(
            method=da_method,
            dataset=dataset,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            **method_params,
            **train_params,
        )
    else:  # All other non-MMD DA methods are DANN-like and use a critic
        critic_input_size = feature_dim
        # setup critic network
        if da_method.is_cdan_method():
            critic_input_size = 1024
            method_params["use_random"] = True

        critic_network = DomainNetSmallImage(critic_input_size)

        # The following calls kale.loaddata.dataset_access for the first time
        model = domain_adapter.create_dann_like(
            method=da_method,
            dataset=dataset,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            critic=critic_network,
            **method_params,
            **train_params,
        )

    ModelTestHelper.test_model(model, train_params)
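
In a pytest module, a test like this is normally driven by parametrization, and the early returns at the top skip method/few-shot combinations that do not apply. The sketch below shows one plausible wiring; the parameter lists and markers are assumptions inferred from the method names in the test body, not the original ones.

import pytest

# Hypothetical parametrization for test_domain_adaptor; the value lists are
# assumptions. Few-shot methods (FSDANN, MME, Source) run only when n_fewshot
# is set, while the standard unsupervised methods run only when it is None.
@pytest.mark.parametrize("da_method", ["DANN", "CDAN", "CDAN-E", "WDGRL", "WDGRLMod", "DAN", "JAN", "FSDANN", "MME", "Source"])
@pytest.mark.parametrize("n_fewshot", [None, 2])
def test_domain_adaptor(da_method, n_fewshot, download_path, testing_cfg):
    ...  # body as in Example No. 3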
Example No. 4
def test_video_domain_adapter(source_cfg, target_cfg, image_modality,
                              da_method, testing_cfg, testing_training_cfg):
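    """Smoke-test a video domain adaptation method on a source/target video dataset pair."""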
    source_name, source_n_class, source_trainlist, source_testlist = source_cfg.split(
        ";")
    target_name, target_n_class, target_trainlist, target_testlist = target_cfg.split(
        ";")

    # get cfg parameters
    cfg = testing_cfg
    cfg.DATASET.SOURCE = source_name
    cfg.DATASET.SRC_TRAINLIST = source_trainlist
    cfg.DATASET.SRC_TESTLIST = source_testlist
    cfg.DATASET.TARGET = target_name
    cfg.DATASET.TGT_TRAINLIST = target_trainlist
    cfg.DATASET.TGT_TESTLIST = target_testlist
    cfg.DATASET.IMAGE_MODALITY = image_modality
    cfg.DATASET.WEIGHT_TYPE = WEIGHT_TYPE
    cfg.DATASET.SIZE_TYPE = DATASIZE_TYPE
    cfg.DAN.USERANDOM = False

    # download example data
    download_file_by_url(
        url=url,
        output_directory=str(Path(cfg.DATASET.ROOT).parent.absolute()),
        output_file_name="video_test_data.zip",
        file_format="zip",
    )

    # build dataset
    source, target, num_classes = VideoDataset.get_source_target(
        VideoDataset(source_name), VideoDataset(target_name), seed, cfg)

    dataset = VideoMultiDomainDatasets(
        source,
        target,
        image_modality=cfg.DATASET.IMAGE_MODALITY,
        seed=seed,
        config_weight_type=cfg.DATASET.WEIGHT_TYPE,
        config_size_type=cfg.DATASET.SIZE_TYPE,
    )

    # setup feature extractor
    if cfg.DATASET.IMAGE_MODALITY in ["rgb", "flow"]:
        class_feature_dim = 1024
        domain_feature_dim = class_feature_dim
        if cfg.DATASET.IMAGE_MODALITY == "rgb":
            feature_network = {"rgb": VideoBoringModel(3), "flow": None}
        else:
            feature_network = {"rgb": None, "flow": VideoBoringModel(2)}
    else:
        class_feature_dim = 2048
        domain_feature_dim = int(class_feature_dim / 2)
        feature_network = {
            "rgb": VideoBoringModel(3),
            "flow": VideoBoringModel(2)
        }

    # setup classifier
    classifier_network = ClassNetVideo(input_size=class_feature_dim,
                                       n_class=num_classes)
    train_params = testing_training_cfg["train_params"]
    method_params = {}
    method = domain_adapter.Method(da_method)

    # setup DA method
    if method.is_mmd_method():
        model = video_domain_adapter.create_mmd_based_video(
            method=method,
            dataset=dataset,
            image_modality=cfg.DATASET.IMAGE_MODALITY,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            **method_params,
            **train_params,
        )
    else:
        critic_input_size = domain_feature_dim
        # setup critic network
        if method.is_cdan_method():
            if cfg.DAN.USERANDOM:
                critic_input_size = 1024
            else:
                critic_input_size = domain_feature_dim * num_classes
        critic_network = DomainNetVideo(input_size=critic_input_size)

        if da_method == "CDAN":
            method_params["use_random"] = cfg.DAN.USERANDOM

        model = video_domain_adapter.create_dann_like_video(
            method=method,
            dataset=dataset,
            image_modality=cfg.DATASET.IMAGE_MODALITY,
            feature_extractor=feature_network,
            task_classifier=classifier_network,
            critic=critic_network,
            **method_params,
            **train_params,
        )

    ModelTestHelper.test_model(model, train_params)
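
The source_cfg and target_cfg strings parsed at the top of this test follow a "name;n_class;trainlist;testlist" layout, as implied by the split(";") calls; a hedged sketch with made-up values:

# Hedged sketch of the semicolon-separated dataset config strings parsed in
# Example No. 4. The dataset name, class count, and list-file names are made
# up for illustration.
SOURCE_CFG = "EPIC;8;epic_D1_train.pkl;epic_D1_test.pkl"
TARGET_CFG = "EPIC;8;epic_D2_train.pkl;epic_D2_test.pkl"

source_name, source_n_class, source_trainlist, source_testlist = SOURCE_CFG.split(";")
target_name, target_n_class, target_trainlist, target_testlist = TARGET_CFG.split(";")
print(source_name, source_n_class)  # "EPIC", "8" (the fields are strings)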