Example #1
    def get_adapter_kwargs(self, models, optimizers, before_training_starts,
                           lr_multiplier, **kwargs):
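        # Move feature_combiner out of the Models container and into Misc;
        # scale D's learning rate by lr_multiplier.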
        feature_combiner = models.pop("feature_combiner")
        models = Models(models)
        optimizers = Optimizers(optimizers, multipliers={"D": lr_multiplier})
        misc = Misc({"feature_combiner": feature_combiner})

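        # Let Optuna sample each loss weight from [0, 1].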
        d_weight = self.optuna_trial.suggest_float("d_weight", 0, 1)
        g_weight = self.optuna_trial.suggest_float("g_weight", 0, 1)
        label_weight = self.optuna_trial.suggest_float("label_weight", 0, 1)

        d_weighter = MeanWeighter(scale=d_weight)
        g_weighter = MeanWeighter(
            weights={
                "g_src_domain_loss": g_weight,
                "g_target_domain_loss": g_weight,
                "c_loss": label_weight,
            })
        hook_kwargs = {
            "d_weighter": d_weighter,
            "g_weighter": g_weighter,
        }
        return {
            "models": models,
            "optimizers": optimizers,
            "misc": misc,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }
Example #2
    def get_adapter_kwargs(self, models, optimizers, before_training_starts,
                           lr_multiplier, **kwargs):
        models = Models(models)
        optimizers = Optimizers(optimizers, multipliers={"D": lr_multiplier})
        d_weight = self.optuna_trial.suggest_float("d_weight", 0, 1)
        g_weight = self.optuna_trial.suggest_float("g_weight", 0, 1)
        src_weight = self.optuna_trial.suggest_float("src_weight", 0, 1)
        target_weight = self.optuna_trial.suggest_float("target_weight", 0, 1)
        d_loss_weighter = MeanWeighter(scale=d_weight)
        g_loss_weighter = MeanWeighter(
            weights={
                "src_vat_loss": src_weight,
                "target_vat_loss": target_weight,
                "entropy_loss": target_weight,
                "g_src_domain_loss": g_weight,
                "g_target_domain_loss": g_weight,
            })

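        # Virtual adversarial training loss; epsilon sets the size of the
        # adversarial perturbation.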
        vat_loss_fn = VATLoss(epsilon=2)

        hook_kwargs = {
            "d_weighter": d_loss_weighter,
            "g_weighter": g_loss_weighter,
            "vat_loss_fn": vat_loss_fn,
        }

        return {
            "models": models,
            "optimizers": optimizers,
            "misc": None,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }
Example #3
    def get_adapter_kwargs(
        self, models, optimizers, before_training_starts, lr_multiplier, **kwargs
    ):
        models = Models(models)
        optimizers = Optimizers(
            optimizers,
            multipliers={
                "C": lr_multiplier,
                "D": lr_multiplier,
            },
        )
        domain_weight = self.optuna_trial.suggest_float("domain_weight", 0, 1)
        bridge_G_weight = self.optuna_trial.suggest_float("bridge_G_weight", 0, 1)
        bridge_D_weight = self.optuna_trial.suggest_float("bridge_D_weight", 0, 1)
        weighter = MeanWeighter(
            weights={
                "src_domain_loss": domain_weight,
                "target_domain_loss": domain_weight,
                "g_src_bridge_loss": bridge_G_weight,
                "d_src_bridge_loss": bridge_D_weight,
                "g_target_bridge_loss": bridge_G_weight,
                "d_target_bridge_loss": bridge_D_weight,
            }
        )

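        # Search the gradient reversal strength on a log scale, since useful
        # values span two orders of magnitude.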
        grl_weight = self.optuna_trial.suggest_float("grl_weight", 0.1, 10, log=True)
        hook_kwargs = {"weighter": weighter, "gradient_reversal_weight": grl_weight}

        return {
            "models": models,
            "optimizers": optimizers,
            "misc": None,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }
Example #4
    def get_adapter_kwargs(self, models, optimizers, before_training_starts,
                           lr_multiplier, **kwargs):
        models = Models(models)
        optimizers = Optimizers(
            optimizers,
            multipliers={"D": lr_multiplier},
        )
        d_scale = self.optuna_trial.suggest_float("d_scale", 0, 1)
        g_scale = self.optuna_trial.suggest_float("g_scale", 0, 1)
        d_weighter = MeanWeighter(scale=d_scale)
        g_weighter = MeanWeighter(scale=g_scale)
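        # The hook also receives a tuned discriminator-accuracy threshold.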
        d_accuracy_threshold = self.optuna_trial.suggest_float(
            "d_accuracy_threshold", 0, 1)
        hook_kwargs = {
            "threshold": d_accuracy_threshold,
            "d_weighter": d_weighter,
            "g_weighter": g_weighter,
        }
        return {
            "models": models,
            "optimizers": optimizers,
            "misc": None,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }
Example #5
    def get_adapter_kwargs(self, models, optimizers, before_training_starts,
                           lr_multiplier, **kwargs):
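        # This configuration doesn't use the discriminator, so drop it before
        # wrapping the containers.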
     del models["D"]
     models = Models(models)
     optimizers = Optimizers(optimizers, multipliers={"C": lr_multiplier})
     return {
         "models": models,
         "optimizers": optimizers,
         "misc": None,
         "before_training_starts": before_training_starts,
         "hook_kwargs": {},
     }
Example #6
    def get_adapter_kwargs(self, models, optimizers, before_training_starts,
                           lr_multiplier, **kwargs):
        feature_combiner = models.pop("feature_combiner")
        models = Models(models)
        optimizers = Optimizers(optimizers,
                                multipliers={"residual_model": lr_multiplier})
        misc = Misc({"feature_combiner": feature_combiner})

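        # Weights for the classification, feature-confusion, and entropy losses.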
        label_weight = self.optuna_trial.suggest_float("label_weight", 0, 1)

        confusion_weight = self.optuna_trial.suggest_float(
            "confusion_weight", 0, 1)
        entropy_weight = self.optuna_trial.suggest_float(
            "entropy_weight", 0, 1)
        weighter = MeanWeighter(
            weights={
                "c_loss": label_weight,
                "features_confusion_loss": confusion_weight,
                "entropy_loss": entropy_weight,
            })

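        # Build 2*exponent + 1 log-spaced kernel scales spanning exponents
        # -exponent..exponent for the multi-kernel MMD loss.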
        exponent = self.optuna_trial.suggest_int("exponent", 1, 8)
        num_kernels = (exponent * 2) + 1
        kernel_scales = get_kernel_scales(low=-exponent,
                                          high=exponent,
                                          num_kernels=num_kernels)
        hook_kwargs = {
            "weighter": weighter,
            "aligner_loss_fn": MMDLoss(kernel_scales=kernel_scales),
        }

        return {
            "models": models,
            "optimizers": optimizers,
            "misc": misc,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }
Example #7
    def get_adapter_kwargs(self, models, optimizers, before_training_starts,
                           lr_multiplier, **kwargs):
        models = Models(models)
        optimizers = Optimizers(
            optimizers,
            multipliers={"C": lr_multiplier},
        )

        domain_weight = self.optuna_trial.suggest_float("domain_weight", 0, 1)
        category_weight = self.optuna_trial.suggest_float(
            "category_weight", 0, 1)
        confusion_weight = self.optuna_trial.suggest_float(
            "confusion_weight", 0, 1)
        entropy_weight = self.optuna_trial.suggest_float(
            "entropy_weight", 0, 1)

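        # SymNets-style setup: the classifier and generator updates use
        # separate weighters.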
        c_weighter = MeanWeighter(
            weights={
                "c_symnets_src_domain_loss_0": domain_weight,
                "c_symnets_target_domain_loss_1": domain_weight,
            })
        g_weighter = MeanWeighter(
            weights={
                "symnets_category_loss": category_weight,
                "g_symnets_target_domain_loss_0": confusion_weight,
                "g_symnets_target_domain_loss_1": confusion_weight,
                "symnets_entropy_loss": entropy_weight,
            })

        hook_kwargs = {"c_weighter": c_weighter, "g_weighter": g_weighter}

        return {
            "models": models,
            "optimizers": optimizers,
            "misc": None,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }
Example #8
    def get_adapter_kwargs(
        self, models, optimizers, before_training_starts, lr_multiplier, **kwargs
    ):
        models = Models(models)
        optimizers = Optimizers(optimizers, multipliers={"C": lr_multiplier})

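        # One weighter covers the feature/logits confusion losses and the
        # classification loss.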
        confusion_weight = self.optuna_trial.suggest_float("confusion_weight", 0, 1)
        label_weight = self.optuna_trial.suggest_float("label_weight", 0, 1)
        weighter = MeanWeighter(
            weights={
                "features_confusion_loss": confusion_weight,
                "logits_confusion_loss": confusion_weight,
                "c_loss": label_weight,
            }
        )
        hook_kwargs = {"weighter": weighter}
        return {
            "models": models,
            "optimizers": optimizers,
            "misc": None,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }
Example #9
    def get_adapter_kwargs(
        self, models, optimizers, before_training_starts, lr_multiplier, **kwargs
    ):
        models = Models(models)
        optimizers = Optimizers(
            optimizers,
            multipliers={"C": lr_multiplier},
        )

        num_repeat = self.optuna_trial.suggest_int("num_repeat", 1, 10)

        label_weight = self.optuna_trial.suggest_float("label_weight", 0, 1)
        discrepancy_weight = self.optuna_trial.suggest_float("discrepancy_weight", 0, 1)

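        # One weighter per optimization step of the hook (an MCD-style x/y/z
        # split); "repeat" controls how many times the hook repeats its
        # discrepancy step.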
        x_weighter = MeanWeighter(scale=label_weight)
        y_weighter = MeanWeighter(
            weights={
                "c_loss0": label_weight,
                "c_loss1": label_weight,
                "discrepancy_loss": discrepancy_weight,
            }
        )
        z_weighter = MeanWeighter(scale=discrepancy_weight)
        hook_kwargs = {
            "repeat": num_repeat,
            "x_weighter": x_weighter,
            "y_weighter": y_weighter,
            "z_weighter": z_weighter,
        }

        return {
            "models": models,
            "optimizers": optimizers,
            "misc": None,
            "before_training_starts": before_training_starts,
            "hook_kwargs": hook_kwargs,
        }