Example #1
 def configs() -> Configs:
     c = Configs()
     c.add(
         name="learning_rate",
         type=float,
         default=0.001,
         choices=(5e-7, 5e-1),
         strategy="loguniform",
         description="Learning rate.",
     )
     c.add(
         name="optimizer_beta1",
         type=float,
         default=0.9,
         choices=(0, 0.999),
         strategy="uniform",
         description="Beta 1.",
     )
     c.add(
         name="optimizer_beta2",
         type=float,
         default=0.999,
         choices=(0, 0.99999),
         strategy="uniform",
         description="Beta 2.",
     )
     c.add(
         name="weight_decay",
         type=float,
         default=1e-2,
         choices=(1e-6, 1e-1),
         strategy="loguniform",
         description="Weight decay.",
     )
     return c
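
The `Configs` object used throughout these examples comes from the `ride` library. For readers running the excerpts without it installed, a minimal stand-in that mimics the `add`-and-return pattern could look like the sketch below; the class layout and field names are illustrative assumptions, not the library's actual implementation.

 from dataclasses import dataclass
 from typing import Any, Dict, Optional, Sequence


 @dataclass
 class ConfigEntry:
     name: str
     type: type
     default: Any = None
     strategy: str = "constant"
     choices: Optional[Sequence] = None
     description: str = ""


 class Configs:
     def __init__(self):
         self._entries: Dict[str, ConfigEntry] = {}

     def add(self, name, type, default=None, strategy="constant",
             choices=None, description=""):
         # Register one hyperparameter together with its search metadata
         self._entries[name] = ConfigEntry(
             name, type, default, strategy, choices, description
         )

     @property
     def names(self):
         return list(self._entries)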
Example #2
 def configs() -> Configs:
     c = Configs()
     c.add(
         name="optimization_metric",
         default="loss",
         type=str,
         choices=MetricMixin.metric_names(),
         description=
         "Name of the performance metric that should be optimized",
     )
     c.add(
         name="test_ensemble",
         type=int,
         default=0,
         strategy="constant",
         description=
         "Flag indicating whether the test dataset should yield a clip ensemble.",
     )
     c.add(
         name="gpus",
         type=str,
         default=None,
         strategy="constant",
         description=
         "Which gpus should be used. Can be either the number of gpus (e.g. '2') or a list of gpus (e.g. ('0,1').",
     )
     c.add(
         name="loss",
         type=str,
         default="cross_entropy",
         choices=loss_names,
         strategy="constant",
         description="Loss function used during optimisation.",
     )
     return c
Example #3
 def configs(self) -> Configs:
     c = Configs()
     c.add(
         name="trials",
         default=30,
         type=int,
         description="Number of trials in the hyperparameter search",
     )
     c.add(
         name="gpus_per_trial",
         default=0,
         type=int,
         description="Number of GPUs per trail in the hyperparameter search",
     )
     c.add(
         name="optimization_metric",
         default="loss",
         type=str,
         choices=self.Module.metric_names(),
         description="Name of the performance metric that should be optimized",
     )
     c.add(
         name="from_hparam_space_file",
         default=None,
         type=str,
         description="Path to file with specification for the search space during hyper-parameter optimisation",
     )
     return c
Example #4
 def configs() -> Configs:
     c = Configs()
     c.add(
         name="target_gflops",
         type=float,
         default=2.0,
         strategy="constant",
         description="Target (Giga) Floating Point Operations per Second.",
     )
     return c
Example #5
 def configs():
     c = Configs()
     c.add(
         name="hidden_dim",
         type=int,
         default=128,
         strategy="choice",
         choices=[128, 256, 512, 1024],
         description="Number of hiden units.",
     )
     return c
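
Across Examples #1 through #5, four strategies appear: `loguniform`, `uniform`, `choice`, and `constant`. A plausible translation into Ray Tune samplers, in the spirit of the `tune_config()` call seen later in Example #12, is sketched below; the mapping is inferred from the strategy names and `choices` shapes, not taken from the library's code.

 from ray import tune

 def to_tune_sampler(strategy, choices):
     # Inferred mapping from a config entry's strategy to a Ray Tune sampler
     if strategy == "loguniform":
         low, high = choices
         return tune.loguniform(low, high)  # sample on a logarithmic scale
     if strategy == "uniform":
         low, high = choices
         return tune.uniform(low, high)  # sample linearly in [low, high]
     if strategy == "choice":
         return tune.choice(list(choices))  # sample from a discrete set
     return None  # "constant": keep the default; exclude from the search

 # e.g. hidden_dim from Example #5:
 sampler = to_tune_sampler("choice", [128, 256, 512, 1024])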
Example #6
 def configs() -> Configs:
     c = Configs()
     c.add(
         name="extract_features_after_layer",
         default="",
         type=str,
         description=(
             "Layer name after which to extract features. "
             "Nested layers may be selected using dot-notation, "
             "e.g. `block.subblock.layer1`"
         ),
     )
     return c
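
The dot-notation mentioned in the description implies a chain of attribute lookups on the model. A minimal, hypothetical resolver is shown below; note that recent PyTorch versions also expose `nn.Module.get_submodule`, which accepts the same dot-separated paths.

 from functools import reduce

 def resolve_layer(model, path: str):
     # Resolve e.g. "block.subblock.layer1" by chained attribute access.
     # Hypothetical helper for illustration; ride's internals may differ.
     return reduce(getattr, path.split("."), model)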
Example #7
 def configs() -> Configs:
     c = Configs()
     c.add(
         name="learning_rate",
         type=float,
         default=0.1,
         choices=(5e-2, 5e-1),
         strategy="loguniform",
         description="Learning rate.",
     )
     c.add(
         name="weight_decay",
         type=float,
         default=1e-5,
         choices=(1e-6, 1e-3),
         strategy="loguniform",
         description="Weight decay.",
     )
     c.add(
         name="momentum",
         type=float,
         default=0.9,
         choices=(0, 0.999),
         strategy="uniform",
         description="Momentum.",
     )
     return c
Example #8
 def configs() -> Configs:
     c = Configs()
     c.add(
         name="metric_selection",
         default=default_config,
         type=str,
         strategy="constant",
         description="Selection key for MetricSelector.",
         choices=list(mapping.keys()),
     )
     for Metric in metric_set:
         if hasattr(Metric, "configs"):
             c += Metric.configs()
     return c
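
Example #8 composes configs with `c += Metric.configs()`, so `Configs` evidently supports in-place merging. On the stand-in sketched after Example #1, that could be as little as the following; the semantics (later additions win on name collisions) are an assumption.

 class MergeableConfigs(Configs):  # extends the stand-in from Example #1
     def __iadd__(self, other: "Configs") -> "Configs":
         # Merge another Configs into this one; later entries win on collision
         self._entries.update(other._entries)
         return self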
Example #9
 def configs() -> Configs:
     c = Configs.collect(DummyRegressionDataLoader)
     c.add(
         name="input_shape",
         type=int,
         default=10,
         strategy="constant",
         description="Input shape for data.",
     )
     return c
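
`Configs.collect(DummyRegressionDataLoader)` seeds the result with the configs a component class declares, which the caller then extends with `add`. A hedged guess at the behavior, written as a standalone function:

 def collect_configs(cls) -> "Configs":
     # Hypothetical equivalent of Configs.collect: start from the class's
     # own declared configs (if any) so the caller can extend them.
     return cls.configs() if hasattr(cls, "configs") else Configs()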
Example #10
 def configs():
     c = Configs.collect(MnistDataset)
     c.add(
         name="val_split",
         type=int,
         default=5000,
         strategy="constant",
         description=
         "Number samples from train dataset used for validation split.",
     )
     c.add(
         name="normalize",
         type=int,
         default=1,
         choices=[0, 1],
         strategy="constant",
         description="Whether to normalize dataset.",
     )
     return c
Example #11
    def configs() -> Configs:
        c = Configs()

        c.add(
            name="unfreeze_from_epoch",
            type=int,
            default=-1,
            description=
            "Number of epochs to wait before starting gradual unfreeze. If -1, unfreeze is omitted.",
        )
        c.add(
            name="unfreeze_layers_must_include",
            type=str,
            default="",
            description=
            "String that must be contained in layer names which should be unfrozen. If empty, this feature is disabled.",
        )
        c.add(
            name="unfreeze_epoch_step",
            type=int,
            default=1,
            description="Number of epochs to train before next unfreeze.",
        )
        c.add(
            name="unfreeze_layers_initial",
            type=int,
            default=1,
            strategy="choice",
            description=
            "Number layers to unfreeze initially. If `-1`, it will be equal to total_layers",
        )
        c.add(
            name="unfreeze_layer_step",
            type=int,
            default=1,
            description=
            "Number additional layers to unfreeze at each unfreeze step. If `-1`, all layers are unfrozon after a step",
        )
        c.add(
            name="unfreeze_layers_max",
            type=int,
            default=-1,
            description=
            "Maximum number of layers to unfreeze. If `-1`, it will be equal to total_layers",
        )
        return c
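
Together, these six parameters define a gradual-unfreezing schedule. One way an epoch loop might consume them is sketched below as a hypothetical helper; the actual `ride` logic may differ, and the `unfreeze_layers_must_include` name filter is left out for brevity.

 def num_unfrozen_layers(epoch: int, total_layers: int, args) -> int:
     # Hypothetical schedule derived from the parameters above.
     if args.unfreeze_from_epoch < 0 or epoch < args.unfreeze_from_epoch:
         return 0  # unfreezing disabled or not yet started
     steps = (epoch - args.unfreeze_from_epoch) // args.unfreeze_epoch_step
     initial = (total_layers if args.unfreeze_layers_initial == -1
                else args.unfreeze_layers_initial)
     per_step = (total_layers if args.unfreeze_layer_step == -1
                 else args.unfreeze_layer_step)
     upper = (total_layers if args.unfreeze_layers_max == -1
              else args.unfreeze_layers_max)
     return min(initial + steps * per_step, upper, total_layers)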
Example #12
    def run(self, args: AttributeDict):
        """Run hyperparameter search using the `tune.schedulers.ASHAScheduler`

        Args:
            args (AttributeDict): Arguments

        Side-effects:
            Saves logs to `TUNE_LOGS_PATH / args.id`
        """
        try:
            from ray import tune
            from ray.tune.integration.pytorch_lightning import (
                TuneReportCheckpointCallback,
            )
        except ModuleNotFoundError as e:  # pragma: no cover
            logger.error(
                "To use hyperparameter search, first install Ray Tune via `pip install 'ray[tune]'` or `pip install 'ride[extras]'`"
            )
            raise e

        if not hasattr(args, "id"):
            args.id = "hparamsearch"

        module_config = (
            Configs.from_file(args.from_hparam_space_file)
            if args.from_hparam_space_file
            else self.Module.configs()
        ).tune_config()

        config = {
            **dict(args),
            **module_config,
            # pl.Trainer args:
            "gpus": args.gpus_per_trial,
            "logger": False,
            "accumulate_grad_batches": (
                (8 // args.gpus_per_trial) * args.accumulate_grad_batches
                if args.gpus_per_trial
                else args.accumulate_grad_batches
            ),
        }
        scheduler = tune.schedulers.ASHAScheduler(
            metric=f"val/{args.optimization_metric}",
            mode=self.Module.metrics()[args.optimization_metric].value,
            max_t=args.max_epochs,
            grace_period=1,
            reduction_factor=2,
        )

        metric_names = [f"val/{m}" for m in self.Module.metrics().keys()]

        reporter = tune.CLIReporter(
            metric_columns=[*metric_names, "training_iteration"],
        )
        tune_callbacks = [
            TuneReportCheckpointCallback(
                metrics=metric_names,
                filename="checkpoint",
                on="validation_end",
            )
        ]
        cpus_per_trial = max(
            1,
            (
                min(10 * args.gpus_per_trial, NUM_CPU - 10)
                if args.gpus_per_trial
                else min(10, NUM_CPU - 2)
            ),
        )

        analysis = tune.run(
            partial(
                Runner.static_train_and_val,
                self.Module,
                trainer_callbacks=tune_callbacks,
            ),
            name=args.id,
            local_dir=str(TUNE_LOGS_PATH),
            resources_per_trial={"cpu": cpus_per_trial, "gpu": args.gpus_per_trial},
            config=config,
            num_samples=args.trials,
            scheduler=scheduler,
            progress_reporter=reporter,
            raise_on_failed_trial=False,
        )

        best_hparams = analysis.get_best_config(
            metric=f"val/{args.optimization_metric}",
            mode=self.Module.metrics()[args.optimization_metric].value,
            scope="all",
        )
        # Select only model parameters
        if best_hparams:
            best_hparams = {
                k: best_hparams[k]
                for k in [
                    *self.Module.configs().names,
                    # Trainer parameters that influence model hparams:
                    "accumulate_grad_batches",
                    "batch_size",
                    "gpus",
                ]
            }
        return best_hparams
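
Example #12 ties the earlier pieces together: `Module.configs().tune_config()` (or a search space loaded via `from_hparam_space_file`) supplies the sampled hyperparameters, the ASHA scheduler prunes underperforming trials early, and the best trial's config is filtered down to the module's own parameter names plus the trainer flags that affect the effective batch size. The `8 // args.gpus_per_trial` factor appears to scale gradient accumulation so that trials on fewer GPUs keep an effective batch size comparable to full training, though the implied 8-GPU baseline is an inference from the code rather than a documented constant.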