Example #1
class Example3:
    """ Examples with other actions. """
    b: bool = False
    debug:   bool = field(alias="-d", action="store_true")
    verbose: bool = field(alias="-v", action="store_true")

    cache: bool = False
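
A minimal usage sketch for the flags above (hypothetical: it assumes Example3 is decorated with @dataclass and parsed with simple_parsing's ArgumentParser):

from simple_parsing import ArgumentParser

parser = ArgumentParser()
parser.add_arguments(Example3, dest="options")
args = parser.parse_args(["-d", "-v"])
assert args.options.debug and args.options.verbose
assert args.options.cache is False  # defaults are preserved
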
Example #2
class HParams:
    """Set of options for the training of a Model."""

    num_layers: int = field(4, alias="-n")
    num_units: int = field(64, alias="-u")
    optimizer: str = field("ADAM", alias=["-o", "--opt"])
    learning_rate: float = field(0.001, alias="-lr")
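
A hedged sketch of parsing these aliases from the command line (again assuming HParams is a @dataclass handled by simple_parsing):

from simple_parsing import ArgumentParser

parser = ArgumentParser()
parser.add_arguments(HParams, dest="hparams")
# Short aliases and long names should both be accepted:
args = parser.parse_args("-n 12 --opt sgd -lr 0.01".split())
assert args.hparams.num_layers == 12
assert args.hparams.optimizer == "sgd"
assert args.hparams.num_units == 64  # untouched default
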
Example #3
class RunSettings:
    """Parameters for a run."""

    # Whether or not to execute in debug mode.
    debug: bool = field(alias=["-d"], default=False)
    # Whether or not to add a lot of logging information.
    verbose: bool = field(alias=["-v"], action="store_true")
Example #4
class InputArgs:
    # Start date from which to collect data about base users. Input in iso format (YYYY-MM-DD).
    # The date is included in the data
    start_date: str = field(alias="s", metadata={'a': 'b'})

    # End date for collecting base users. Input in iso format (YYYY-MM-DD). The date is included in the data.
    # Should not be before `start_date`
    end_date: str = field(alias="e")
Example #5
    class Foo(TestSetup):
        """ Some class Foo """

        # A sequence of tasks.
        task_sequence: List[str] = field(choices=["train", "test",
                                                  "ood"])  # side
        """Below"""
Example #6
    class HParams(BaselineModel.HParams):
        """ Hyper-parameters of our customized baseline model.
        """

        # Hyper-parameters of our simple new auxiliary task.
        simple_reg: SimpleRegularizationAuxTask.Options = field(
            default_factory=SimpleRegularizationAuxTask.Options)
Example #7
class KnnClassifierOptions:
    """ Set of options for configuring the KnnClassifier. """
    n_neighbors: int = field(default=5,
                             alias="n_neighbours")  # Number of neighbours.
    metric: str = "cosine"
    algorithm: str = "auto"  # See the sklearn docs
    leaf_size: int = 30  # See the sklearn docs
    p: int = 2  # see the sklearn docs
    n_jobs: Optional[int] = -1  # see the sklearn docs.
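
These option names mirror the constructor parameters of sklearn's KNeighborsClassifier, so one plausible way to consume them is to unpack the dataclass directly. A sketch (assuming KnnClassifierOptions is a @dataclass; the alias only changes the command-line flag, the attribute is still n_neighbors):

from dataclasses import asdict
from sklearn.neighbors import KNeighborsClassifier

options = KnnClassifierOptions()
# Every field name matches a KNeighborsClassifier constructor argument.
classifier = KNeighborsClassifier(**asdict(options))
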
Example #8
def hparam(default: T,
           *args,
           prior: Union[Type[Prior[T]], Prior[T]] = None,
           **kwargs) -> T:
    metadata = kwargs.get("metadata", {})
    min: Optional[float] = kwargs.get("min")
    max: Optional[float] = kwargs.get("max")

    if prior is None:
        assert min is not None and max is not None
        # if min and max are passed but no Prior object, assume a Uniform prior.
        prior = UniformPrior(min=min, max=max)
        metadata.update({
            "min": min,
            "max": max,
            "prior": prior,
        })

    elif isinstance(prior, type) and issubclass(
            prior, (UniformPrior, LogUniformPrior)):
        # use the prior as a constructor.
        assert min is not None and max is not None
        prior = prior(min=min, max=max)

    elif isinstance(prior, Prior):
        metadata["prior"] = prior
        if isinstance(prior, (UniformPrior, LogUniformPrior)):
            metadata.update(dict(
                min=prior.min,
                max=prior.max,
            ))
        elif isinstance(prior, (NormalPrior)):
            metadata.update(dict(
                mu=prior.mu,
                sigma=prior.sigma,
            ))

    else:
        # TODO: maybe support an arbitrary callable?
        raise RuntimeError(
            "hparam should receive either: \n"
            "- `min` and `max` kwargs, \n"
            "- `min` and `max` kwargs and a type of Prior to use, \n"
            "- a `Prior` instance.")

    kwargs["metadata"] = metadata
    return field(
        default=default,
        *args,
        **kwargs,
    )
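
A sketch of how hparam might be used inside a dataclass, one line per accepted call pattern from the error message above (UniformPrior and LogUniformPrior are the classes referenced by the snippet; the field names here are illustrative):

from dataclasses import dataclass

@dataclass
class TunableHParams:
    # 1) `min` and `max` only: a UniformPrior is created implicitly.
    learning_rate: float = hparam(1e-3, min=1e-6, max=1.0)
    # 2) `min`/`max` plus a Prior type, used as a constructor.
    momentum: float = hparam(0.9, min=0.5, max=0.999, prior=UniformPrior)
    # 3) A fully-constructed Prior instance.
    num_units: int = hparam(64, prior=LogUniformPrior(min=16, max=512))
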
Example #9
class ReplayOptions(Serializable):
    """ Options related to Replay. """
    # Size of the labeled replay buffer.
    labeled_buffer_size: int = field(0, alias="replay_buffer_size")
    # Size of the unlabeled replay buffer.
    unlabeled_buffer_size: int = 0

    # Always use the replay buffer to help "smooth" out the data stream.
    always_use_replay: bool = False
    # Sampling size, when used as described above to smooth out the data stream.
    # If not given, will use the same value as the batch size.
    sampled_batch_size: Optional[int] = None

    @property
    def enabled(self) -> bool:
        return self.labeled_buffer_size > 0 or self.unlabeled_buffer_size > 0
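
A quick sketch of the enabled property (assuming ReplayOptions is a @dataclass, so the buffer sizes can be passed as keyword arguments):

assert not ReplayOptions().enabled
assert ReplayOptions(labeled_buffer_size=1000).enabled
assert ReplayOptions(unlabeled_buffer_size=500).enabled
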
Example #10
class ExperimentCfg:
    output_dir: str = field(alias="-o")
    ignore_existing_output_contents: bool = True
    gpu: int = 0
    use_wandb: bool = True
    wandb_user: str = 'none'
    project_name: str = 'causal_comp_prep'
    experiment_name: str = 'default'
    instance_name: str = 'default'
    git_hash: str = ''
    sync_uid: str = ''
    report_imbalanced_metrics: bool = False

    # float precision when logging to CSV file
    csv_precision: int = 8

    delete_dumped_preds: bool = True

    def __post_init__(self):

        # Set default experiment name
        self._set_default_experiment_name()

    def _set_default_experiment_name(self):
        at_ngc: bool = ('NGC_JOB_ID' in os.environ.keys())
        at_docker = np.in1d(['/opt/conda/bin'], np.array(sys.path))[0]
        at_local_docker = at_docker and not at_ngc
        name_suffix = '_local'
        if at_local_docker:
            name_suffix += '_docker'
        elif at_ngc:
            name_suffix = '_ngc'
        if self.experiment_name == 'default':
            self.experiment_name = 'dev' + name_suffix
        if self.instance_name == 'default':
            self.instance_name = 'dev' + name_suffix

    def __getitem__(self, key):
        """ Allow accessing instance attributes as dictionary keys """
        return getattr(self, key)
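
With __getitem__ defined, the config can be used wherever a read-only mapping is expected. A brief sketch (assuming ExperimentCfg is a @dataclass):

cfg = ExperimentCfg(output_dir="/tmp/run")
assert cfg["gpu"] == 0
assert cfg["output_dir"] == cfg.output_dir
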
Example #11
class ClassIncrementalSetting(PassiveSetting, IncrementalSetting):
    """Supervised Setting where the data is a sequence of 'tasks'.

    This class is basically the supervised version of an Incremental Setting.

    The current task can be set at the `current_task_id` attribute.
    """

    Results: ClassVar[Type[Results]] = ClassIncrementalResults

    # (NOTE: commenting out PassiveSetting.Observations as it is the same class
    # as Setting.Observations, and we want a consistent method resolution order.)
    @dataclass(frozen=True)
    class Observations(  #PassiveSetting.Observations,
            IncrementalSetting.Observations):
        """ Incremental Observations, in a supervised context. """

        pass

    # @dataclass(frozen=True)
    # class Actions(PassiveSetting.Actions,
    #               IncrementalSetting.Actions):
    #     """Incremental Actions, in a supervised (passive) context."""
    #     pass

    # @dataclass(frozen=True)
    # class Rewards(PassiveSetting.Rewards,
    #               IncrementalSetting.Rewards):
    #     """Incremental Rewards, in a supervised context."""
    #     pass

    # Class variable holding a dict of the names and types of all available
    # datasets.
    # TODO: Issue #43: Support other datasets than just classification
    available_datasets: ClassVar[Dict[str, Type[_ContinuumDataset]]] = {
        c.__name__.lower(): c
        for c in [
            CIFARFellowship,
            MNISTFellowship,
            ImageNet100,
            ImageNet1000,
            CIFAR10,
            CIFAR100,
            EMNIST,
            KMNIST,
            MNIST,
            QMNIST,
            FashionMNIST,
            Synbols,
        ]
        # "synbols": Synbols,
        # "synbols_font": partial(Synbols, task="fonts"),
    }
    # A continual dataset to use. (Should be taken from the continuum package).
    dataset: str = choice(available_datasets.keys(), default="mnist")

    # Transformations to use. See the Transforms enum for the available values.
    transforms: List[Transforms] = list_field(
        Transforms.to_tensor,
        # BUG: The input_shape given to the Model doesn't have the right number
        # of channels, even if we 'fixed' them here. However the images are fine
        # after.
        Transforms.three_channels,
        Transforms.channels_first_if_needed,
    )

    # Either number of classes per task, or a list specifying for
    # every task the amount of new classes.
    increment: Union[int, List[int]] = list_field(2,
                                                  type=int,
                                                  nargs="*",
                                                  alias="n_classes_per_task")
    # The number of tasks in the scenario.
    # If zero, defaults to the number of classes divided by the increment.
    nb_tasks: int = 0
    # A different task size applied only for the first task.
    # Deactivated if `increment` is a list.
    initial_increment: int = 0
    # An optional custom class order, used for NC.
    class_order: Optional[List[int]] = None
    # Either number of classes per task, or a list specifying for
    # every task the amount of new classes (defaults to the value of
    # `increment`).
    test_increment: Optional[Union[List[int], int]] = None
    # A different task size applied only for the first test task.
    # Deactivated if `test_increment` is a list. Defaults to the
    # value of `initial_increment`.
    test_initial_increment: Optional[int] = None
    # An optional custom class order for testing, used for NC.
    # Defaults to the value of `class_order`.
    test_class_order: Optional[List[int]] = None

    # TODO: Need to put num_workers in only one place.
    batch_size: int = field(default=32, cmd=False)
    num_workers: int = field(default=4, cmd=False)

    # Whether or not to relabel the images to be within the [0, n_classes_per_task)
    # range. False by default in the Class-Incremental Setting, but set to True
    # in the Domain-Incremental Setting.
    relabel: bool = False

    def __post_init__(self):
        """Initializes the fields of the Setting (and LightningDataModule),
        including the transforms, shapes, etc.
        """
        if isinstance(self.increment, list) and len(self.increment) == 1:
            # This can happen when parsing a list from the command-line.
            self.increment = self.increment[0]

        base_reward_space = reward_spaces[self.dataset]
        # action space = reward space by default
        base_action_space = base_reward_space

        if isinstance(base_action_space, spaces.Discrete):
            # Classification dataset

            self.num_classes = base_action_space.n
            # Set the number of tasks depending on the increment, and vice-versa.
            # (as only one of the two should be used).
            if self.nb_tasks == 0:
                self.nb_tasks = self.num_classes // self.increment
            else:
                self.increment = self.num_classes // self.nb_tasks
        else:
            raise NotImplementedError("TODO: (issue #43)")

        if not self.class_order:
            self.class_order = list(range(self.num_classes))

        # Test values default to the same as train.
        self.test_increment = self.test_increment or self.increment
        self.test_initial_increment = self.test_initial_increment or self.test_increment
        self.test_class_order = self.test_class_order or self.class_order

        # TODO: For now we assume a fixed, equal number of classes per task, for
        # sake of simplicity. We could take out this assumption, but it might
        # make things a bit more complicated.
        assert isinstance(self.increment, int)
        assert isinstance(self.test_increment, int)

        self.n_classes_per_task: int = self.increment
        action_space = spaces.Discrete(self.n_classes_per_task)
        reward_space = spaces.Discrete(self.n_classes_per_task)

        super().__post_init__(
            # observation_space=observation_space,
            action_space=action_space,
            reward_space=reward_space,  # the labels have shape (1,) always.
        )
        self.train_datasets: List[_ContinuumDataset] = []
        self.val_datasets: List[_ContinuumDataset] = []
        self.test_datasets: List[_ContinuumDataset] = []

        # This will be set by the Experiment, or passed to the `apply` method.
        # TODO: This could be a bit cleaner.
        self.config: Config
        # Default path to which the datasets will be downloaded.
        self.data_dir: Optional[Path] = None

        self.train_env: PassiveEnvironment = None  # type: ignore
        self.val_env: PassiveEnvironment = None  # type: ignore
        self.test_env: PassiveEnvironment = None  # type: ignore

    @property
    def observation_space(self) -> NamedTupleSpace:
        """ The un-batched observation space, based on the choice of dataset and
        the transforms at `self.transforms` (which apply to the train/valid/test
        environments).

        The returned space is a NamedTupleSpace, with the following properties:
        - `x`: observation space (e.g. `Image` space)
        - `task_labels`: Union[Discrete, Sparse[Discrete]]
           The task labels for each sample. When task labels are not available,
           the task labels space is Sparse, and entries will be `None`.
        """
        x_space = base_observation_spaces[self.dataset]
        if not self.transforms:
            # NOTE: When we don't pass any transforms, continuum scenarios still
            # at least use 'to_tensor'.
            x_space = Transforms.to_tensor(x_space)

        # apply the transforms to the observation space.
        for transform in self.transforms:
            x_space = transform(x_space)
        x_space = add_tensor_support(x_space)

        task_label_space = spaces.Discrete(self.nb_tasks)
        if not self.task_labels_at_train_time:
            task_label_space = Sparse(task_label_space, 1.0)
        task_label_space = add_tensor_support(task_label_space)

        return NamedTupleSpace(
            x=x_space,
            task_labels=task_label_space,
            dtype=self.Observations,
        )

    @property
    def action_space(self) -> spaces.Discrete:
        """ Action space for this setting. """
        if self.relabel:
            return spaces.Discrete(self.n_classes_per_task)
        return spaces.Discrete(self.num_classes)

        # TODO: IDEA: Have the action space only reflect the number of 'current' classes
        # in order to create a "true" class-incremental learning setting.
        n_classes_seen_so_far = 0
        for task_id in range(self.current_task_id):
            n_classes_seen_so_far += self.num_classes_in_task(task_id)
        return spaces.Discrete(n_classes_seen_so_far)

    @property
    def reward_space(self) -> spaces.Discrete:
        return self.action_space

    def apply(self,
              method: Method,
              config: Config = None) -> ClassIncrementalResults:
        """Apply the given method on this setting to producing some results."""
        # TODO: It still isn't super clear what should be in charge of creating
        # the config, and how to create it, when it isn't passed explicitly.
        self.config: Config
        if config is not None:
            self.config = config
            logger.debug(f"Using Config {self.config}")
        elif isinstance(getattr(method, "config", None), Config):
            # If the Method has a `config` attribute that is a Config, use that.
            self.config = method.config
            logger.debug(f"Using Config from the Method: {self.config}")
        else:
            logger.debug("Parsing the Config from the command-line.")
            self.config = Config.from_args(self._argv, strict=False)
            logger.debug(f"Resulting Config: {self.config}")

        method.configure(setting=self)

        # Run the main loop (which is defined in IncrementalSetting).
        results: ClassIncrementalResults = super().main_loop(method)
        logger.info(results.summary())
        method.receive_results(self, results=results)
        return results

    def prepare_data(self, data_dir: Path = None, **kwargs):
        self.config = self.config or Config.from_args(self._argv, strict=False)

        # if self.batch_size is None:
        #     logger.warning(UserWarning(
        #         f"Using the default batch size of 32. (You can set the "
        #         f"batch size by passing a value to the Setting constructor, or "
        #         f"by setting the attribute inside your 'configure' method) "
        #     ))
        #     self.batch_size = 32

        data_dir = data_dir or self.data_dir or self.config.data_dir
        self.make_dataset(data_dir, download=True)
        self.data_dir = data_dir
        super().prepare_data(**kwargs)

    def setup(self, stage: Optional[str] = None, *args, **kwargs):
        """ Creates the datasets for each task.
        TODO: Figure out a way of setting data_dir elsewhere maybe?
        """
        assert self.config
        # self.config = self.config or Config.from_args(self._argv)
        logger.debug(
            f"data_dir: {self.data_dir}, setup args: {args} kwargs: {kwargs}")

        self.train_cl_dataset = self.make_dataset(self.data_dir,
                                                  download=False,
                                                  train=True)
        self.test_cl_dataset = self.make_dataset(self.data_dir,
                                                 download=False,
                                                 train=False)

        self.train_cl_loader: _BaseScenario = self.make_train_cl_loader(
            self.train_cl_dataset)
        self.test_cl_loader: _BaseScenario = self.make_test_cl_loader(
            self.test_cl_dataset)

        logger.info(f"Number of train tasks: {self.train_cl_loader.nb_tasks}.")
        logger.info(f"Number of test tasks: {self.train_cl_loader.nb_tasks}.")

        self.train_datasets.clear()
        self.val_datasets.clear()
        self.test_datasets.clear()

        for task_id, train_dataset in enumerate(self.train_cl_loader):
            train_dataset, val_dataset = split_train_val(
                train_dataset, val_split=self.val_fraction)
            self.train_datasets.append(train_dataset)
            self.val_datasets.append(val_dataset)

        for task_id, test_dataset in enumerate(self.test_cl_loader):
            self.test_datasets.append(test_dataset)

        super().setup(stage, *args, **kwargs)

        # TODO: Adding this temporarily just for the competition
        self.test_boundary_steps = [0] + list(
            itertools.accumulate(map(len, self.test_datasets)))[:-1]
        self.test_steps = sum(map(len, self.test_datasets))
        # self.test_steps = [0] + list(
        #     itertools.accumulate(map(len, self.test_datasets))
        # )[:-1]

    def get_train_dataset(self) -> Dataset:
        return self.train_datasets[self.current_task_id]

    def get_val_dataset(self) -> Dataset:
        return self.val_datasets[self.current_task_id]

    def get_test_dataset(self) -> Dataset:
        return ConcatDataset(self.test_datasets)

    def train_dataloader(self,
                         batch_size: int = None,
                         num_workers: int = None) -> PassiveEnvironment:
        """Returns a DataLoader for the train dataset of the current task. """
        if not self.has_prepared_data:
            self.prepare_data()
        if not self.has_setup_fit:
            self.setup("fit")

        if self.train_env:
            self.train_env.close()

        batch_size = batch_size if batch_size is not None else self.batch_size
        num_workers = num_workers if num_workers is not None else self.num_workers

        dataset = self.get_train_dataset()
        # TODO: Add some kind of Wrapper around the dataset to make it
        # semi-supervised.
        env = PassiveEnvironment(
            dataset,
            split_batch_fn=self.split_batch_function(training=True),
            observation_space=self.observation_space,
            action_space=self.action_space,
            reward_space=self.reward_space,
            pin_memory=True,
            batch_size=batch_size,
            num_workers=num_workers,
            # Since the dataset only contains data from the current task(s), it's fine
            # to shuffle here. TODO: Double-check this.
            shuffle=True,
        )

        if self.config.render:
            # TODO: Add a callback wrapper that calls 'env.render' at each step?
            env = RenderEnvWrapper(env)

        if self.train_transforms:
            # TODO: Check that the transforms aren't already being applied in the
            # 'dataset' portion.
            env = TransformObservation(env, f=self.train_transforms)

        if self.monitor_training_performance:
            env = MeasureSLPerformanceWrapper(
                env,
                first_epoch_only=True,
                wandb_prefix=f"Train/Task {self.current_task_id}",
            )

        self.train_env = env
        return self.train_env

    def val_dataloader(self,
                       batch_size: int = None,
                       num_workers: int = None) -> PassiveEnvironment:
        """Returns a DataLoader for the validation dataset of the current task.
        """
        if not self.has_prepared_data:
            self.prepare_data()
        if not self.has_setup_fit:
            self.setup("fit")

        dataset = self.get_val_dataset()
        batch_size = batch_size if batch_size is not None else self.batch_size
        num_workers = num_workers if num_workers is not None else self.num_workers
        env = PassiveEnvironment(
            dataset,
            split_batch_fn=self.split_batch_function(training=True),
            observation_space=self.observation_space,
            action_space=self.action_space,
            reward_space=self.reward_space,
            pin_memory=True,
            batch_size=batch_size,
            num_workers=num_workers,
            # Since the dataset only contains data from the current task(s), it's fine
            # to shuffle here. TODO: Double-check this.
            shuffle=True,
        )
        if self.val_transforms:
            env = TransformObservation(env, f=self.val_transforms)

        if self.val_env:
            self.val_env.close()
            del self.val_env
        self.val_env = env
        return self.val_env

    def test_dataloader(
        self,
        batch_size: int = None,
        num_workers: int = None
    ) -> PassiveEnvironment["ClassIncrementalSetting.Observations", Actions,
                            Rewards]:
        """Returns a DataLoader for the test dataset of the current task.
        """
        if not self.has_prepared_data:
            self.prepare_data()
        if not self.has_setup_test:
            self.setup("test")

        # Testing this out, we're gonna have a "test schedule" like this to try
        # to imitate the MultiTaskEnvironment in RL.
        transition_steps = [0] + list(
            itertools.accumulate(map(len, self.test_datasets)))[:-1]
        # Join all the test datasets.
        dataset = self.get_test_dataset()

        batch_size = batch_size if batch_size is not None else self.batch_size
        num_workers = num_workers if num_workers is not None else self.num_workers

        env = PassiveEnvironment(
            dataset,
            batch_size=batch_size,
            num_workers=num_workers,
            split_batch_fn=self.split_batch_function(training=False),
            observation_space=self.observation_space,
            action_space=self.action_space,
            reward_space=self.reward_space,
            pretend_to_be_active=True,
            shuffle=False,
        )
        if self.test_transforms:
            env = TransformObservation(env, f=self.test_transforms)

        # NOTE: Two ways of removing the task labels: Either using a different
        # 'split_batch_fn' at train and test time, or by using this wrapper
        # which is also used in the RL side of the tree:
        # TODO: Maybe remove/simplify the 'split_batch_function'.
        from sequoia.settings.active.continual.wrappers import HideTaskLabelsWrapper

        if not self.task_labels_at_test_time:
            env = HideTaskLabelsWrapper(env)

        # FIXME: Creating a 'task schedule' for the TestEnvironment, mimicking what's in
        # the RL settings.
        test_task_schedule = dict.fromkeys(
            [step // (env.batch_size or 1) for step in transition_steps],
            range(len(transition_steps)),
        )
        # TODO: Configure the 'monitoring' dir properly.
        test_dir = "results"
        test_loop_max_steps = len(dataset) // (env.batch_size or 1)
        # TODO: Fix this: iteration doesn't ever end for some reason.

        test_env = ClassIncrementalTestEnvironment(
            env,
            directory=test_dir,
            step_limit=test_loop_max_steps,
            task_schedule=test_task_schedule,
            force=True,
            config=self.config,
        )

        if self.test_env:
            self.test_env.close()
        self.test_env = test_env
        return self.test_env

    def split_batch_function(
        self, training: bool
    ) -> Callable[[Tuple[Tensor, ...]], Tuple[Observations, Rewards]]:
        """ Returns a callable that is used to split a batch into observations and rewards.
        """
        task_classes = {
            i: self.task_classes(i, train=training)
            for i in range(self.nb_tasks)
        }

        def split_batch(
                batch: Tuple[Tensor, ...]) -> Tuple[Observations, Rewards]:
            """Splits the batch into a tuple of Observations and Rewards.

            Parameters
            ----------
            batch : Tuple[Tensor, ...]
                A batch of data coming from the dataset.

            Returns
            -------
            Tuple[Observations, Rewards]
                A tuple of Observations and Rewards.
            """
            # In this context (class_incremental), we will always have 3 items per
            # batch, because we use the ClassIncremental scenario from Continuum.
            assert len(batch) == 3
            x, y, t = batch

            # Relabel y so it is always in [0, n_classes_per_task) for each task.
            if self.relabel:
                y = relabel(y, task_classes)

            if (training and not self.task_labels_at_train_time) or (
                    not training and not self.task_labels_at_test_time):
                # Remove the task labels if we're not currently allowed to have
                # them.
                # TODO: Using None might cause some issues. Maybe set -1 instead?
                t = None

            observations = self.Observations(x=x, task_labels=t)
            rewards = self.Rewards(y=y)

            return observations, rewards

        return split_batch

    def make_train_cl_loader(
            self, train_dataset: _ContinuumDataset) -> _BaseScenario:
        """ Creates a train ClassIncremental object from continuum. """
        return ClassIncremental(
            train_dataset,
            nb_tasks=self.nb_tasks,
            increment=self.increment,
            initial_increment=self.initial_increment,
            class_order=self.class_order,
            transformations=self.transforms,
        )

    def make_test_cl_loader(self,
                            test_dataset: _ContinuumDataset) -> _BaseScenario:
        """ Creates a test ClassIncremental object from continuum. """
        return ClassIncremental(
            test_dataset,
            nb_tasks=self.nb_tasks,
            increment=self.test_increment,
            initial_increment=self.test_initial_increment,
            class_order=self.test_class_order,
            transformations=self.transforms,
        )

    def make_dataset(self,
                     data_dir: Path,
                     download: bool = True,
                     train: bool = True,
                     **kwargs) -> _ContinuumDataset:
        # TODO: #7 Use this method here to fix the errors that happen when
        # trying to create every single dataset from continuum.
        data_dir = Path(data_dir)

        if not data_dir.exists():
            data_dir.mkdir(parents=True, exist_ok=True)

        if self.dataset in self.available_datasets:
            dataset_class = self.available_datasets[self.dataset]
            return dataset_class(data_path=data_dir,
                                 download=download,
                                 train=train,
                                 **kwargs)

        elif self.dataset in self.available_datasets.values():
            dataset_class = self.dataset
            return dataset_class(data_path=data_dir,
                                 download=download,
                                 train=train,
                                 **kwargs)

        elif isinstance(self.dataset, Dataset):
            logger.info(f"Using a custom dataset {self.dataset}")
            return self.dataset

        else:
            raise NotImplementedError

    # These methods below are used by the MultiHeadModel, mostly when
    # using a multihead model, to figure out how to relabel the batches, or how
    # many classes there are in the current task (since we support a different
    # number of classes per task).
    # TODO: Remove this? Since I'm simplifying to a fixed number of classes per
    # task for now...

    def num_classes_in_task(self, task_id: int,
                            train: bool) -> Union[int, List[int]]:
        """ Returns the number of classes in the given task. """
        increment = self.increment if train else self.test_increment
        if isinstance(increment, list):
            return increment[task_id]
        return increment

    def num_classes_in_current_task(self, train: Optional[bool] = None) -> int:
        """ Returns the number of classes in the current task. """
        # TODO: It's ugly to have the 'method' tell us if we're currently in
        # train/eval/test, no? Maybe just make a method for each?
        return self.num_classes_in_task(self._current_task_id, train=train)

    def task_classes(self, task_id: int, train: bool) -> List[int]:
        """ Gives back the 'true' labels present in the given task. """
        start_index = sum(
            self.num_classes_in_task(i, train) for i in range(task_id))
        end_index = start_index + self.num_classes_in_task(task_id, train)
        if train:
            return self.class_order[start_index:end_index]
        else:
            return self.test_class_order[start_index:end_index]

    def current_task_classes(self, train: bool) -> List[int]:
        """ Gives back the labels present in the current task. """
        return self.task_classes(self._current_task_id, train)

    def _check_environments(self):
        """ Do a quick check to make sure that the dataloaders give back the
        right observations / reward types.
        """
        for loader_method in [
                self.train_dataloader,
                self.val_dataloader,
                self.test_dataloader,
        ]:
            logger.debug(f"Checking loader method {loader_method.__name__}")
            env = loader_method(batch_size=5)
            obs = env.reset()
            assert isinstance(obs, self.Observations)
            # Convert the observation to numpy arrays, to make it easier to
            # check if the elements are in the spaces.
            obs = obs.numpy()
            # take a slice of the first batch, to get sample tensors.
            first_obs = obs[:, 0]
            # TODO: Here we'd like to be able to check that the first observation
            # is inside the observation space, but we can't do that because the
            # task label might be None, and so that would make it fail.
            x, task_label = first_obs
            if task_label is None:
                assert x in self.observation_space[0]

            for i in range(5):
                actions = env.action_space.sample()
                observations, rewards, done, info = env.step(actions)
                assert isinstance(observations,
                                  self.Observations), type(observations)
                assert isinstance(rewards, self.Rewards), type(rewards)
                actions = env.action_space.sample()
                if done:
                    observations = env.reset()
            env.close()
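
A high-level sketch of the intended flow, based on the apply method above (my_method stands in for any Method implementation and is purely illustrative):

setting = ClassIncrementalSetting(dataset="mnist", increment=2)
results = setting.apply(my_method)
print(results.summary())
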
Example #12
class ContinualRLSetting(ActiveSetting, IncrementalSetting):
    """ Reinforcement Learning Setting where the environment changes over time.

    This is an Active setting which uses gym environments as sources of data.
    These environments' attributes could change over time following a task
    schedule. An example of this could be that the gravity increases over time
    in cartpole, making the task progressively harder as the agent interacts with
    the environment.
    """

    # The type of results returned by an RL experiment.
    Results: ClassVar[Type[Results]] = RLResults

    @dataclass(frozen=True)
    class Observations(IncrementalSetting.Observations):
        """ Observations in a continual RL Setting. """

        # Just as a reminder, these are the fields defined in the base classes:
        # x: Tensor
        # task_labels: Union[Optional[Tensor], Sequence[Optional[Tensor]]] = None

        # The 'done' part of the 'step' method. We add this here in case a
        # method were to iterate on the environments in the dataloader-style so
        # they also have access to those (i.e. for the BaselineMethod).
        done: Optional[Sequence[bool]] = None
        # Same, for the 'info' portion of the result of 'step'.
        # TODO: If we add the 'task space' (with all the attributes, for instance
        # then add it to the observations using the `AddInfoToObservations`.
        # info: Optional[Sequence[Dict]] = None

    # Image transforms to use.
    transforms: List[Transforms] = list_field()

    # Class variable that holds the dict of available environments.
    available_datasets: ClassVar[Dict[str, str]] = {
        "cartpole": "CartPole-v0",
        "pendulum": "Pendulum-v0",
        "breakout": "Breakout-v0",
        # "duckietown": "Duckietown-straight_road-v0"
    }
    # TODO: Add breakout to 'available_datasets' only when atari_py is installed.

    # Which environment (a.k.a. "dataset") to learn on.
    # The dataset could be either a string (env id or a key from the
    # available_datasets dict), a gym.Env, or a callable that returns a single environment.
    # If self.dataset isn't one of those, an error will be raised.
    dataset: str = choice(available_datasets, default="cartpole")

    # The number of tasks. By default 1 for this setting.
    nb_tasks: int = field(1, alias=["n_tasks", "num_tasks"])

    # Max number of steps per task. (Also acts as the "length" of the training
    # and validation "Datasets")
    max_steps: int = 100_000
    # Maximum episodes per task.
    # TODO: Test that the limit on the number of episodes actually works.
    max_episodes: Optional[int] = None
    # Number of steps per task. When left unset and when `max_steps` is set,
    # takes the value of `max_steps` divided by `nb_tasks`.
    steps_per_task: Optional[int] = None
    # (WIP): Number of episodes per task.
    episodes_per_task: Optional[int] = None

    # Total number of steps in the test loop. (Also acts as the "length" of the testing
    # environment.)
    test_steps: int = 10_000
    # Number of steps per task in the test loop. When left unset and when `test_steps`
    # is set, takes the value of `test_steps` divided by `nb_tasks`.
    test_steps_per_task: Optional[int] = None

    # Standard deviation of the multiplicative Gaussian noise that is used to
    # create the values of the env attributes for each task.
    task_noise_std: float = 0.2

    # Whether the task boundaries are smooth or sudden.
    smooth_task_boundaries: bool = True

    # Whether to observe the state directly, rather than pixels. This can be
    # useful to debug environments like CartPole, for instance.
    observe_state_directly: bool = False

    # Path to a json file from which to read the train task schedule.
    train_task_schedule_path: Optional[Path] = None
    # Path to a json file from which to read the validation task schedule.
    valid_task_schedule_path: Optional[Path] = None
    # Path to a json file from which to read the test task schedule.
    test_task_schedule_path: Optional[Path] = None

    # Whether observations from the environments should include
    # the end-of-episode signal. Only really useful if your method will iterate
    # over the environments in the dataloader style
    # (as does the baseline method).
    add_done_to_observations: bool = False

    # The maximum number of steps per episode. When None, there is no limit.
    max_episode_steps: Optional[int] = None

    # NOTE: Added this `cmd=False` option to mark that we don't want to generate
    # any command-line arguments for these fields.
    train_task_schedule: Dict[int, Dict[str, float]] = dict_field(cmd=False)
    valid_task_schedule: Dict[int, Dict[str, float]] = dict_field(cmd=False)
    test_task_schedule: Dict[int, Dict[str, float]] = dict_field(cmd=False)

    train_wrappers: List[Callable[[gym.Env], gym.Env]] = list_field(cmd=False)
    valid_wrappers: List[Callable[[gym.Env], gym.Env]] = list_field(cmd=False)
    test_wrappers: List[Callable[[gym.Env], gym.Env]] = list_field(cmd=False)

    batch_size: Optional[int] = field(default=None, cmd=False)
    num_workers: Optional[int] = field(default=None, cmd=False)

    def __post_init__(self, *args, **kwargs):
        super().__post_init__(*args, **kwargs)
        self._new_random_task_on_reset: bool = False

        # Post processing of the 'dataset' field:
        if self.dataset in self.available_datasets.keys():
            # the environment name was passed, rather than an id
            # (e.g. 'cartpole' -> 'CartPole-v0').
            self.dataset = self.available_datasets[self.dataset]

        elif self.dataset not in self.available_datasets.values():
            # The passed dataset is assumed to be an environment ID, but it
            # wasn't in the dict of available datasets! We issue a warning, but
            # proceed to let the user use whatever environment they want to.
            logger.warning(
                UserWarning(
                    f"The chosen dataset/environment ({self.dataset}) isn't in the "
                    f"available_datasets dict, so we can't garantee this will work!"
                )
            )

        if isinstance(self.dataset, gym.Env) and self.batch_size:
            raise RuntimeError(
                f"Batch size should be None when a gym.Env "
                f"object is passed as `dataset`."
            )
        if not isinstance(self.dataset, (str, gym.Env)) and not callable(self.dataset):
            raise RuntimeError(
                f"`dataset` must be either a string, a gym.Env, or a callable. "
                f"(got {self.dataset})"
            )

        # Set the number of tasks depending on the increment, and vice-versa.
        # (as only one of the two should be used).
        assert self.max_steps, "assuming this should always be set, for now."
        # TODO: Clean this up, not super clear what options take precedence on
        # which other options.

        # Load the task schedules from the corresponding files, if present.
        if self.train_task_schedule_path:
            self.train_task_schedule = self.load_task_schedule(
                self.train_task_schedule_path
            )

        if self.valid_task_schedule_path:
            self.valid_task_schedule = self.load_task_schedule(
                self.valid_task_schedule_path
            )

        if self.test_task_schedule_path:
            self.test_task_schedule = self.load_task_schedule(
                self.test_task_schedule_path
            )

        if self.train_task_schedule:
            if self.steps_per_task is not None:
                # If steps per task was passed, then we overwrite the keys of the task
                # schedule.
                self.train_task_schedule = {
                    i * self.steps_per_task: self.train_task_schedule[step]
                    for i, step in enumerate(sorted(self.train_task_schedule.keys()))
                }
            else:
                # A task schedule was passed: infer the number of tasks from it.
                change_steps = sorted(self.train_task_schedule.keys())
                assert 0 in change_steps, "Schedule needs a task at step 0."
                # TODO: @lebrice: I guess we have to assume that the interval
                # between steps is constant for now? Do we actually depend on this
                # being the case? I think steps_per_task is only really ever used
                # for creating the task schedule, which we already have in this
                # case.
                assert (
                    len(change_steps) >= 2
                ), "WIP: need a minimum of two tasks in the task schedule for now."
                self.steps_per_task = change_steps[1] - change_steps[0]
                # Double-check that this is the case.
                for i in range(len(change_steps) - 1):
                    if change_steps[i + 1] - change_steps[i] != self.steps_per_task:
                        raise NotImplementedError(
                            f"WIP: This might not work yet if the tasks aren't "
                            f"equally spaced out at a fixed interval."
                        )

            nb_tasks = len(self.train_task_schedule)
            if self.smooth_task_boundaries:
                # NOTE: When in a ContinualRLSetting with smooth task boundaries,
                # the last entry in the schedule represents the state of the env at
                # the end of the "task". When there are clear task boundaries (i.e.
                # when in 'Class'/Task-Incremental RL), the last entry is the start
                # of the last task.
                nb_tasks -= 1
            if self.nb_tasks != 1:
                if self.nb_tasks != nb_tasks:
                    raise RuntimeError(
                        f"Passed number of tasks {self.nb_tasks} doesn't match the "
                        f"number of tasks deduced from the task schedule ({nb_tasks})"
                    )
            self.nb_tasks = nb_tasks

            self.max_steps = max(self.train_task_schedule.keys())
            if not self.smooth_task_boundaries:
                # See above note about the last entry.
                self.max_steps += self.steps_per_task

        elif self.nb_tasks:
            if self.steps_per_task:
                self.max_steps = self.nb_tasks * self.steps_per_task
            elif self.max_steps:
                self.steps_per_task = self.max_steps // self.nb_tasks

        elif self.steps_per_task:
            if self.nb_tasks:
                self.max_steps = self.nb_tasks * self.steps_per_task
            elif self.max_steps:
                self.nb_tasks = self.max_steps // self.steps_per_task

        elif self.max_steps:
            if self.nb_tasks:
                self.steps_per_task = self.max_steps // self.nb_tasks
            elif self.steps_per_task:
                self.nb_tasks = self.max_steps // self.steps_per_task

        if not all([self.nb_tasks, self.max_steps, self.steps_per_task]):
            raise RuntimeError(
                f"You need to provide at least two of 'max_steps', "
                f"'nb_tasks', or 'steps_per_task'."
            )

        assert self.max_steps == self.nb_tasks * self.steps_per_task

        if self.test_task_schedule:
            if 0 not in self.test_task_schedule:
                raise RuntimeError("Task schedules needs to include an initial task.")

            if self.test_steps_per_task is not None:
                # If steps per task was passed, then we overwrite the number of steps
                # for each task in the schedule to match.
                self.test_task_schedule = {
                    i * self.test_steps_per_task: self.test_task_schedule[step]
                    for i, step in enumerate(sorted(self.test_task_schedule.keys()))
                }

            change_steps = sorted(self.test_task_schedule.keys())
            assert 0 in change_steps, "Schedule needs to include task at step 0."

            nb_test_tasks = len(change_steps)
            if self.smooth_task_boundaries:
                nb_test_tasks -= 1
            assert (
                nb_test_tasks == self.nb_tasks
            ), "nb of tasks should be the same for train and test."

            self.test_steps_per_task = change_steps[1] - change_steps[0]
            for i in range(self.nb_tasks - 1):
                if change_steps[i + 1] - change_steps[i] != self.test_steps_per_task:
                    raise NotImplementedError(
                        "WIP: This might not work yet if the test tasks aren't "
                        "equally spaced out at a fixed interval."
                    )

            self.test_steps = max(change_steps)
            if not self.smooth_task_boundaries:
                # See above note about the last entry.
                self.test_steps += self.test_steps_per_task

        elif self.test_steps_per_task is None:
            # This is basically never the case, since the test_steps defaults to 10_000.
            assert (
                self.test_steps
            ), "need to set one of test_steps or test_steps_per_task"
            self.test_steps_per_task = self.test_steps // self.nb_tasks
        else:
            # FIXME: This is too complicated for what it is.
            # Check that `test_steps` is either the default value, or the right
            # value to use in this case.
            assert self.test_steps in {10_000, self.test_steps_per_task * self.nb_tasks}
            assert (
                self.test_steps_per_task
            ), "need to set one of test_steps or test_steps_per_task"
            self.test_steps = self.test_steps_per_task * self.nb_tasks

        assert self.test_steps // self.test_steps_per_task == self.nb_tasks

        if self.smooth_task_boundaries:
            # If we're operating in the 'Online/smooth task transitions' "regime",
            # then there is only one "task", and we don't have task labels.
            # TODO: HOWEVER, the task schedule could/should be able to have more
            # than one non-stationarity! This indicates a need for a distinction
            # between 'tasks' and 'non-stationarities' (changes in the env).
            self.known_task_boundaries_at_train_time = False
            self.known_task_boundaries_at_test_time = False
            self.task_labels_at_train_time = False
            self.task_labels_at_test_time = False
            # self.steps_per_task = self.max_steps

        # Task schedules for training / validation and testing.

        # Create a temporary environment so we can extract the spaces and create
        # the task schedules.
        with self._make_env(
            self.dataset, self._temp_wrappers(), self.observe_state_directly
        ) as temp_env:
            # FIXME: Replacing the observation space dtypes from their original
            # 'generated' NamedTuples to self.Observations. The alternative
            # would be to add another argument to the MultiTaskEnv wrapper, to
            # pass down a dtype to be set on its observation_space's `dtype`
            # attribute, which would be ugly.
            assert isinstance(temp_env.observation_space, NamedTupleSpace)
            temp_env.observation_space.dtype = self.Observations
            # Populate the task schedules created above.
            if not self.train_task_schedule:
                train_change_steps = list(range(0, self.max_steps, self.steps_per_task))
                if self.smooth_task_boundaries:
                    # Add a last 'task' at the end of the 'epoch', so that the
                    # env changes smoothly right until the end.
                    train_change_steps.append(self.max_steps)
                self.train_task_schedule = self.create_task_schedule(
                    temp_env, train_change_steps,
                )

            assert self.train_task_schedule is not None
            # The validation task schedule is the same as the one used in
            # training by default.
            if not self.valid_task_schedule:
                self.valid_task_schedule = deepcopy(self.train_task_schedule)

            if not self.test_task_schedule:
                # The test task schedule is by default the same as in validation
                # except that the interval between the tasks may be different,
                # depending on the value of `self.test_steps_per_task`.
                valid_steps = sorted(self.valid_task_schedule.keys())
                valid_tasks = [self.valid_task_schedule[step] for step in valid_steps]
                self.test_task_schedule = {
                    i * self.test_steps_per_task: deepcopy(task)
                    for i, task in enumerate(valid_tasks)
                }

            # Set the spaces using the temp env.
            self.observation_space = temp_env.observation_space
            self.action_space = temp_env.action_space
            self.reward_range = temp_env.reward_range
            self.reward_space = getattr(
                temp_env,
                "reward_space",
                spaces.Box(
                    low=self.reward_range[0], high=self.reward_range[1], shape=()
                ),
            )

        del temp_env

        self.train_env: gym.Env
        self.valid_env: gym.Env
        self.test_env: gym.Env

    def create_task_schedule(
        self, temp_env: MultiTaskEnvironment, change_steps: List[int]
    ) -> Dict[int, Dict]:
        """ Create the task schedule, which maps from a step to the changes that
        will occur in the environment when that step is reached.
        
        Uses the provided `temp_env` to generate the random tasks at the steps
        given in `change_steps` (a list of integers).

        Returns a dictionary mapping from integers (the steps) to the changes
        that will occur in the env at that step.

        TODO: IDEA: Instead of just setting env attributes, use the
        `methodcaller` or `attrsetter` from the `operator` built-in package,
        that way later when we want to add support for Meta-World, we can just
        use `partial(methodcaller("set_task"), task="new_task")(env)` or
        something like that (i.e. generalize from changing an attribute to
        applying a function on the env, which would allow calling methods in
        addition to setting attributes.)
        """
        task_schedule: Dict[int, Dict] = {}
        # Start with the default task (step 0) and then add a new task at
        # intervals of `self.steps_per_task`
        for task_step in change_steps:
            if task_step == 0:
                # Start with the default task, so that we can recover the 'iid'
                # case with standard env dynamics when there is only one task
                # and no non-stationarity.
                task_schedule[task_step] = temp_env.default_task
            else:
                task_schedule[task_step] = temp_env.random_task()

        return task_schedule

    def apply(
        self, method: Method, config: Config = None
    ) -> "ContinualRLSetting.Results":
        """Apply the given method on this setting to producing some results. """
        # Use the supplied config, or parse one from the arguments that were
        # used to create `self`.
        self.config: Config
        if config is not None:
            self.config = config
            logger.debug(f"Using Config {self.config}")
        elif isinstance(getattr(method, "config", None), Config):
            self.config = method.config
            logger.debug(f"Using Config from the Method: {self.config}")
        else:
            logger.debug(f"Parsing the Config from the command-line.")
            self.config = Config.from_args(self._argv, strict=False)
            logger.debug(f"Resulting Config: {self.config}")

        # TODO: Test to make sure that this doesn't cause any other bugs with respect to
        # the display of stuff:
        # Call this method, which creates a virtual display if necessary.
        self.config.get_display()

        # TODO: Should we really overwrite the method's 'config' attribute here?
        if not getattr(method, "config", None):
            method.config = self.config

        # TODO: Remove `Setting.configure(method)` entirely, from everywhere,
        # and use the `prepare_data` or `setup` methods instead (since these
        # `configure` methods aren't using the `method` anyway.)
        method.configure(setting=self)

        # BUG: This won't work if the task schedule uses callables as the values (as
        # they aren't json-serializable.)
        if self._new_random_task_on_reset:
            logger.info(
                f"Train tasks: "
                + json.dumps(list(self.train_task_schedule.values()), indent="\t")
            )
        else:
            logger.info(
                f"Train task schedule:"
                + json.dumps(self.train_task_schedule, indent="\t")
            )
        if self.config.debug:
            logger.debug(
                f"Test task schedule:"
                + json.dumps(self.test_task_schedule, indent="\t")
            )

        # Run the Training loop (which is defined in IncrementalSetting).
        results = self.main_loop(method)

        logger.info("Results summary:")
        logger.info(results.to_log_dict())
        logger.info(results.summary())
        method.receive_results(self, results=results)
        return results

        # Run the Test loop (which is defined in IncrementalSetting).
        # results: RlResults = self.test_loop(method)

    def setup(self, stage: str = None) -> None:
        # Called before the start of each task during training, validation and
        # testing.
        super().setup(stage=stage)
        if stage in {"fit", None}:
            self.train_wrappers = self.create_train_wrappers()
            self.valid_wrappers = self.create_valid_wrappers()
        elif stage in {"test", None}:
            self.test_wrappers = self.create_test_wrappers()
    
    def prepare_data(self, *args, **kwargs) -> None:
        # We don't really download anything atm.
        if self.config is None:
            self.config = Config()
        super().prepare_data(*args, **kwargs)

    def train_dataloader(
        self, batch_size: int = None, num_workers: int = None
    ) -> ActiveEnvironment:
        """Create a training gym.Env/DataLoader for the current task.
        
        Parameters
        ----------
        batch_size : int, optional
            The batch size, which in this case is the number of environments to
            run in parallel. When `None`, the env won't be vectorized. Defaults
            to None.
        num_workers : int, optional
            The number of workers (processes) to use in the vectorized env. When
            None, the envs are run in sequence, which could be very slow. Only
            applies when `batch_size` is not None. Defaults to None.

        Returns
        -------
        GymDataLoader
            A (possibly vectorized) environment/dataloader for the current task.
        """
        if not self.has_prepared_data:
            self.prepare_data()
        # NOTE: We actually want to call setup every time, so we re-create the
        # wrappers for each task.
        # if not self.has_setup_fit:
        self.setup("fit")

        batch_size = batch_size or self.batch_size
        num_workers = num_workers if num_workers is not None else self.num_workers
        env_factory = partial(
            self._make_env,
            base_env=self.dataset,
            wrappers=self.train_wrappers,
            observe_state_directly=self.observe_state_directly,
        )
        env_dataloader = self._make_env_dataloader(
            env_factory,
            batch_size=batch_size,
            num_workers=num_workers,
            max_steps=self.steps_per_task,
            max_episodes=self.episodes_per_task,
        )

        if self.monitor_training_performance:
            from sequoia.settings.passive.cl.measure_performance_wrapper import (
                MeasureRLPerformanceWrapper,
            )
            env_dataloader = MeasureRLPerformanceWrapper(
                env_dataloader, wandb_prefix=f"Train/Task {self.current_task_id}"
            )
        
        self.train_env = env_dataloader
        # BUG: There is a mismatch between the train env's observation space and the
        # shape of its observations.
        self.observation_space = self.train_env.observation_space

        return self.train_env

    def val_dataloader(
        self, batch_size: int = None, num_workers: int = None
    ) -> Environment:
        """Create a validation gym.Env/DataLoader for the current task.
        
        Parameters
        ----------
        batch_size : int, optional
            The batch size, which in this case is the number of environments to
            run in parallel. When `None`, the env won't be vectorized. Defaults
            to None.
        num_workers : int, optional
            The number of workers (processes) to use in the vectorized env. When
            None, the envs are run in sequence, which could be very slow. Only
            applies when `batch_size` is not None. Defaults to None.

        Returns
        -------
        GymDataLoader
            A (possibly vectorized) environment/dataloader for the current task.
        """
        if not self.has_prepared_data:
            self.prepare_data()
        self.setup("fit")

        env_factory = partial(
            self._make_env,
            base_env=self.dataset,
            wrappers=self.valid_wrappers,
            observe_state_directly=self.observe_state_directly,
        )
        env_dataloader = self._make_env_dataloader(
            env_factory,
            batch_size=batch_size or self.batch_size,
            num_workers=num_workers if num_workers is not None else self.num_workers,
            max_steps=self.steps_per_task,
            max_episodes=self.episodes_per_task,
        )
        self.val_env = env_dataloader
        return self.val_env

    def test_dataloader(
        self, batch_size: int = None, num_workers: int = None
    ) -> TestEnvironment:
        """Create the test 'dataloader/gym.Env' for all tasks.
        
        NOTE: This test environment isn't just for the current task: it actually
        contains the sequence of all tasks. This is different from the train or
        validation environments, since when the task labels are available at
        train time, calling `train/valid_dataloader` returns the env for the
        current task only, and the `.fit` method is called once per task.
        
        This environment is also different in that it is wrapped with a Monitor,
        which we might eventually use to save the results/gifs/logs of the
        testing runs.

        Parameters
        ----------
        batch_size : int, optional
            The batch size, which in this case is the number of environments to
            run in parallel. When `None`, the env won't be vectorized. Defaults
            to None.
        num_workers : int, optional
            The number of workers (processes) to use in the vectorized env. When
            None, the envs are run in sequence, which could be very slow. Only
            applies when `batch_size` is not None. Defaults to None.

        Returns
        -------
        TestEnvironment
            A testing environment which keeps track of the performance of the
            actor and accumulates logs/statistics that are used to eventually
            create the 'Result' object.
        """
        if not self.has_prepared_data:
            self.prepare_data()
        self.setup("test")
        # BUG: gym.wrappers.Monitor doesn't want to play nice when applied to
        # Vectorized env, it seems..
        # FIXME: Remove this when the Monitor class works correctly with
        # batched environments.
        batch_size = batch_size or self.batch_size
        if batch_size is not None:
            logger.warning(
                UserWarning(
                    colorize(
                        f"WIP: Only support batch size of `None` (i.e., a single env) "
                        f"for the test environments of RL Settings at the moment, "
                        f"because the Monitor class from gym doesn't work with "
                        f"VectorEnvs. (batch size was {batch_size})",
                        "yellow",
                    )
                )
            )
            batch_size = None

        num_workers = num_workers if num_workers is not None else self.num_workers
        env_factory = partial(
            self._make_env,
            base_env=self.dataset,
            wrappers=self.test_wrappers,
            observe_state_directly=self.observe_state_directly,
        )
        # TODO: Pass the max_steps argument to this `_make_env_dataloader` method,
        # rather than to a `step_limit` on the TestEnvironment.
        env_dataloader = self._make_env_dataloader(
            env_factory, batch_size=batch_size, num_workers=num_workers,
        )
        # TODO: We should probably change the max_steps depending on the
        # batch size of the env.
        test_loop_max_steps = self.test_steps // (batch_size or 1)
        # TODO: Find where to configure this 'test directory' for the outputs of
        # the Monitor.
        test_dir = "results"
        # TODO: Debug wandb Monitor integration.
        self.test_env = ContinualRLTestEnvironment(
            env_dataloader,
            task_schedule=self.test_task_schedule,
            directory=test_dir,
            step_limit=test_loop_max_steps,
            config=self.config,
            force=True,
            video_callable=None if self.config.render else False,
        )
        return self.test_env

    @property
    def phases(self) -> int:
        """The number of training 'phases', i.e. how many times `method.fit` will be
        called.
        
        In the case of ContinualRL, fit is only called once, with an environment that
        shifts between all the tasks.
        """
        return 1
    
    @staticmethod
    def _make_env(
        base_env: Union[str, gym.Env, Callable[[], gym.Env]],
        wrappers: List[Callable[[gym.Env], gym.Env]] = None,
        observe_state_directly: bool = False,
    ) -> gym.Env:
        """ Helper function to create a single (non-vectorized) environment. """
        env: gym.Env
        if isinstance(base_env, str):
            if base_env.startswith("MetaMonsterKong") and observe_state_directly:
                env = gym.make(base_env, observe_state=True)
            else:
                env = gym.make(base_env)
        elif isinstance(base_env, gym.Env):
            env = base_env
        elif callable(base_env):
            env = base_env()
        else:
            raise RuntimeError(
                f"base_env should either be a string, a callable, or a gym "
                f"env. (got {base_env})."
            )
        for wrapper in wrappers or []:  # `wrappers` may be None.
            env = wrapper(env)
        return env

    def _make_env_dataloader(
        self,
        env_factory: Callable[[], gym.Env],
        batch_size: Optional[int],
        num_workers: Optional[int] = None,
        seed: Optional[int] = None,
        max_steps: Optional[int] = None,
        max_episodes: Optional[int] = None,
    ) -> GymDataLoader:
        """ Helper function for creating a (possibly vectorized) environment. """
        logger.debug(
            f"batch_size: {batch_size}, num_workers: {num_workers}, seed: {seed}"
        )

        env: Union[gym.Env, gym.vector.VectorEnv]
        if batch_size is None:
            env = env_factory()
        else:
            env = make_batched_env(
                env_factory,
                batch_size=batch_size,
                num_workers=num_workers,
                # TODO: Still debugging shared memory + custom spaces (e.g. Sparse).
                shared_memory=False,
            )

        ## Apply the "post-batch" wrappers:
        # from sequoia.common.gym_wrappers import ConvertToFromTensors
        # TODO: Only the BaselineMethod requires this, we should enable it only
        # from the BaselineMethod, and leave it 'off' by default.
        if self.add_done_to_observations:
            env = AddDoneToObservation(env)
        # # Convert the samples to tensors and move them to the right device.
        # env = ConvertToFromTensors(env)
        # env = ConvertToFromTensors(env, device=self.config.device)
        # Add a wrapper that converts numpy arrays / etc to Observations/Rewards
        # and from Actions objects to numpy arrays.
        env = TypedObjectsWrapper(
            env,
            observations_type=self.Observations,
            rewards_type=self.Rewards,
            actions_type=self.Actions,
        )
        # Create an IterableDataset from the env using the EnvDataset wrapper.
        dataset = EnvDataset(env, max_steps=max_steps, max_episodes=max_episodes,)

        # Create a GymDataLoader for the EnvDataset.
        env_dataloader = GymDataLoader(dataset)

        if batch_size and seed:
            # Seed each environment with its own seed (based on the base seed).
            env.seed([seed + i for i in range(env_dataloader.num_envs)])
        else:
            env.seed(seed)

        return env_dataloader

    def create_train_wrappers(self) -> List[Callable[[gym.Env], gym.Env]]:
        """Get the list of wrappers to add to each training environment.
        
        The result of this method must be pickleable when using
        multiprocessing.
        
        Returns
        -------
        List[Callable[[gym.Env], gym.Env]]
            The list of wrappers to apply to each training environment.
        """
        # We add a restriction to prevent users from getting data from
        # previous or future tasks.
        # TODO: This assumes that tasks all have the same length.
        starting_step = self.current_task_id * self.steps_per_task
        max_steps = starting_step + self.steps_per_task - 1
        return self._make_wrappers(
            task_schedule=self.train_task_schedule,
            sharp_task_boundaries=self.known_task_boundaries_at_train_time,
            task_labels_available=self.task_labels_at_train_time,
            transforms=self.train_transforms,
            starting_step=starting_step,
            max_steps=max_steps,
            new_random_task_on_reset=self._new_random_task_on_reset,
        )

    def create_valid_wrappers(self) -> List[Callable[[gym.Env], gym.Env]]:
        """Get the list of wrappers to add to each validation environment.
        
        The result of this method must be pickleable when using
        multiprocessing.

        Returns
        -------
        List[Callable[[gym.Env], gym.Env]]
            The list of wrappers to apply to each validation environment.
            
        TODO: Decide how this 'validation' environment should behave in
        comparison with the train and test environments. 
        """
        # We add a restriction to prevent users from getting data from
        # previous or future tasks.
        # TODO: Should the validation environment only be for the current task?
        starting_step = self.current_task_id * self.steps_per_task
        max_steps = starting_step + self.steps_per_task - 1
        return self._make_wrappers(
            task_schedule=self.valid_task_schedule,
            sharp_task_boundaries=self.known_task_boundaries_at_train_time,
            task_labels_available=self.task_labels_at_train_time,
            transforms=self.val_transforms,
            starting_step=starting_step,
            max_steps=max_steps,
            new_random_task_on_reset=self._new_random_task_on_reset,
        )

    def create_test_wrappers(self) -> List[Callable[[gym.Env], gym.Env]]:
        """Get the list of wrappers to add to a single test environment.
        
        The result of this method must be pickleable when using
        multiprocessing.

        Returns
        -------
        List[Callable[[gym.Env], gym.Env]]
            The list of wrappers to apply to the test environment.
        """
        return self._make_wrappers(
            task_schedule=self.test_task_schedule,
            sharp_task_boundaries=self.known_task_boundaries_at_test_time,
            task_labels_available=self.task_labels_at_test_time,
            transforms=self.test_transforms,
            starting_step=0,
            max_steps=self.max_steps,
            new_random_task_on_reset=self._new_random_task_on_reset,
        )

    def load_task_schedule(self, file_path: Path) -> Dict[int, Dict]:
        """ Load a task schedule from the given path. """
        with open(file_path) as f:
            task_schedule = json.load(f)
        # Sort the string keys numerically before converting them to ints.
        return {int(k): task_schedule[k] for k in sorted(task_schedule, key=int)}
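
    # NOTE: A (hypothetical) task schedule file for the method above could look
    # like this, mapping step numbers to the env attributes to set at each step:
    #   {"0": {"length": 0.5}, "10000": {"length": 0.75}, "20000": {"length": 1.0}}
    # The string keys are sorted with `key=int`, since sorting them
    # lexicographically would order "10000" before "2000".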

    def _make_wrappers(
        self,
        task_schedule: Dict[int, Dict],
        sharp_task_boundaries: bool,
        task_labels_available: bool,
        transforms: List[Transforms],
        starting_step: int,
        max_steps: int,
        new_random_task_on_reset: bool,
    ) -> List[Callable[[gym.Env], gym.Env]]:
        """ Helper function for creating the train/valid/test wrappers.
        
        These wrappers get applied *before* the batching, if applicable.
        """
        wrappers: List[Callable[[gym.Env], gym.Env]] = []
        # NOTE: When transitions are smooth, there are no "task boundaries".
        assert sharp_task_boundaries == (not self.smooth_task_boundaries)

        # TODO: Add some kind of Wrapper around the dataset to make it
        # semi-supervised?

        if self.max_episode_steps:
            wrappers.append(
                partial(TimeLimit, max_episode_steps=self.max_episode_steps)
            )

        if is_classic_control_env(self.dataset) and not self.observe_state_directly:
            # If we are in a classic control env, and we don't want the state to
            # be fully-observable (i.e. we want pixel observations rather than
            # getting the pole angle, velocity, etc.), then add the
            # PixelObservation wrapper to the list of wrappers.
            wrappers.append(PixelObservationWrapper)
            wrappers.append(ImageObservations)

        if (
            isinstance(self.dataset, str)
            and self.dataset.lower().startswith("metamonsterkong")
            and not self.observe_state_directly
        ):
            # TODO: Do we need the AtariPreprocessing wrapper on MonsterKong?
            # wrappers.append(partial(AtariPreprocessing, frame_skip=1))
            pass
        elif is_atari_env(self.dataset):
            # TODO: Test & Debug this: Adding the Atari preprocessing wrapper.
            # TODO: Figure out the differences (if there are any) between the
            # AtariWrapper from SB3 and the AtariPreprocessing wrapper from gym.
            wrappers.append(AtariWrapper)
            # wrappers.append(AtariPreprocessing)
            wrappers.append(ImageObservations)

        # Apply image transforms if the env will have image-like obs space
        if not self.observe_state_directly:
            # wrappers.append(ImageObservations)
            # Wrapper to apply the image transforms to the env.
            wrappers.append(partial(TransformObservation, f=transforms))

        # Add a wrapper which will add non-stationarity to the environment.
        # The "task" transitions will either be sharp or smooth.
        # In either case, the task ids for each sample are added to the
        # observations, and the dicts containing the task information (i.e. the
        # current values of the env attributes from the task schedule) get added
        # to the 'info' dicts.
        if sharp_task_boundaries:
            assert self.nb_tasks >= 1
            # Add a wrapper that creates sharp tasks.
            cl_wrapper = MultiTaskEnvironment
        else:
            # Add a wrapper that creates smooth tasks.
            cl_wrapper = SmoothTransitions

        wrappers.append(
            partial(
                cl_wrapper,
                noise_std=self.task_noise_std,
                task_schedule=task_schedule,
                add_task_id_to_obs=True,
                add_task_dict_to_info=True,
                starting_step=starting_step,
                new_random_task_on_reset=new_random_task_on_reset,
                max_steps=max_steps,
            )
        )
        # If the task labels aren't available, we then add another wrapper that
        # hides that information (setting both of them to None) and also marks
        # those spaces as `Sparse`.
        if not task_labels_available:
            # NOTE: This sets the task labels to None, rather than removing
            # them entirely.
            # wrappers.append(RemoveTaskLabelsWrapper)
            wrappers.append(HideTaskLabelsWrapper)

        return wrappers

    def _temp_wrappers(self) -> List[Callable[[gym.Env], gym.Env]]:
        """ Gets the minimal wrappers needed to figure out the Spaces of the
        train/valid/test environments.
        
        This is called in the 'constructor' (__post_init__) to set the Setting's
        observation/action/reward spaces, so this should depend on as little
        state from `self` as possible, since not all attributes have been
        defined at the time when this is called. 
        """
        return self._make_wrappers(
            task_schedule=self.train_task_schedule,
            sharp_task_boundaries=self.known_task_boundaries_at_train_time,
            task_labels_available=self.task_labels_at_train_time,
            transforms=self.train_transforms,
            # These two shouldn't matter really:
            starting_step=0,
            max_steps=self.max_steps,
            new_random_task_on_reset=self._new_random_task_on_reset,
        )
Example #13
def flag(default: bool, *args, **kwargs):
    return field(default=default, nargs=1, *args, **kwargs)
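
A minimal usage sketch for the `flag` helper above, assuming a simple_parsing
context (the `Options` dataclass and the CLI value are illustrative, not from
the original source):

from dataclasses import dataclass

from simple_parsing import ArgumentParser

@dataclass
class Options:
    # A boolean declared through `flag`; it takes an explicit value on the
    # command line, e.g. "--verbose true".
    verbose: bool = flag(default=False)

parser = ArgumentParser()
parser.add_arguments(Options, dest="options")
options: Options = parser.parse_args().options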
Example #14
def constant(v: T, **kwargs) -> T:
    metadata = kwargs.setdefault("metadata", {})
    metadata["constant"] = v
    return field(default=v, init=False, **kwargs)
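
`constant` marks a field as fixed: `init=False` keeps it out of the generated
constructor (and off the command line), while the value is recorded in the
field's metadata. A small sketch with a hypothetical `HParams` class:

from dataclasses import dataclass

@dataclass
class HParams:
    # Always "Adam"; not exposed as a constructor or command-line argument.
    optimizer: str = constant("Adam")

hp = HParams()
assert hp.optimizer == "Adam"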
Example #15
class TrainCfg:
    """" The hyper params to configure the training. See Supplementary C. "implementation details".
         Note: For backward compatibility, some of the variable names here are different than those mentioned in the
         paper. We explain in comments, how each variable is referenced at the paper.
    """

    metrics: MetricsCfg

    batch_size: int = 2048
    """ batch size """

    lr: float = 0.003
    """ initial learning rate """

    max_epoch: int = 5
    """ max number of epochs """

    alternate_ys: int = 21
    """ Whether and how to use alternate training. 0: no alternation|12: object then attr|21: attr then obj"""

    lr_step2: float = 3e-05
    """ Step 2 initial learning rate. Only relevant if alternate_ys != 0 """

    max_epoch_step2: int = 1000
    """ Step 2 max number of epochs. Only relevant if alternate_ys != 0 """

    weight_decay: float = 0.1
    """ weight_decay """

    HSIC_coeff: float = 10
    """ \lambda_rep in paper """

    alphaH: float = 0
    """ \lambda_oh in paper """

    alphaH_step2: float = -1
    """ Step 2 \lambda_oh. Only relevant if alternate_ys != 0. If set to -1, then take --alphaH value """

    lambda_CE: float = 1
    """ a coefficient for L_data """

    lambda_feat: float = 1
    """ \lambda_ao in paper """

    lambda_ao_emb: float = 0
    """ \lambda_ao when projecting pretrained image features to the feature space \X
    (as explained for Zappos in section C.1).
    Note: --lambda_feat and --lambda_ao_emb can't both be non-zero (we raise an exception in this case).
    """

    lambda_aux_disjoint: float = 100
    """ \lambda_icore in paper """

    lambda_aux_img: float = 10
    """ \lambda_ig in paper, when --lambda_feat>0"""

    lambda_aux: float = 0
    """ \lambda_ig in paper, when --lambda_ao_emb>0"""

    mu_img_feat: float = 0.1
    """ \lambda_ao at inference time """

    balanced_loss: bool = True
    """ Weigh the losses ||φa−ha||^2 and ||φo−ho||^2 according to the respective attribute and object
        frequencies in the training set (described in Supplementary C.2). """

    triplet_loss_margin: float = 0.5
    """ The margin for the triplet loss. Same value as used by attributes-as-operators """

    optimizer_name: str = 'Adam'

    seed: int = 0
    """ random seed """

    test_batchsize: int = -1
    """batch-size for inference; default uses the training batch size"""

    verbose: bool = True
    num_workers: int = 8
    shuffle_eval_set: bool = True
    n_iter: int = field(init=False)
    mu_disjoint: float = field(init=False)
    mu_ao_emb: float = field(init=False)
    primary_early_stop_metric: EarlyStopMetric = field(init=False)
    freeze_class1: bool = field(init=False)
    freeze_class2: bool = field(init=False)
    Y12_balance_coeff: float = field(init=False)

    def __post_init__(self):
        # sanity checks (assertions)
        assert (self.alternate_ys in [0, 12, 21])
        assert not ((self.lambda_ao_emb > 0) and (self.lambda_feat > 0))
        if self.lambda_feat == 0:
            assert (self.mu_img_feat == 0)

        # assignments
        if self.test_batchsize <= 0:
            self.test_batchsize = self.batch_size
        if self.alphaH_step2 < 0:
            self.alphaH_step2 = self.alphaH

        self.mu_disjoint = self.lambda_CE
        self.mu_ao_emb = self.lambda_ao_emb
        self.primary_early_stop_metric = EarlyStopMetric('epoch', 'max')
        self.Y12_balance_coeff = 0.5
        self.freeze_class1 = False
        self.freeze_class2 = False
        self.n_iter = -1  # Should be updated after data is loaded

    def set_n_iter(self, num_train_samples, max_epoch=None):
        if max_epoch is None:
            max_epoch = self.max_epoch
        self.n_iter = int(max_epoch * np.ceil(num_train_samples / self.batch_size))

    def __getitem__(self, key):
        """ Allow accessing instance attributes as dictionary keys """
        return getattr(self, key)
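
With `__getitem__` defined, the config can be read like a dict as well as
through attributes. A sketch, assuming `MetricsCfg` can be default-constructed
(its definition isn't shown here):

cfg = TrainCfg(metrics=MetricsCfg())
assert cfg["lr"] == cfg.lr == 0.003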
Example #16
class Loss(Serializable):
    """ Object used to store the losses and metrics. 

    Used to simplify the return type of the different `get_loss` functions and
    also to help in debugging models that use a combination of different loss
    signals.

    TODO: Add some kind of histogram plot to show the relative contribution of
    each loss signal?
    TODO: Maybe create a `make_plots()` method to create wandb plots?
    """
    name: str
    loss: Tensor = 0.  # type: ignore
    losses: Dict[str, "Loss"] = dict_field()
    # NOTE: By setting to_dict=False below, we don't include the tensors when
    # serializing the attributes.
    # TODO: Does that also mean that the tensors can't be pickled (moved) by
    # pytorch-lightning during training? Is there a case where that would be
    # useful?
    tensors: Dict[str, Tensor] = dict_field(repr=False, to_dict=False)
    metrics: Dict[str, Union[Metrics, Tensor]] = dict_field()
    # When multiplying the Loss by a value, this keep track of the coefficients
    # used, so that if we wanted to we could recover the 'unscaled' loss.
    _coefficient: Union[float, Tensor] = field(1.0, repr=False)

    x: InitVar[Optional[Tensor]] = None
    h_x: InitVar[Optional[Tensor]] = None
    y_pred: InitVar[Optional[Tensor]] = None
    y: InitVar[Optional[Tensor]] = None

    def __post_init__(self,
                      x: Tensor = None,
                      h_x: Tensor = None,
                      y_pred: Tensor = None,
                      y: Tensor = None):
        assert self.name, "Loss objects should be given a name!"
        if self.name not in self.metrics:
            # Create a Metrics object if given the necessary tensors.
            metrics = get_metrics(x=x, h_x=h_x, y_pred=y_pred, y=y)
            if metrics:
                self.metrics[self.name] = metrics
        self._device: torch.device = None
        for name in list(self.tensors.keys()):
            tensor = self.tensors[name]
            if not isinstance(tensor, Tensor):
                self.tensors[name] = torch.as_tensor(tensor)
            elif self._device is None:
                self._device = tensor.device

    def to_pl_dict(self, verbose: bool = False) -> Dict:
        """Creates a pytorch-lightning-style dict from this Loss object.

        Can be used as a return value to the `[training/validation/test]_step'
        methods of a `LightningModule`, like so:
        ```python
        # (inside some LightningModule)
        def training_step(self, batch, ...) -> Dict:
            x, y = batch
            y_pred = self.forward(x)
            nce = self.loss_fn(y_pred, y)
            loss: Loss = Loss("train", loss=nce, y_pred=y_pred, y=y)
            return loss.to_pl_dict()
        ```

        Args:
            verbose (bool, optional): Whether to keep things short or to include
                everything into the log dictionary. Defaults to False.

        Returns:
            Dict: A dictionary with the usual 'loss', 'log' and 'progress_bar'
                keys, and additionally with a copy of 'self' at the key
                'loss_object'
        """
        return {
            "loss": self.loss,
            "log": self.to_log_dict(verbose=verbose),
            "progress_bar": self.to_pbar_message(),
            "loss_object": self,
        }

    @property
    def total_loss(self) -> Tensor:
        return self.loss
    
    @property
    def requires_grad(self) -> bool:
        """ Returns whether the loss tensor in this object requires grad. """
        return isinstance(self.loss, Tensor) and self.loss.requires_grad
    
    def backward(self, *args, **kwargs):
        """ Calls `self.loss.backward(*args, **kwargs)`. """
        return self.loss.backward(*args, **kwargs)
    
    @property
    def metric(self) -> Optional[Metrics]:
        """Shortcut for `self.metrics[self.name]`.

        Returns:
            Optional[Metrics]: The main metrics associated with this Loss.
        """
        return self.metrics.get(self.name)

    @metric.setter
    def metric(self, value: Metrics) -> None:
        """Shortcut for `self.metrics[self.name] = value`.

        Parameters
        ----------
        value : Metrics
            The main metrics associated with this Loss.
        """
        assert self.name not in self.metrics, "There's already a metric here?"
        self.metrics[self.name] = value

    @property
    def accuracy(self) -> float:
        if isinstance(self.metric, ClassificationMetrics):
            return self.metric.accuracy

    @property
    def mse(self) -> Tensor:
        assert isinstance(self.metric, RegressionMetrics), self
        return self.metric.mse

    def __add__(self, other: Union["Loss", Any]) -> "Loss":
        """Adds two Loss instances together.
        
        Adds the losses, total loss and metrics. Overwrites the tensors.
        Keeps the name of the first one. This is useful when doing something
        like:
        
        ```
        loss = Loss("Test")
        for x, y in dataloader:
            loss += model.get_loss(x=x, y=y)
        ```      
        
        Returns
        -------
        Loss
            The merged/summed up Loss.
        """
        if other == 0:
            return self
        if not isinstance(other, Loss):
            return NotImplemented
        name = self.name
        loss = self.loss + other.loss
        
        if self.name == other.name:
            losses  = add_dicts(self.losses, other.losses)
            metrics = add_dicts(self.metrics, other.metrics)
        else:
            # IDEA: when the names don't match, store the entire Loss
            # object into the 'losses' dict, rather than a single loss tensor.
            losses = add_dicts(self.losses, {other.name: other})
            # TODO: setting in the 'metrics' dict, we are duplicating the
            # metrics, since they now reside in the `self.metrics[other.name]`
            # and `self.losses[other.name].metrics` attributes.
            metrics = self.metrics
            # metrics = add_dicts(self.metrics, {other.name: other.metrics})
        
        tensors = add_dicts(self.tensors, other.tensors, add_values=False)
        return Loss(
            name=name,
            loss=loss,
            losses=losses,
            tensors=tensors,
            metrics=metrics,
            _coefficient=self._coefficient,
        )

    def __iadd__(self, other: Union["Loss", Any]) -> "Loss":
        """Adds Loss to `self` in-place.
        
        Adds the losses, total loss and metrics. Overwrites the tensors.
        Keeps the name of the first one. This is useful when doing something
        like:
        
        ```
        loss = Loss("Test")
        for x, y in dataloader:
            loss += model.get_loss(x=x, y=y)
        ```
        
        Returns
        -------
        Loss
            `self`: The merged/summed up Loss.
        """
        self.loss = self.loss + other.loss
        
        if self.name == other.name:
            self.losses  = add_dicts(self.losses, other.losses)
            self.metrics = add_dicts(self.metrics, other.metrics)
        else:
            # IDEA: when the names don't match, store the entire Loss
            # object into the 'losses' dict, rather than a single loss tensor.
            self.losses = add_dicts(self.losses, {other.name: other})
        
        self.tensors = add_dicts(self.tensors, other.tensors, add_values=False)
        return self

    def __radd__(self, other: Any):
        """Addition operator for when forward addition returned `NotImplemented`.

        For example, doing something like `None + Loss()` will use __radd__,
        whereas doing `Loss() + None` will use __add__.
        """
        if other is None:
            return self
        elif other == 0:
            return self
        if isinstance(other, Tensor):
            # TODO: Other could be a loss tensor, maybe create a Loss object for it?
            pass
        return NotImplemented
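
    # NOTE: Returning `self` for `other == 0` in `__add__`, and for 0/None in
    # `__radd__`, is what lets Python's built-in `sum` work on a collection of
    # Loss objects, since `sum` starts its accumulator at 0. For example:
    #     total_loss = sum(loss_per_task)  # with loss_per_task: List[Loss]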

    def __mul__(self, factor: Union[float, Tensor]) -> "Loss":
        """ Scale each loss tensor by `factor`.

        Returns
        -------
        Loss
            returns a scaled Loss instance.
        """
        result = Loss(
            name=self.name,
            loss=self.loss * factor,
            losses={
                k: value * factor for k, value in self.losses.items()
            },
            metrics=self.metrics,
            tensors=self.tensors,
            _coefficient=self._coefficient * factor,
        )
        return result

    def __rmul__(self, factor: Union[float, Tensor]) -> "Loss":
        # assert False, f"rmul: {factor}"
        return self.__mul__(factor)

    def __truediv__(self, coefficient: Union[float, Tensor]) -> "Loss":
        return self * (1 / coefficient)

    @property
    def unscaled_losses(self):
        """ Recovers the 'unscaled' version of this loss.

        TODO: This isn't used anywhere. We could probably remove it.
        """
        return {
            k: value / self._coefficient for k, value in self.losses.items()
        }

    def to_log_dict(self, verbose: bool = False) -> Dict[str, Union[str, float, Dict]]:
        """Creates a dictionary to be logged (e.g. by `wandb.log`).

        Args:
            verbose (bool, optional): Whether to include a lot of information,
                or to only log the 'essential' stuff. See the `cleanup`
                function for more info. Defaults to False.

        Returns:
            Dict: A dict containing the things to be logged.
        """
        # TODO: Could also produce some wandb plots and stuff here when verbose?
        log_dict: Dict[str, Union[str, float, Dict, Tensor]] = {}
        log_dict["loss"] = round(float(self.loss), 6)

        for name, metric in self.metrics.items():
            if isinstance(metric, Serializable):
                log_dict[name] = metric.to_log_dict(verbose=verbose)
            else:
                log_dict[name] = metric

        for name, loss in self.losses.items():
            if isinstance(loss, Serializable):
                log_dict[name] = loss.to_log_dict(verbose=verbose)
            else:
                log_dict[name] = loss

        log_dict = add_prefix(log_dict, prefix=self.name, sep="/")
        keys_to_remove: List[str] = []
        if not verbose:
            # when NOT verbose, remove any entries with this matching key.
            # TODO: add/remove keys here if you want to customize what doesn't get logged to wandb.
            # TODO: Could maybe make this a class variable so that it could be
            # extended/overwritten, but that sounds like a bit too much rn.
            keys_to_remove = [
                "n_samples",
                "name",
                "confusion_matrix",
                "class_accuracy",
                "_coefficient",
            ]
        result = cleanup(log_dict, keys_to_remove=keys_to_remove, sep="/") 
        return result
 
    def to_pbar_message(self) -> Dict[str, float]:
        """ Smaller, less-detailed version of `to_log_dict()` for progress bars.
        """
        # NOTE: PL actually doesn't seem to accept strings as values 
        message: Dict[str, Union[str, float]] = {}
        message["Loss"] = float(self.loss)

        for name, metric in self.metrics.items():
            if isinstance(metric, Metrics):
                message[name] = metric.to_pbar_message()
            else:
                message[name] = metric

        for name, loss_info in self.losses.items():
            message[name] = loss_info.to_pbar_message()

        message = add_prefix(message, prefix=self.name, sep=" ")

        return cleanup(message, sep=" ")

    def clear_tensors(self) -> "Loss":
        """ Clears the `tensors` attribute of `self` and of its sub-losses.

        NOTE: This could be useful if you want to save some space/compute, but
        it isn't currently used anywhere. You might want to call this if you
        are storing big tensors (or passing them to the constructor).
        """
        self.tensors.clear()
        for loss in self.losses.values():
            loss.clear_tensors()
        return self

    def absorb(self, other: "Loss") -> None:
        """Absorbs `other` into `self`, merging the losses and metrics.

        Args:
            other (Loss): Another loss to 'merge' into this one.
        """
        new_name = self.name
        old_name = other.name
        # Here we create a new 'other' and use __iadd__ to merge the attributes.
        new_other = Loss(name=new_name)
        new_other.loss = other.loss
        # We also replace the name in the keys, if present.
        new_other.metrics = {
            k.replace(old_name, new_name): v for k, v in other.metrics.items() 
        }
        new_other.losses = {
            k.replace(old_name, new_name): v for k, v in other.losses.items() 
        }
        self += new_other

    def all_metrics(self) -> Dict[str, Metrics]:
        """ Returns a 'cleaned up' dictionary of all the Metrics objects. """
        assert self.name
        result: Dict[str, Metrics] = {}
        result.update(self.metrics)

        for name, loss in self.losses.items():
            # TODO: Aren't we potentially colliding with 'self.metrics' here?
            subloss_metrics = loss.all_metrics()
            for key, metric in subloss_metrics.items():
                assert key not in result, (
                    f"Collision in metric keys of subloss {name}: key={key}, "
                    f"result={result}"
                )
                result[key] = metric
        result = add_prefix(result, prefix=self.name, sep="/")
        return result
Example #17
class Example2:
    # (This argument can be passed either as "-i" or "--input_dir")
    input_dir: str = field("./in", alias="-i")
    # (This argument can be passed either as "-o", "--out", or "--output_dir")
    output_dir: str = field("./out", alias=["-o", "--out"])
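
Assuming `Example2` is a dataclass parsed with simple_parsing, either the alias
or the full option name can be used on the command line. A sketch (the CLI
values are illustrative):

from simple_parsing import ArgumentParser

parser = ArgumentParser()
parser.add_arguments(Example2, dest="example")
args = parser.parse_args(["-i", "./data", "--out", "./results"])
assert args.example.input_dir == "./data"
assert args.example.output_dir == "./results"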
Example #18
class Foo(TestSetup):
    some_int: int = field(type=int, default=-1, nargs="+")
Example #19
class Foo:
    flag: bool = field(alias=["-f", "-flag"], action="store_true")
    # Whether or not to store some value.
    no_cache: bool = field(action="store_false")
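
With `action="store_true"` the flag defaults to False and is set to True when
passed; `action="store_false"` is the inverse. A sketch of the expected
behaviour, assuming `Foo` is a dataclass parsed with simple_parsing:

from simple_parsing import ArgumentParser

parser = ArgumentParser()
parser.add_arguments(Foo, dest="foo")
# Passing "-f" (or "-flag") sets `flag` to True; passing "--no_cache" would
# set `no_cache` to False.
foo = parser.parse_args(["-f"]).foo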
Example #20
class Foo(TestSetup):
    output_dir: str = field(type=str, nargs="?", action=CustomAction)
Example #21
class ContainerWithList(TestSetup):
    list_of_class_c: List[ClassC] = field(
        default_factory=lambda: [ClassC()] * 2)
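
One caveat with this default factory: `[ClassC()] * 2` puts the same `ClassC`
instance in the list twice, so mutating one element also mutates the other. If
independent defaults are wanted, a comprehension avoids the aliasing:

class ContainerWithList(TestSetup):
    list_of_class_c: List[ClassC] = field(
        default_factory=lambda: [ClassC() for _ in range(2)])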
Example #22
class CustomMethod(BaselineMethod, target_setting=Setting):
    """ Example method which adds regularization to the baseline in RL and SL.
    
    This extends the `BaselineMethod` by adding the simple regularization
    auxiliary task defined above to the `BaselineModel`.
    
    NOTE: Since this class inherits from `BaselineMethod`, which targets the
    `Setting` setting, i.e. the "root" node, it is applicable to all settings,
    both in RL and SL. However, you could customize the `target_setting`
    argument above to limit this to any particular subtree (only SL, only RL,
    only when task labels are present, etc).
    """

    # Hyper-parameters of the customized Baseline Model used by this method.
    hparams: CustomizedBaselineModel.HParams = field(
        default_factory=CustomizedBaselineModel.HParams)

    def __init__(
        self,
        hparams: CustomizedBaselineModel.HParams = None,
        config: Config = None,
        trainer_options: TrainerConfig = None,
        **kwargs,
    ):
        super().__init__(
            hparams=hparams,
            config=config,
            trainer_options=trainer_options,
            **kwargs,
        )

    def create_model(self, setting: Setting) -> CustomizedBaselineModel:
        """ Creates the Model to be used for the given `Setting`. """
        return CustomizedBaselineModel(setting=setting,
                                       hparams=self.hparams,
                                       config=self.config)

    def configure(self, setting: Setting):
        """ Configure this Method before being trained / tested on this Setting.
        """
        super().configure(setting)

        # For example, change the value of the coefficient of our
        # regularization loss when in RL vs SL:
        if isinstance(setting, RLSetting):
            self.hparams.simple_reg.coefficient = 0.01
        else:
            self.hparams.simple_reg.coefficient = 1.0

    def fit(self, train_env: Environment, valid_env: Environment):
        """ Called by the Setting to let the Method train on a given task.
        
        You can do whatever you want with the train and valid environments.
        Currently, in most `Settings`, the valid environment will contain data
        from only the current task. (See the issue at
        https://github.com/lebrice/Sequoia/issues/46 for more context.)
        """
        return super().fit(train_env=train_env, valid_env=valid_env)

    @classmethod
    def add_argparse_args(cls, parser: ArgumentParser, dest: str = ""):
        """Adds command-line arguments for this Method to an argument parser.
        
        NOTE: This doesn't do anything differently than the base implementation,
        but it's included here just for illustration purposes.
        """
        # 'dest' is where the arguments will be stored on the namespace.
        dest = dest or camel_case(cls.__qualname__)
        # Add all command-line arguments. This adds arguments for all fields of
        # this dataclass.
        parser.add_arguments(cls, dest=dest)
        # You could add arguments here if you wanted to:
        # parser.add_argument("--foo", default=1.23, help="example argument")

    @classmethod
    def from_argparse_args(cls, args: Namespace, dest: str = ""):
        """ Create an instance of this class from the parsed arguments. """
        # Retrieve the parsed arguments:
        dest = dest or camel_case(cls.__qualname__)
        method: CustomMethod = getattr(args, dest)
        # You could retrieve other arguments like so:
        # foo: int = args.foo
        return method
Example #23
class Person(Serializable):
    name: str = field(encoding_fn=lambda s: s.upper(),
                      decoding_fn=lambda s: s.lower())
    age: int = 0
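
A sketch of the resulting round trip, assuming `Serializable` exposes
`to_dict`/`from_dict` as in `simple_parsing.helpers`: the name is upper-cased
on encoding and lower-cased back on decoding.

bob = Person(name="Bob", age=30)
d = bob.to_dict()           # {"name": "BOB", "age": 30}
bob2 = Person.from_dict(d)  # bob2.name == "bob"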
Example #24
class IncrementalRLSetting(ContinualRLSetting):
    """ Continual RL setting where the data is divided into 'tasks' with clear boundaries.

    By default, the task labels are given at train time, but not at test time.

    TODO: Decide how to implement the train procedure: if we give a single
    dataloader, we might need to call the agent's `on_task_switch` when we reach
    a task boundary. Or, we could produce one dataloader per task, and then
    implement a custom `fit` procedure in the CLTrainer class that loops over
    the tasks and calls `on_task_switch` when needed.
    """

    # The number of tasks. By default 0, which means that it will be set
    # depending on other fields in __post_init__, or eventually be just 1.
    nb_tasks: int = field(0, alias=["n_tasks", "num_tasks"])
    # Whether the task boundaries are smooth or sudden.
    smooth_task_boundaries: bool = constant(False)
    # Whether to give access to the task labels at train time.
    task_labels_at_train_time: bool = True
    # Whether to give access to the task labels at test time.
    task_labels_at_test_time: bool = False

    # Class variable that holds the dict of available environments.
    available_datasets: ClassVar[Dict[str, str]] = dict_union(
        ContinualRLSetting.available_datasets,
        {"monsterkong": "MetaMonsterKong-v0"},
    )
    dataset: str = "CartPole-v0"

    def __post_init__(self, *args, **kwargs):
        if not self.nb_tasks:
            # TODO: In case of the metaworld envs, we could derive the 'default'
            # number of tasks to use from the number of available tasks.
            pass

        super().__post_init__(*args, **kwargs)

        if self.dataset == "MetaMonsterKong-v0":
            # TODO: Limit the episode length in monsterkong?
            # TODO: Actually end episodes when reaching a task boundary, to force the
            # level to change?
            self.max_episode_steps = self.max_episode_steps or 500

        # FIXME: Really annoying little bugs with these three arguments!
        self.nb_tasks = self.max_steps // self.steps_per_task

    def _setup_fields_using_temp_env(self, temp_env: MultiTaskEnvironment):
        """ Setup some of the fields on the Setting using a temporary environment.

        This temporary environment only lives during the __post_init__() call.
        """
        super()._setup_fields_using_temp_env(temp_env)
        # TODO: If the dataset has a `max_path_length` attribute, then it's probably
        # a Mujoco / metaworld / etc env, and so we set a limit on the episode length to
        # avoid getting an error.
        max_path_length: Optional[int] = getattr(temp_env, "max_path_length",
                                                 None)
        if self.max_episode_steps is None and max_path_length is not None:
            assert max_path_length > 0
            self.max_episode_steps = max_path_length

    @property
    def phases(self) -> int:
        """The number of training 'phases', i.e. how many times `method.fit` will be
        called.

        In this Incremental-RL Setting, fit is called once per task.
        (Same as ClassIncrementalSetting in SL).
        """
        return self.nb_tasks

    @staticmethod
    def _make_env(
        base_env: Union[str, gym.Env, Callable[[], gym.Env]],
        wrappers: List[Callable[[gym.Env], gym.Env]] = None,
        observe_state_directly: bool = False,
    ) -> gym.Env:
        """ Helper function to create a single (non-vectorized) environment.

        This is also used to create the env whenever `self.dataset` is a string that
        isn't registered in gym. This happens for example when using an environment from
        meta-world (or mtenv).
        """
        # Check if the env is registered in a known 'third party' gym-like package, and if
        # needed, create the base env in the way that package requires.
        if isinstance(base_env, str):
            env_id = base_env

            # Check if the id belongs to mtenv
            if mtenv_installed and env_id in mtenv_envs:
                from mtenv import make

                base_env = make(env_id)
                # Add a wrapper that will remove the task information, because we use
                # the same MultiTaskEnv wrapper for all the environments.
                wrappers = [MTEnvAdapterWrapper] + (wrappers or [])

            if metaworld_installed and env_id in metaworld_envs:
                # TODO: Should we use a particular benchmark here?
                # For now, we find the first benchmark that has an env with this name.
                for benchmark_class in [metaworld.ML10]:
                    benchmark = benchmark_class()
                    if env_id in benchmark.train_classes.keys():
                        # TODO: We can either let the base_env be an env type, or
                        # actually instantiate it.
                        base_env: Type[MetaWorldEnv] = benchmark.train_classes[
                            env_id]
                        # NOTE: (@lebrice) Here I believe it's better to just have the
                        # constructor, that way we re-create the env for each task.
                        # I think this might be better, as I don't know for sure that
                        # the `set_task` can be called more than once in metaworld.
                        # base_env = base_env_type()
                        break
                else:
                    raise NotImplementedError(
                        f"Can't find a metaworld benchmark that uses env {env_id}"
                    )

        return ContinualRLSetting._make_env(
            base_env=base_env,
            wrappers=wrappers,
            observe_state_directly=observe_state_directly,
        )

    def create_task_schedule(self, temp_env: MultiTaskEnvironment,
                             change_steps: List[int]) -> Dict[int, Dict]:
        task_schedule: Dict[int, Dict] = {}

        if monsterkong_installed:
            if isinstance(temp_env.unwrapped, MetaMonsterKongEnv):
                for i, task_step in enumerate(change_steps):
                    task_schedule[task_step] = {"level": i}
                return task_schedule

        if isinstance(temp_env.unwrapped, MTEnv):
            for i, task_step in enumerate(change_steps):
                task_schedule[task_step] = operator.methodcaller(
                    "set_task_state", i)
            return task_schedule

        if isinstance(temp_env.unwrapped, (MetaWorldEnv, MujocoEnv)):
            # TODO: Which benchmark to choose?
            base_env = temp_env.unwrapped
            found = False
            # Find the benchmark that contains this type of env.
            for benchmark_class in [metaworld.ML10]:
                benchmark = benchmark_class()
                for env_name, env_class in benchmark.train_classes.items():
                    if isinstance(base_env, env_class):
                        # Found the right benchmark that contains this env
                        # class; now create the task schedule using its tasks.
                        found = True
                        break
                if found:
                    break
            if not found:
                raise NotImplementedError(
                    f"Can't find a benchmark with env class {type(base_env)}!")

            # `benchmark` is here the right benchmark to use to create the tasks.
            training_tasks = [
                task for task in benchmark.train_tasks
                if task.env_name == env_name
            ]
            task_schedule = {
                step: operator.methodcaller("set_task", task)
                for step, task in zip(change_steps, training_tasks)
            }
            return task_schedule

        return super().create_task_schedule(temp_env=temp_env,
                                            change_steps=change_steps)

    def create_train_wrappers(self):
        return super().create_train_wrappers()
Example #25
class Foo(TestSetup):
    output_dir: str = field(
        default="/out",
        alias=["-o", "--out"],
        choices=["/out", "/bob"]
    )
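
With both `alias` and `choices` set, the option can be spelled several ways but
only accepts the listed values. Hypothetical invocations:

# python main.py -o /bob      -> accepted ("/bob" is in choices)
# python main.py --out /tmp   -> rejected (not one of "/out", "/bob")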
Example #26
class A:
    batch_size: int = field(default=10, cmd=False)
Example #27
class Foo(TestSetup):
    output_dir: str = field(type=str, nargs=2)
Example #28
class ClassificationMetrics(Metrics):
    # fields we generate from the confusion matrix (if provided) or from the
    # forward pass tensors.
    accuracy: float = 0.
    confusion_matrix: Optional[Union[Tensor, np.ndarray]] = field(default=None, repr=False, compare=False)
    class_accuracy: Optional[Union[Tensor, np.ndarray]] = field(default=None, repr=False, compare=False)

    # Optional arguments used to create the attributes of the metrics above.
    # NOTE: These won't become attributes on the object, just arguments to __post_init__.
    x:      InitVar[Optional[Tensor]] = None
    h_x:    InitVar[Optional[Tensor]] = None
    y_pred: InitVar[Optional[Tensor]] = None
    y:      InitVar[Optional[Tensor]] = None
    num_classes: InitVar[Optional[int]] = None
    
    def __post_init__(self,
                      x: Tensor = None,
                      h_x: Tensor = None,
                      y_pred: Tensor = None,
                      y: Tensor = None,
                      num_classes: int = None):

        super().__post_init__(x=x, h_x=h_x, y_pred=y_pred, y=y)

        if self.confusion_matrix is None and y_pred is not None and y is not None:
            self.confusion_matrix = get_confusion_matrix(y_pred=y_pred, y=y, num_classes=num_classes)

        #TODO: add other useful metrics (potentially ones using x or h_x?)
        if self.confusion_matrix is not None:
            self.accuracy = get_accuracy(self.confusion_matrix)
            self.accuracy = round(self.accuracy, 6)
            self.class_accuracy = get_class_accuracy(self.confusion_matrix)

    @property
    def objective_name(self) -> str:
        return "Accuracy"

    def __add__(self, other: "ClassificationMetrics") -> "ClassificationMetrics":
        if self.n_samples == 0:
            return other
        if not isinstance(other, ClassificationMetrics):
            return NotImplemented

        # Create the 'sum' confusion matrix:
        confusion_matrix: Optional[Union[Tensor, np.ndarray]] = None
        if self.confusion_matrix is None and other.confusion_matrix is not None:
            confusion_matrix = other.confusion_matrix.clone()
        elif other.confusion_matrix is None:
            confusion_matrix = self.confusion_matrix.clone()
        else:
            confusion_matrix = self.confusion_matrix + other.confusion_matrix
        
        result = ClassificationMetrics(
            n_samples=self.n_samples + other.n_samples,
            confusion_matrix=confusion_matrix,
            num_classes=self.num_classes,
        )
        return result

    def to_log_dict(self, verbose=False):
        log_dict = super().to_log_dict(verbose=verbose)
        log_dict["accuracy"] = self.accuracy
        if verbose:
            # Maybe add those as plots, rather than tensors?
            log_dict["class_accuracy"] = self.class_accuracy
            log_dict["confusion_matrix"] = self.confusion_matrix
        return log_dict
    
    # def __str__(self):
    #     s = super().__str__()
    #     s = s.replace(f"accuracy={self.accuracy}", f"accuracy={self.accuracy:.3%}")
    #     return s

    def to_pbar_message(self) -> Dict[str, Union[str, float]]:
        message = super().to_pbar_message()
        message["acc"] = float(self.accuracy)
        return message

    def detach(self) -> "ClassificationMetrics":
        return ClassificationMetrics(
            n_samples=detach(self.n_samples),
            accuracy=float(self.accuracy),
            class_accuracy=detach(self.class_accuracy),
            confusion_matrix=detach(self.confusion_matrix),
        )

    def to(self, device: Union[str, torch.device]) -> "ClassificationMetrics":
        """Returns a new Metrics with all the attributes 'moved' to `device`."""
        return ClassificationMetrics(
            n_samples=move(self.n_samples, device),
            accuracy=move(self.accuracy, device),
            class_accuracy=move(self.class_accuracy, device),
            confusion_matrix=move(self.confusion_matrix, device),
        )

    @property
    def objective(self) -> float:
        return float(self.accuracy)
Example #29
class Foo(TestSetup):
    some_int: int = field(type=int, nargs="*")
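
As with argparse, `nargs="*"` lets the option accept zero or more values. A
hypothetical invocation:

# python main.py --some_int 1 2 3   -> some_int parsed from the values [1, 2, 3]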
Example #30
class Setting(
    SettingABC,
    Parseable,
    Serializable,
    LightningDataModule,
    Generic[EnvironmentType],
    metaclass=SettingMeta,
):
    """ Base class for all research settings in ML: Root node of the tree.

    A 'setting' is loosely defined here as a learning problem with a specific
    set of assumptions, restrictions, and an evaluation procedure.

    For example, Reinforcement Learning is a type of Setting in which we assume
    that an Agent is able to observe an environment, take actions upon it, and
    receive rewards back from the environment. Some of the assumptions include
    that the reward is dependent on the action taken, and that the actions have
    an impact on the environment's state (and on the next observations the agent
    will receive). The evaluation procedure consists in trying to maximize the
    reward obtained from an environment over a given number of steps.

    This 'Setting' class should ideally represent the most general learning
    problem imaginable, with almost no assumptions about the data or evaluation
    procedure.

    This is a dataclass. Its attributes can also be used as command-line
    arguments using `simple_parsing`.

    Abstract (required) methods:
    - **apply** Applies a given Method on this setting to produce Results.
    - **prepare_data** (things to do on 1 GPU/TPU not on every GPU/TPU in distributed mode).
    - **setup**  (things to do on every accelerator in distributed mode).
    - **train_dataloader** the training environment/dataloader.
    - **val_dataloader** the val environments/dataloader(s).
    - **test_dataloader** the test environments/dataloader(s).

    "Abstract"-ish (required) class attributes:
    - `Results`: The class of Results that are created when applying a Method on
      this setting.
    - `Observations`: The type of Observations that will be produced in this
        setting.
    - `Actions`: The type of Actions that are expected from this setting.
    - `Rewards`: The type of Rewards that this setting will (potentially) return
      upon receiving an action from the method.
    """

    # ---------- Class Variables -------------
    # Fields in this block are class attributes. They don't create command-line
    # arguments.

    # Type of Observations that the dataloaders (a.k.a. "environments") will
    # produce for this type of Setting.
    Observations: ClassVar[Type[Observations]] = Observations
    # Type of Actions that the dataloaders (a.k.a. "environments") will receive
    # through their `send` method, for this type of Setting.
    Actions: ClassVar[Type[Actions]] = Actions
    # Type of Rewards that the dataloaders (a.k.a. "environments") will return
    # after receiving an action, for this type of Setting.
    Rewards: ClassVar[Type[Rewards]] = Rewards

    # The type of Results that are given back when a method is applied on this
    # Setting. The `Results` class basically defines the 'evaluation metric' for
    # a given type of setting. See the `Results` class for more info.
    Results: ClassVar[Type[Results]] = Results

    available_datasets: ClassVar[Dict[str, Any]] = {}

    # Transforms to be applied to the observations of the train/valid/test
    # environments.
    transforms: Optional[List[Transforms]] = None

    # Transforms to be applied to the training datasets.
    train_transforms: Optional[List[Transforms]] = None
    # Transforms to be applied to the validation datasets.
    val_transforms: Optional[List[Transforms]] = None
    # Transforms to be applied to the testing datasets.
    test_transforms: Optional[List[Transforms]] = None

    # Fraction of training data to use to create the validation set.
    # (Only applicable in Passive settings.)
    val_fraction: float = 0.2

    # TODO: Still not sure where exactly we should be adding the 'batch_size'
    # and 'num_workers' arguments. Adding them here for now with cmd=False, so
    # that they can be passed to the constructor of the Setting.
    batch_size: Optional[int] = field(default=None, cmd=False)
    num_workers: Optional[int] = field(default=None, cmd=False)

    # # TODO: Add support for semi-supervised training.
    # # Fraction of the dataset that is labeled.
    # labeled_data_fraction: int = 1.0
    # # Number of labeled examples.
    # n_labeled_examples: Optional[int] = None

    def __post_init__(
        self,
        observation_space: Optional[gym.Space] = None,
        action_space: Optional[gym.Space] = None,
        reward_space: Optional[gym.Space] = None,
    ):
        """ Initializes the fields of the setting that weren't set from the
        command-line.
        """
        logger.debug("__post_init__ of Setting")
        # BUG: simple-parsing sometimes parses a list with a single item, itself the
        # list of transforms. Not sure if this still happens.

        def is_list_of_list(v: Any) -> bool:
            return isinstance(v, list) and len(v) == 1 and isinstance(v[0], list)

        if is_list_of_list(self.train_transforms):
            self.train_transforms = self.train_transforms[0]
        if is_list_of_list(self.val_transforms):
            self.val_transforms = self.val_transforms[0]
        if is_list_of_list(self.test_transforms):
            self.test_transforms = self.test_transforms[0]

        if all(
            t is None
            for t in [
                self.transforms,
                self.train_transforms,
                self.val_transforms,
                self.test_transforms,
            ]
        ):
            # Use these two transforms by default if no transforms are passed at all.
            # TODO: Remove this after the competition perhaps.
            self.transforms = Compose([Transforms.to_tensor, Transforms.three_channels])

        # If the constructor is called with just the `transforms` argument, like this:
        # <SomeSetting>(dataset="bob", transforms=foo_transform)
        # Then we use this value as the default for the train, val and test transforms.
        if self.transforms and not any(
            [self.train_transforms, self.val_transforms, self.test_transforms]
        ):
            if not isinstance(self.transforms, list):
                self.transforms = Compose([self.transforms])
            self.train_transforms = self.transforms.copy()
            self.val_transforms = self.transforms.copy()
            self.test_transforms = self.transforms.copy()

        if self.train_transforms is not None and not isinstance(
            self.train_transforms, list
        ):
            self.train_transforms = [self.train_transforms]

        if self.val_transforms is not None and not isinstance(
            self.val_transforms, list
        ):
            self.val_transforms = [self.val_transforms]

        if self.test_transforms is not None and not isinstance(
            self.test_transforms, list
        ):
            self.test_transforms = [self.test_transforms]

        # Actually compose the list of Transforms or callables into a single transform.
        self.train_transforms: Compose = Compose(self.train_transforms or [])
        self.val_transforms: Compose = Compose(self.val_transforms or [])
        self.test_transforms: Compose = Compose(self.test_transforms or [])
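
        # For illustration (a sketch of the normalization above):
        #     Setting(transforms=Transforms.to_tensor)
        # ends up with `train_transforms`, `val_transforms` and
        # `test_transforms` all equal to `Compose([Transforms.to_tensor])`.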

        LightningDataModule.__init__(
            self,
            train_transforms=self.train_transforms,
            val_transforms=self.val_transforms,
            test_transforms=self.test_transforms,
        )

        self._observation_space = observation_space
        self._action_space = action_space
        self._reward_space = reward_space

        # TODO: It's a bit confusing to also have a `config` attribute on the
        # Setting. Might want to change this a bit.
        self.config: Config = None

        self.train_env: Environment = None  # type: ignore
        self.val_env: Environment = None  # type: ignore
        self.test_env: Environment = None  # type: ignore

    @abstractmethod
    def apply(self, method: Method, config: Config = None) -> "Setting.Results":
        # NOTE: The actual train/test loop should be defined in a more specific
        # setting. This is just here as an illustration of what that could look
        # like.
        assert False, "This is just here for illustration purposes."

        method.fit(
            train_env=self.train_dataloader(), valid_env=self.val_dataloader(),
        )

        # Test loop:
        test_env = self.test_dataloader()
        test_metrics = []
        # Number of episodes to test on:
        n_test_episodes = 1

        # Perform a set number of episodes in the test environment.
        for episode in range(n_test_episodes):
            # Get initial observations.
            observations = test_env.reset()

            for i in itertools.count():
                # Get the predictions/actions for a batch of observations.
                actions = method.get_actions(observations, test_env.action_space)
                observations, rewards, done, info = test_env.step(actions)
                # Calculate the 'metrics' (TODO: This should be done in the env!)
                batch_metrics = ...
                test_metrics.append(batch_metrics)
                if done:
                    break

        return self.Results(test_metrics=test_metrics)
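
    # How a concrete setting would typically be used (a sketch; `SomeSetting`
    # and `SomeMethod` are hypothetical subclasses of Setting and Method):
    #     setting = SomeSetting()
    #     method = SomeMethod()
    #     results = setting.apply(method)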

    def get_metrics(self, actions: Actions, rewards: Rewards) -> Union[float, Metrics]:
        """ Calculate the "metric" from the model predictions (actions) and the true labels (rewards).

        In this example, we return a 'Metrics' object:
        - `ClassificationMetrics` for classification problems,
        - `RegressionMetrics` for regression problems.

        We use these objects because they are awesome (they basically simplify
        making plots, wandb logging, and serialization), but you can also just
        return floats if you want, no problem.

        TODO: This is duplicated from Incremental. Need to fix this.
        """
        from sequoia.common.metrics import get_metrics

        # In this particular setting, we only use the y_pred from actions and
        # the y from the rewards.
        if isinstance(actions, Actions):
            actions = torch.as_tensor(actions.y_pred)
        if isinstance(rewards, Rewards):
            rewards = torch.as_tensor(rewards.y)
        # TODO: At the moment there's a problem: ClassificationMetrics wants
        # to create a confusion matrix, which requires 'logits' (so that it
        # knows how many classes there are).
        if isinstance(actions, Tensor):
            actions = actions.cpu().numpy()
        if isinstance(rewards, Tensor):
            rewards = rewards.cpu().numpy()

        if isinstance(self.action_space, spaces.Discrete):
            batch_size = rewards.shape[0]
            actions = torch.as_tensor(actions)
            if len(actions.shape) == 1 or (
                actions.shape[-1] == 1 and self.action_space.n != 2
            ):
                fake_logits = torch.zeros([batch_size, self.action_space.n], dtype=int)
                # FIXME: There must be a smarter way to do this indexing.
                for i, action in enumerate(actions):
                    fake_logits[i, action] = 1
                actions = fake_logits

        return get_metrics(y_pred=actions, y=rewards)
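
    # Example (a sketch, assuming a classification setting where `y_pred` and
    # `y` hold integer class labels):
    #     actions = setting.Actions(y_pred=torch.as_tensor([1, 0, 3]))
    #     rewards = setting.Rewards(y=torch.as_tensor([1, 2, 3]))
    #     metrics = setting.get_metrics(actions, rewards)
    #     # -> e.g. a ClassificationMetrics with accuracy 2/3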

    @property
    def image_space(self) -> Optional[gym.Space]:
        if isinstance(self.observation_space, spaces.Box):
            return self.observation_space
        if isinstance(self.observation_space, spaces.Tuple):
            assert isinstance(self.observation_space[0], spaces.Box)
            return self.observation_space[0]
        if isinstance(self.observation_space, spaces.Dict):
            return self.observation_space.spaces["x"]
        logger.warning(
            f"Don't know what the image space is. "
            f"(self.observation_space={self.observation_space})"
        )
        return None

    @property
    def observation_space(self) -> gym.Space:
        return self._observation_space

    @observation_space.setter
    def observation_space(self, value: gym.Space) -> None:
        """Sets a the observation space.

        NOTE: This also changes the value of the `dims` attribute and the result
        of the `size()` method from LightningDataModule.
        """
        if not isinstance(value, gym.Space):
            raise RuntimeError(f"Value must be a `gym.Space` (got {value})")
        if not self._dims:
            if isinstance(value, spaces.Box):
                self.dims = value.shape
            elif isinstance(value, spaces.Tuple):
                self.dims = tuple(space.shape for space in value.spaces)
            elif isinstance(value, spaces.Dict) and "x" in value.spaces:
                self.dims = value.spaces["x"].shape
            else:
                raise NotImplementedError(
                    f"Don't know how to set the 'dims' attribute using "
                    f"observation space {value}"
                )
        self._observation_space = value
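
    # For example (a sketch): setting `observation_space` to
    # `spaces.Box(0, 1, (3, 32, 32))` also sets `dims` to `(3, 32, 32)`
    # if it wasn't already set.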

    @property
    def action_space(self) -> gym.Space:
        return self._action_space

    @action_space.setter
    def action_space(self, value: gym.Space) -> None:
        self._action_space = value

    @property
    def reward_space(self) -> gym.Space:
        return self._reward_space

    @reward_space.setter
    def reward_space(self, value: gym.Space) -> None:
        self._reward_space = value

    @classmethod
    def get_available_datasets(cls) -> Iterable[str]:
        """ Returns an iterable of strings which represent the names of datasets. """
        return cls.available_datasets

    @classmethod
    def main(cls, argv: Optional[Union[str, List[str]]] = None) -> Results:
        from sequoia.main import Experiment

        experiment: Experiment
        # Create the Setting object from the command-line:
        setting = cls.from_args(argv)
        # Then create the 'Experiment' from the command-line, which makes it
        # possible to choose between all the methods.
        experiment = Experiment.from_args(argv)
        # fix the setting attribute to be the one parsed above.
        experiment.setting = setting
        results: ResultsType = experiment.launch(argv)
        return results
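
    # Typical command-line entry point (a sketch; any concrete subclass works
    # the same way):
    #     if __name__ == "__main__":
    #         results = SomeSetting.main()
    #         print(results.get_metric())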

    def apply_all(
        self, argv: Optional[Union[str, List[str]]] = None
    ) -> Dict[Type["Method"], Results]:
        applicable_methods = self.get_applicable_methods()
        from sequoia.methods import Method

        all_results: Dict[Type[Method], Results] = {}
        config = Config.from_args(argv)
        for method_type in applicable_methods:
            method = method_type.from_args(argv)
            results = self.apply(method, config)
            all_results[method_type] = results
        logger.info(f"All results for setting of type {type(self)}:")
        logger.info(
            {
                method.get_name(): (results.get_metric() if results else "crashed")
                for method, results in all_results.items()
            }
        )
        return all_results

    @classmethod
    def get_path_to_source_file(cls: Type) -> Path:
        from sequoia.utils.utils import get_path_to_source_file

        return get_path_to_source_file(cls)

    def _check_environments(self):
        """ Do a quick check to make sure that interacting with the envs/dataloaders
        works correctly.
        """
        # Check that the env's spaces are batched versions of the settings'.
        from gym.vector.utils import batch_space
        from sequoia.settings.passive import PassiveEnvironment

        batch_size = self.batch_size
        for loader_method in [
            self.train_dataloader,
            self.val_dataloader,
            self.test_dataloader,
        ]:
            print(f"\n\nChecking loader method {loader_method.__name__}\n\n")
            env = loader_method(batch_size=batch_size)

            batch_size = env.batch_size

            # We could compare the spaces directly, but that's a bit messy and
            # would depend on the type of space in each case. Instead, we check
            # that samples from the env's spaces match the expected batched
            # versions of the Setting's spaces.
            if batch_size:
                expected_observation_space = batch_space(
                    self.observation_space, n=batch_size
                )
                expected_action_space = batch_space(self.action_space, n=batch_size)
                expected_reward_space = batch_space(self.reward_space, n=batch_size)
            else:
                expected_observation_space = self.observation_space
                expected_action_space = self.action_space
                expected_reward_space = self.reward_space

            # TODO: Batching the 'Sparse' makes it really ugly, so just
            # comparing the 'image' portion of the space for now.
            assert (
                env.observation_space[0].shape == expected_observation_space[0].shape
            ), (env.observation_space[0], expected_observation_space[0])

            assert env.action_space == expected_action_space, (
                env.action_space,
                expected_action_space,
            )
            assert env.reward_space == expected_reward_space, (
                env.reward_space,
                expected_reward_space,
            )

            # Check that the 'gym API' interaction is working correctly.
            reset_obs: Observations = env.reset()
            self._check_observations(env, reset_obs)

            for i in range(5):
                actions = env.action_space.sample()
                self._check_actions(env, actions)
                step_observations, step_rewards, done, info = env.step(actions)
                self._check_observations(env, step_observations)
                self._check_rewards(env, step_rewards)
                if batch_size:
                    assert not any(done)
                else:
                    assert not done
                # assert not (done if isinstance(done, bool) else any(done))

            for batch in take(env, 5):
                observations: Observations
                rewards: Optional[Rewards]

                if isinstance(env, PassiveEnvironment):
                    observations, rewards = batch
                else:
                    # In RL, at the moment, the 'dataset' gives back only the
                    # observations.
                    observations, rewards = batch, None

                self._check_observations(env, observations)
                if rewards is not None:
                    self._check_rewards(env, rewards)

                if batch_size:
                    actions = tuple(
                        self.action_space.sample() for _ in range(batch_size)
                    )
                else:
                    actions = self.action_space.sample()
                # actions = self.Actions(torch.as_tensor(actions))
                rewards = env.send(actions)
                self._check_rewards(env, rewards)

            env.close()

    def _check_observations(self, env: Environment, observations: Any):
        """ Check that the given observation makes sense for the given environment.

        TODO: This should probably not be in this file here. It's more used for
        testing than anything else.
        """
        assert isinstance(observations, self.Observations), observations
        images = observations.x
        assert isinstance(images, (torch.Tensor, np.ndarray))
        if isinstance(images, Tensor):
            images = images.cpu().numpy()

        # Find the 'image' space:
        if isinstance(env.observation_space, spaces.Box):
            image_space = env.observation_space
        elif isinstance(env.observation_space, spaces.Tuple):
            image_space = env.observation_space[0]
        else:
            raise RuntimeError(
                f"Don't know how to find the image space in the "
                f"env's obs space ({env.observation_space})."
            )
        assert images in image_space

    def _check_actions(self, env: Environment, actions: Any):
        if isinstance(actions, Actions):
            assert isinstance(actions, self.Actions)
            actions = actions.y_pred.cpu().numpy()
        elif isinstance(actions, Tensor):
            actions = actions.cpu().numpy()
        # (If `actions` is already an ndarray, it is used as-is.)
        assert actions in env.action_space

    def _check_rewards(self, env: Environment, rewards: Any):
        if isinstance(rewards, Rewards):
            assert isinstance(rewards, self.Rewards)
            rewards = rewards.y
        if isinstance(rewards, Tensor):
            rewards = rewards.cpu().numpy()
        # (If `rewards` is already an ndarray, it is used as-is.)
        if isinstance(rewards, (int, float)):
            rewards = np.asarray(rewards)
        assert rewards in env.reward_space, (rewards, env.reward_space)

    # Just to make type hinters stop throwing errors when using the constructor
    # to create a Setting.
    def __new__(cls, *args, **kwargs):
        return super().__new__(cls, *args, **kwargs)

    @classmethod
    def load_benchmark(
        cls: Type[SettingType], benchmark: Union[str, Path]
    ) -> SettingType:
        """ Load the given "benchmark" (pre-configured Setting) of this type.

        Parameters
        ----------
        cls : Type[SettingType]
            Type of Setting to create.
        benchmark : Union[str, Path]
            Either the name of a benchmark (e.g. "cartpole_state", "monsterkong", etc.)
            or a path to a json/yaml file.

        Returns
        -------
        SettingType
            Setting of type `cls`, appropriately populated according to the chosen
            benchmark.

        Raises
        ------
        RuntimeError
            If `benchmark` isn't an existing file or a known preset.
        RuntimeError
            If any command-line arguments are present in sys.argv which would be ignored
            when creating this setting.
        """
        # If the provided benchmark isn't a path, try to get the value from
        # the `setting_presets` dict. If it isn't in the dict, raise an
        # error.
        if not Path(benchmark).is_file():
            if benchmark in setting_presets:
                benchmark = setting_presets[benchmark]
            else:
                raise RuntimeError(
                    f"Could not find benchmark '{benchmark}': it "
                    f"is neither a path to a file or a key of the "
                    f"`setting_presets` dictionary. \n"
                    f"(Available presets: {setting_presets}) "
                )
        # Creating an experiment for the given setting, loaded from the
        # config file.
        # TODO: IDEA: Do the same thing for loading the Method?
        logger.info(
            f"Will load the options for setting {cls} from the file "
            f"at path {benchmark}."
        )

        # Raise an error if any of the args in sys.argv would have been used
        # up by the Setting, just to prevent any ambiguities.
        _, unused_args = cls.from_known_args()
        consumed_args = list(set(sys.argv[1:]) - set(unused_args))
        if consumed_args:
            # TODO: This could also be triggered if the method had arguments
            # with the same name as some of the Setting's arguments.
            raise RuntimeError(
                f"Cannot pass command-line arguments for the Setting when "
                f"loading a benchmark, since these arguments whould have been "
                f"ignored when creating the setting of type {cls} "
                f"anyway: {consumed_args}"
            )

        drop_extras = False
        # Actually load the setting from the file.
        setting = cls.load(path=benchmark, drop_extra_fields=drop_extras)
        return setting
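
    # Usage sketch ("cartpole_state" is one of the preset names mentioned in
    # the docstring above):
    #     setting = SomeSetting.load_benchmark("cartpole_state")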