Example #1
class LogisticRegression:
    # We can set/fix hyper-parameters up front
    def __init__(self, random_state, **hyper_parameters):
        self.model_ctor = linear_model.LogisticRegression
        self.random_state = random_state
        self.hp = HyperParameters(self.hyperparameter_space(),
                                  **hyper_parameters)
        self.model = None

    # List of all the hyper-parameters
    @staticmethod
    def hyperparameter_space():
        return {'C': 'uniform(0, 1)', 'l1_ratio': 'uniform(0, 1)'}

    # List of hyper-parameters that need to be set to finish initialization
    def get_space(self):
        return self.hp.missing_parameters()

    # Initialize the model when all the hyper-parameters are there
    def init(self, **hyper_parameters):
        self.hp.add_parameters(**hyper_parameters)
        self.model = self.model_ctor(penalty='elasticnet',
                                     solver='saga',
                                     random_state=self.random_state,
                                     **self.hp.parameters(strict=True))

    def predict(self, x):
        try:
            return self.model.predict(x)
        except sklearn.exceptions.NotFittedError as e:
            raise NotFittedError from e

    def fit(self, x, y):
        self.model = self.model.fit(x, y)
        return self.model
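
A minimal usage sketch of the lazy wrapper above, assuming the HyperParameters behaviour shown in the tests further down (missing_parameters / add_parameters / parameters); x_train, y_train and x_test are placeholder arrays.

# Hypothetical usage of the lazy LogisticRegression wrapper above
model = LogisticRegression(random_state=0, C=0.5)   # fix C up front
print(model.get_space())      # -> {'l1_ratio': 'uniform(0, 1)'}, the hyper-parameter still missing
model.init(l1_ratio=0.2)      # complete the space and build the sklearn estimator
model.fit(x_train, y_train)   # placeholder training data
print(model.predict(x_test))  # placeholder test data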
Example #2
    def __init__(self,
                 name=None,
                 *,
                 params=None,
                 optimizer=None,
                 half=False,
                 loss_scale=1,
                 dynamic_loss_scale=False,
                 scale_window=1000,
                 scale_factor=2,
                 min_loss_scale=None,
                 max_loss_scale=2.**24,
                 **kwargs):
        self._optimizer = None

        if params is not None:
            params = list(params)
            assert isinstance(params, (list, tuple))

        self._model_parameters = params
        self._half_parameters(half, loss_scale, dynamic_loss_scale,
                              scale_window, scale_factor, min_loss_scale,
                              max_loss_scale)

        # Track defined hyper parameters
        self.hyper_parameters = HyperParameters(space={})

        if optimizer:
            warning('Using custom optimizer')
            if isinstance(optimizer, type):
                self.optimizer_builder = optimizer

                if hasattr(optimizer, 'get_space'):
                    self.hyper_parameters.space = optimizer.get_space()
            else:
                self._optimizer = self._wrap_optimizer(optimizer)

                if hasattr(self._optimizer, 'get_space'):
                    self.hyper_parameters.space = self._optimizer.get_space()

        elif name:
            # load a registered olympus optimizer
            self.optimizer_builder = registered_optimizers.get(name.lower())

            if not self.optimizer_builder:
                raise RegisteredOptimizerNotFound(name)

            if hasattr(self.optimizer_builder, 'get_space'):
                self.hyper_parameters.space = self.optimizer_builder.get_space()

        else:
            raise MissingArgument('optimizer or name needs to be set')

        # All additional args are hyper parameters
        self.hyper_parameters.add_parameters(**kwargs)
Example #3
def test_hyperparameter_nested_tracking_all_set():
    space = {
        'initializer': {
            'a': 'uniform(0, 1)',
            'b': 'uniform(0, 1)',
        }
    }

    hp = HyperParameters(space, initializer=dict(a=0.123, b=0.124))
    assert hp.parameters(strict=True) == dict(initializer=dict(a=0.123, b=0.124))
Example #4
    def __init__(self, name, seed=0, **kwargs):
        self.name = name
        self.hyper_parameters = HyperParameters(space={})
        self.seed = seed
        self._initializer = None

        self.initializer_ctor = registered_initialization.get(name)

        if self.initializer_ctor is None:
            raise RegisteredInitNotFound(name)

        if hasattr(self.initializer_ctor, 'get_space'):
            self.hyper_parameters.space = self.initializer_ctor.get_space()

        self.hyper_parameters.add_parameters(**kwargs)
Example #5
class DecisionTree:
    # We can set/fix hyper-parameters up front
    def __init__(self, random_state, **hyper_parameters):
        self.model_ctor = tree.DecisionTreeClassifier
        self.random_state = random_state
        self.hp = HyperParameters(self.hyperparameter_space(), **hyper_parameters)
        self.model = None

    # List of all the hyper-parameters
    @staticmethod
    def hyperparameter_space():
        return {
            'max_depth': 'uniform(0, 100, discrete=True)',
            'min_samples_split': 'uniform(1, 10, discrete=True)',
            'min_samples_leaf': 'uniform(1, 10, discrete=True)',
            'min_weight_fraction_leaf': 'uniform(0, 1)'
        }

    # List of hyper-parameters that need to be set to finish initialization
    def get_space(self):
        return self.hp.missing_parameters()

    # Initialize the model when all the hyper-parameters are there
    def init(self, **hyper_parameters):
        self.hp.add_parameters(**hyper_parameters)
        self.model = self.model_ctor(
            criterion='gini',
            splitter='best',
            max_features=None,
            random_state=self.random_state,
            **self.hp.parameters(strict=True)
        )

    def predict(self, x):
        try:
            return self.model.predict(x)
        except sklearn.exceptions.NotFittedError as e:
            raise NotFittedError from e

    def fit(self, x, y):
        self.model = self.model.fit(x, y)
        return self.model
Example #6
class MLPRegressor:
    # We can set/fix hyper-parameters up front
    def __init__(self, random_state, **hyper_parameters):
        self.model_ctor = sklearn.neural_network.MLPRegressor
        self.random_state = random_state
        self.hp = HyperParameters(self.hyperparameter_space(),
                                  **hyper_parameters)
        self.model = None

    # List of all the hyper-parameters
    @staticmethod
    def hyperparameter_space():
        return {
            'hidden_layer_sizes': 'uniform(50, 70, discrete=True)',
            'solver': 'uniform(0, 3, discrete=True)',
            'alpha': 'uniform(0, 0.1)'
        }

    # List of hyper-parameters that need to be set to finish initialization
    def get_space(self):
        return self.hp.missing_parameters()

    # Initialize the model when all the hyper-parameters are there
    def init(self, **hyper_parameters):
        self.hp.add_parameters(**hyper_parameters)
        self.model = self.model_ctor(random_state=self.random_state,
                                     **self.hp.parameters(strict=True))

    def predict(self, x):
        # unwrap the input: only the first element of x is passed to the underlying model
        x = x[0]
        try:
            return self.model.predict(x)
        except sklearn.exceptions.NotFittedError as e:
            raise NotFittedError from e

    def fit(self, x, y):
        self.model = self.model.fit(x, y)
        return self.model
Example #7
def test_hyperparameter_tracking():
    space = {
        'a': 'uniform(0, 1)',
        'b': 'uniform(0, 1)'
    }

    # space with Fixed HP
    hp = HyperParameters(space, b=0.124)

    # Hp a is missing
    with pytest.raises(MissingParameters):
        hp.parameters(strict=True)

    # return the space of missing params
    assert hp.missing_parameters() == dict(a='uniform(0, 1)')

    hp.add_parameters(a=0.123)
    assert hp.missing_parameters() == {}
    assert hp.parameters(strict=True) == dict(a=0.123, b=0.124)
Example #8
def test_hyperparameter_nested_tracking():
    space = {
        'initializer': {
            'a': 'uniform(0, 1)',
            'b': 'uniform(0, 1)',
        }
    }

    hp = HyperParameters(space, initializer=dict(b=0.124))

    # Hp a is missing
    with pytest.raises(MissingParameters):
        hp.parameters(strict=True)

    # return the space of missing params
    assert hp.missing_parameters() == dict(initializer=dict(a='uniform(0, 1)'))

    hp.add_parameters(initializer=dict(a=0.123))
    assert hp.missing_parameters() == {}
    assert hp.parameters(strict=True) == dict(initializer=dict(a=0.123, b=0.124))
Example #9
class Initializer:
    """Lazy Initializer"""
    def __init__(self, name, seed=0, **kwargs):
        self.name = name
        self.hyper_parameters = HyperParameters(space={})
        self.seed = seed
        self._initializer = None

        self.initializer_ctor = registered_initialization.get(name)

        if self.initializer_ctor is None:
            raise RegisteredInitNotFound(name)

        if hasattr(self.initializer_ctor, 'get_space'):
            self.hyper_parameters.space = self.initializer_ctor.get_space()

        self.hyper_parameters.add_parameters(**kwargs)

    def get_space(self):
        """Return the dimension space of each parameters"""
        return self.hyper_parameters.missing_parameters()

    def get_current_space(self):
        """Get currently defined parameter space"""
        return self.hyper_parameters.parameters(strict=False)

    def init(self, override=False, **kwargs):
        if self._initializer and not override:
            warning(
                'Initializer is already set, use override=True to force re-initialization'
            )
            return self

        self.hyper_parameters.add_parameters(**kwargs)
        self._initializer = self.initializer_ctor(
            **self.hyper_parameters.parameters(strict=True))

        return self

    @property
    def initializer(self):
        if not self._initializer:
            self.init()

        return self._initializer

    def __call__(self, model):
        with fork_rng(enabled=True):
            init_seed(self.seed)

            return self.initializer(model)
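
A short, hedged sketch of how this lazy Initializer is driven, reusing the 'glorot_uniform' name that also appears in the Model docstring below; net is a placeholder nn.Module, and any keyword passed to init() would come from get_space().

# Hypothetical usage of the lazy Initializer above
init = Initializer('glorot_uniform', seed=1)
print(init.get_space())   # hyper-parameters the registered initializer still needs, if any
init.init()               # instantiate once the space is satisfied
init(net)                 # apply the initialization to the placeholder module `net`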
Example #10
    def __init__(self, name=None, *, schedule=None, optimizer=None, **kwargs):
        self._schedule = None
        self._schedule_builder = None
        self._optimizer = optimizer

        self.hyper_parameters = HyperParameters(space={})

        if schedule:
            if isinstance(schedule, type):
                self._schedule_builder = schedule

                if hasattr(schedule, 'get_space'):
                    self.hyper_parameters.space = schedule.get_space()

            else:
                self._schedule = schedule

            if hasattr(self._schedule, 'get_space'):
                self.hyper_parameters.space = self._schedule.get_space()

        elif name:
            # load a registered olympus schedule
            builder = registered_schedules.get(name)

            if not builder:
                raise RegisteredLRSchedulerNotFound(name)

            self._schedule_builder = builder

            if hasattr(self._schedule_builder, 'get_space'):
                self.hyper_parameters.space = self._schedule_builder.get_space()

        else:
            raise MissingArgument('schedule or name needs to be set')

        self.hyper_parameters.add_parameters(**kwargs)
Example #11
class Model(nn.Module):
    """Olympus standardized Model interface

    Parameters
    ----------
    name: str
        Name of a registered model

    half: bool
        Convert the network to half/fp16

    model: Model
        Custom model to use, mutually exclusive with :param name

    Examples
    --------

    A model wrapper that provides a wide range of built-in utilities.

    Can instantiate common models directly

    >>> model = Model('resnet18', input_size=(1, 28, 28), output_size=(10,))

    Handles mixed precision conversion for you

    >>> model = Model('resnet18', input_size=(1, 28, 28), output_size=(10,), half=True)

    Handles weight initialization

    >>> model = Model('resnet18', input_size=(1, 28, 28), output_size=(10,), weight_init='glorot_uniform')

    Supports your custom model

    >>> class MyModel(nn.Module):
    ...     def __init__(self, input_size, output_size):
    ...         super(MyModel, self).__init__()
    ...         self.main = nn.Linear(input_size[0], output_size[0])
    ...
    ...     def forward(self, x):
    ...         return self.main(x)
    >>>
    >>> model = Model(model=MyModel, input_size=(1, 28, 28), output_size=(10,))

    Raises
    ------
    RegisteredModelNotFound
        when using the name of an unknown model

    MissingArgument:
        if neither name nor model is set
    """
    _dtype = torch.float32
    _device = torch.device('cpu')

    def __init__(self,
                 name=None,
                 *,
                 half=False,
                 model=None,
                 input_size=None,
                 output_size=None,
                 weight_init=default_init,
                 **kwargs):
        super(Model, self).__init__()
        # Save all the args that were passed down so we can instantiate it again standalone
        self.replay_args = dict(name=name,
                                half=half,
                                model=model,
                                input_size=input_size,
                                output_size=output_size,
                                weight_init=weight_init,
                                kwargs=kwargs)

        self.transform = lambda x: try_convert(x, self.device, self.dtype)
        self.half = half
        self._model = None

        # Track defined hyper parameters
        self.hyper_parameters = HyperParameters(space=dict())

        # If a weight initializer is set then we can add its hyper parameters
        self.weight_init = weight_init
        if weight_init is not None:
            if isinstance(weight_init, str):
                self.weight_init = Initializer(weight_init)

            # replace weight init by its own hyper parameters
            space = self.weight_init.get_space()
            if space:
                self.hyper_parameters.space.update(dict(initializer=space))

        # Make a Lazy Model that will be initialized once all the hyper parameters are set
        if model:
            if hasattr(model, 'get_space'):
                self.hyper_parameters.space.update(model.get_space())

            if isinstance(model, type):
                self.model_builder = LazyCall(model,
                                              input_size=input_size,
                                              output_size=output_size)
            else:
                self.model_builder = LazyCall(lambda *args, **kwargs: model)

        elif name:
            # load an olympus model
            model_fun = registered_models.get(name)

            if not model_fun:
                raise RegisteredModelNotFound(name)

            self.model_builder = LazyCall(model_fun,
                                          input_size=input_size,
                                          output_size=output_size)

            if hasattr(model_fun, 'get_space'):
                self.hyper_parameters.space.update(model_fun.get_space())
        else:
            raise MissingArgument('Model or Name need to be set')

        # Any additional parameters are treated as hyper-parameters
        self.other_params = self.hyper_parameters.add_parameters(strict=False,
                                                                 **kwargs)

    @property
    def dtype(self):
        return self._dtype

    @property
    def device(self):
        return self._device

    def get_space(self):
        """Return hyper parameter space"""
        return self.hyper_parameters.missing_parameters()

    def get_current_space(self):
        """Get currently defined parameter space"""
        return self.hyper_parameters.parameters(strict=False)

    def init(self, override=False, **model_hyperparams):
        others = self.hyper_parameters.add_parameters(strict=False,
                                                      **model_hyperparams)
        self.other_params.update(others)

        params = self.hyper_parameters.parameters(strict=True)

        initializer = params.pop('initializer', {})
        if isinstance(initializer, dict):
            self.weight_init.init(**initializer)

        self._model = self.model_builder.invoke(**self.other_params, **params)
        self.weight_init(self._model)

        if self.half:
            self._model = network_to_half(self._model)

        # Register module so we can use all the parent methods
        self.add_module('_model', self._model)
        self.replay_args['kwargs'].update(model_hyperparams)
        return self

    @property
    def model(self):
        if not self._model:
            self.init()

        return self._model

    def forward(self, *input, **kwargs):
        return self.model(self.transform(input[0]), *input[1:], **kwargs)

    def __call__(self, *args, **kwargs):
        return super(Model, self).__call__(*args, **kwargs)

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        destination = {
            'model': self.model.state_dict(None, prefix, keep_vars),
            'half': self.half,
            'replay': self.replay_args,
            'types': {
                'model': type(self.model)
            }
        }
        return destination

    @staticmethod
    def from_state(state):
        kwargs = state.get('replay')
        kwargs.update(kwargs.pop('kwargs', dict()))
        m = Model(**kwargs)
        m.init()
        m.load_state_dict(state)
        return m

    def load_state_dict(self, state_dict, strict=True):
        self.half = state_dict['half']
        self.model.load_state_dict(state_dict['model'], strict=strict)

    def parameters(self, recurse: bool = True):
        return self.model.parameters(recurse)

    def to(self, *args, **kwargs):
        self._device, self._dtype, *_ = torch._C._nn._parse_to(*args, **kwargs)
        super(Model, self).to(*args, **kwargs)
        return self

    def act(self, *args, **kwargs):
        return self.model.act(*args, **kwargs)

    def critic(self, *args, **kwargs):
        return self.model.critic(*args, **kwargs)
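
A hedged sketch of the lazy hyper-parameter flow that the docstring above does not show: query the missing space with get_space(), then finish construction with init(). The sampled dict is a placeholder for whatever a search algorithm would draw from that space.

import torch

# Hypothetical lazy-initialization flow for the Model wrapper above
model = Model('resnet18', input_size=(1, 28, 28), output_size=(10,),
              weight_init='glorot_uniform')
space = model.get_space()   # e.g. {'initializer': {...}} when the initializer exposes a space
sampled = {}                # placeholder: values drawn from `space` by a search algorithm
model.init(**sampled)       # builds the underlying network and applies the weight initializer
out = model(torch.randn(1, 1, 28, 28))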
Example #12
    def __init__(self, random_state, **hyper_parameters):
        self.model_ctor = sklearn.neural_network.MLPRegressor
        self.random_state = random_state
        self.hp = HyperParameters(self.hyperparameter_space(),
                                  **hyper_parameters)
        self.model = None
Example #13
    def __init__(self,
                 name=None,
                 *,
                 half=False,
                 model=None,
                 input_size=None,
                 output_size=None,
                 weight_init=default_init,
                 **kwargs):
        super(Model, self).__init__()
        # Save all the args that were passed down so we can instantiate it again standalone
        self.replay_args = dict(name=name,
                                half=half,
                                model=model,
                                input_size=input_size,
                                output_size=output_size,
                                weight_init=weight_init,
                                kwargs=kwargs)

        self.transform = lambda x: try_convert(x, self.device, self.dtype)
        self.half = half
        self._model = None

        # Track defined hyper parameters
        self.hyper_parameters = HyperParameters(space=dict())

        # If a weight initializer is set then we can add its hyper parameters
        self.weight_init = weight_init
        if weight_init is not None:
            if isinstance(weight_init, str):
                self.weight_init = Initializer(weight_init)

            # replace weight init by its own hyper parameters
            space = self.weight_init.get_space()
            if space:
                self.hyper_parameters.space.update(dict(initializer=space))

        # Make a Lazy Model that will be initialized once all the hyper parameters are set
        if model:
            if hasattr(model, 'get_space'):
                self.hyper_parameters.space.update(model.get_space())

            if isinstance(model, type):
                self.model_builder = LazyCall(model,
                                              input_size=input_size,
                                              output_size=output_size)
            else:
                self.model_builder = LazyCall(lambda *args, **kwargs: model)

        elif name:
            # load an olympus model
            model_fun = registered_models.get(name)

            if not model_fun:
                raise RegisteredModelNotFound(name)

            self.model_builder = LazyCall(model_fun,
                                          input_size=input_size,
                                          output_size=output_size)

            if hasattr(model_fun, 'get_space'):
                self.hyper_parameters.space.update(model_fun.get_space())
        else:
            raise MissingArgument('Model or Name need to be set')

        # Any additional parameters are treated as hyper-parameters
        self.other_params = self.hyper_parameters.add_parameters(strict=False,
                                                                 **kwargs)
Example #14
class Optimizer(TorchOptimizer):
    """Lazy Optimizer that allows you to first fetch the supported parameters using ``get_space`` and then
    initialize the underlying optimizer using ``init_optimizer``

    Parameters
    ----------
    name: str
        Name of a registered optimizer

    optimizer: Optimizer
        Custom optimizer, mutually exclusive with :param name

    half: bool
        Enable fp16 Optimizer

    loss_scale: float (LS)
        fp16 optimizer option: loss scale to use

    dynamic_loss_scale: bool
        fp16 optimizer option: Enable dynamic loss scaling

    scale_window: int (SW)
        dynamic loss scaling option: increase LS after SW successful iterations

    scale_factor: float (SF)
        dynamic loss scaling option: divide LS by SF after an overflow, or
        multiply LS by SF after SW successful iterations

    min_loss_scale: float

    max_loss_scale: float

    Examples
    --------

    Follows standard Pytorch Optimizer

    >>> import torch
    >>> from olympus.models import Model
    >>> model = Model('resnet18',
    ...     input_size=(1, 28, 28),
    ...     output_size=10,)
    >>>
    >>> x = torch.randn((1, 1, 28, 28))
    >>>
    >>> optimizer = Optimizer('SGD', params=model.parameters(),  weight_decay=1e-3, lr=0.001, momentum=0.8)
    >>>
    >>> optimizer.zero_grad()
    >>> loss = model(x).sum()
    >>> optimizer.backward(loss)
    >>> optimizer.step()

    Can be lazily initialized for hyper parameter search

    >>> optimizer = Optimizer('SGD')
    >>> optimizer.get_space()
    {'lr': 'loguniform(1e-5, 1)', 'momentum': 'uniform(0, 1)', 'weight_decay': 'loguniform(1e-10, 1e-3)'}
    >>> optimizer.init(model.parameters(), weight_decay=1e-3, lr=0.001, momentum=0.8)
    >>>
    >>> optimizer.zero_grad()
    >>> loss = model(x).sum()
    >>> optimizer.backward(loss)
    >>> optimizer.step()

    Switch to a mixed precision optimizer if needed

    >>> optimizer = Optimizer('SGD', half=True)

    Raises
    ------
    RegisteredOptimizerNotFound
        when using the name of an unknown optimizer

    MissingArgument:
        if neither name nor optimizer is set

    WrongParameter
        if a wrong hyper parameter is passed in kwargs
    """
    half = False
    half_args = dict()
    _optimizer = None

    def __init__(self,
                 name=None,
                 *,
                 params=None,
                 optimizer=None,
                 half=False,
                 loss_scale=1,
                 dynamic_loss_scale=False,
                 scale_window=1000,
                 scale_factor=2,
                 min_loss_scale=None,
                 max_loss_scale=2.**24,
                 **kwargs):
        self._optimizer = None

        if params is not None:
            params = list(params)
            assert isinstance(params, (list, tuple))

        self._model_parameters = params
        self._half_parameters(half, loss_scale, dynamic_loss_scale,
                              scale_window, scale_factor, min_loss_scale,
                              max_loss_scale)

        # Track defined hyper parameters
        self.hyper_parameters = HyperParameters(space={})

        if optimizer:
            warning('Using custom optimizer')
            if isinstance(optimizer, type):
                self.optimizer_builder = optimizer

                if hasattr(optimizer, 'get_space'):
                    self.hyper_parameters.space = optimizer.get_space()
            else:
                self._optimizer = self._wrap_optimizer(optimizer)

                if hasattr(self._optimizer, 'get_space'):
                    self.hyper_parameters.space = self._optimizer.get_space()

        elif name:
            # load a registered olympus optimizer
            self.optimizer_builder = registered_optimizers.get(name.lower())

            if not self.optimizer_builder:
                raise RegisteredOptimizerNotFound(name)

            if hasattr(self.optimizer_builder, 'get_space'):
                self.hyper_parameters.space = self.optimizer_builder.get_space()

        else:
            raise MissingArgument('optimizer or name needs to be set')

        # All additional args are hyper parameters
        self.hyper_parameters.add_parameters(**kwargs)

    def _half_parameters(self,
                         half=False,
                         loss_scale=1,
                         dynamic_loss_scale=False,
                         scale_window=1000,
                         scale_factor=2,
                         min_loss_scale=None,
                         max_loss_scale=2.**24):
        """Save the configuration of the fp16 optimizer"""
        self.half = half

        static_loss_scale = loss_scale
        if dynamic_loss_scale:
            static_loss_scale = 'dynamic'

        self.half_args = dict(static_loss_scale=static_loss_scale,
                              dynamic_loss_scale=dynamic_loss_scale,
                              dynamic_loss_args=dict(
                                  init_scale=loss_scale,
                                  scale_factor=scale_factor,
                                  scale_window=scale_window,
                                  min_loss_scale=min_loss_scale,
                                  max_loss_scale=max_loss_scale),
                              verbose=False)

    def _wrap_optimizer(self, optimizer):
        if self.half:
            from olympus.utils.fp16 import FP16Optimizer
            return FP16Optimizer(optimizer, **self.half_args)

        return optimizer

    def get_space(self) -> Dict[str, str]:
        """Return the dimension space of each parameters"""
        if self._optimizer:
            warning('Optimizer is already set')

        return self.hyper_parameters.missing_parameters()

    def get_current_space(self):
        """Get currently defined parameter space"""
        return self.hyper_parameters.parameters(strict=False)

    @property
    def defaults(self):
        """Returns the default hyper parameter of the underlying optimizer"""
        return self.optimizer_builder.defaults()

    def init(self, params=None, override=False, **kwargs):
        """instantiate the underlying optimizer

        Raises
        ------
        MissingParameters
            if an hyper parameter is missing
        """
        if params is not None:
            params = list(params)
            assert isinstance(params, (list, tuple))

        if self._optimizer and not override:
            warning(
                'Optimizer is already set, use override=True to force re-initialization'
            )
            return self

        # add missing hyper parameters
        self.hyper_parameters.add_parameters(**kwargs)

        if params is None:
            params = self._model_parameters

        if params is None:
            raise MissingArgument('Missing Model parameters!')

        self._optimizer = self._wrap_optimizer(
            self.optimizer_builder(
                params, **self.hyper_parameters.parameters(strict=True)))

    @property
    def optimizer(self):
        if not self._optimizer:
            self.init()

        return self._optimizer

    def backward(self, loss):
        if self.half:  # for loss scaling
            self.optimizer.backward(loss)
        else:
            loss.backward()

    def step(self, closure=None):
        return self.optimizer.step(closure)

    def zero_grad(self):
        return self.optimizer.zero_grad()

    @property
    def param_groups(self):
        return self.optimizer.param_groups

    @property
    def state(self):
        return self.optimizer.state

    def to(self, device):
        if self._optimizer:
            for state in self.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.to(device=device)
        return self

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        s = self.optimizer.state_dict()
        return s

    def load_state_dict(self, state_dict, strict=True, device=None):
        self.optimizer.load_state_dict(state_dict)

        if device:
            self.to(device)
Example #15
    def __init__(self, random_state, **hyper_parameters):
        self.model_ctor = tree.DecisionTreeClassifier
        self.random_state = random_state
        self.hp = HyperParameters(self.hyperparameter_space(), **hyper_parameters)
        self.model = None
Example #16
class LRSchedule:
    """Lazy LRSchedule that allows you to first fetch the supported parameters using ``get_space`` and then
    initialize the underlying schedule using ``init_optimizer``

    Parameters
    ----------
    name: str
       Name of a registered schedule

    schedule: LRSchedule
       Custom schedule, mutually exclusive with :param name

    Examples
    --------

    .. code-block:: python

        from olympus.optimizers import Optimizer
        optimizer = Optimizer('sgd')
        schedule = LRSchedule('exponential')
        schedule.get_space()
        # {'gamma': 'loguniform(0.97, 1)'}
        schedule.init(optimizer, gamma=0.97)

    Raises
    ------
    RegisteredLRSchedulerNotFound
        when using the name of an unknown schedule

    MissingArgument:
        if neither name nor schedule is set
    """

    def __init__(self, name=None, *, schedule=None, optimizer=None, **kwargs):
        self._schedule = None
        self._schedule_builder = None
        self._optimizer = optimizer

        self.hyper_parameters = HyperParameters(space={})

        if schedule:
            if isinstance(schedule, type):
                self._schedule_builder = schedule

                if hasattr(schedule, 'get_space'):
                    self.hyper_parameters.space = schedule.get_space()

            else:
                self._schedule = schedule

            if hasattr(self._schedule, 'get_space'):
                self.hyper_parameters.space = self._schedule.get_space()

        elif name:
            # load a registered olympus schedule
            builder = registered_schedules.get(name)

            if not builder:
                raise RegisteredLRSchedulerNotFound(name)

            self._schedule_builder = builder

            if hasattr(self._schedule_builder, 'get_space'):
                self.hyper_parameters.space = self._schedule_builder.get_space()

        else:
            raise MissingArgument('schedule or name needs to be set')

        self.hyper_parameters.add_parameters(**kwargs)

    def init(self, optimizer=None, override=False, **kwargs):
        """Initialize the LR schedule with the given hyper parameters"""
        if self._schedule:
            warning('LRSchedule is already set, use override=True to force re-initialization')

            if not override:
                return self._schedule

        if optimizer is None:
            optimizer = self._optimizer

        if optimizer is None:
            raise MissingArgument('Missing optimizer argument!')

        self.hyper_parameters.add_parameters(**kwargs)
        self._schedule = self._schedule_builder(
            optimizer,
            **self.hyper_parameters.parameters(strict=True))

        return self

    def get_space(self):
        """Return the missing hyper parameters required to initialize the LR schedule"""
        if self._schedule:
            warning('LRSchedule is already set')

        return self.hyper_parameters.missing_parameters()

    def get_current_space(self):
        """Get currently defined parameter space"""
        return self.hyper_parameters.parameters(strict=False)

    @property
    def defaults(self):
        """Return default hyper parameters"""
        return self._schedule_builder.defaults()

    @property
    def lr_scheduler(self):
        if not self._schedule:
            self.init()

        return self._schedule

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        return self.lr_scheduler.state_dict()

    def load_state_dict(self, state_dict, strict=True):
        self.lr_scheduler.load_state_dict(state_dict)

    def epoch(self, epoch, metrics=None):
        """Called after every epoch to update LR"""
        self.lr_scheduler.epoch(epoch, metrics)

    def step(self, step, metrics=None):
        """Called every step/batch to update LR"""
        self.lr_scheduler.step(step, metrics)

    def get_lr(self):
        return self.lr_scheduler.get_lr()
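
A hedged checkpoint round-trip sketch built only from the methods above (init, epoch, state_dict, load_state_dict); the optimizer and the training loop are placeholders, and 'exponential' / gamma=0.97 follow the docstring example.

# Hypothetical save/resume flow for the lazy LRSchedule above
schedule = LRSchedule('exponential', optimizer=optimizer)  # `optimizer` is a placeholder Optimizer
schedule.init(gamma=0.97)

for e in range(10):
    # ... train one epoch, then let the schedule update the learning rate ...
    schedule.epoch(e)

state = schedule.state_dict()      # checkpoint the schedule

resumed = LRSchedule('exponential', optimizer=optimizer)
resumed.init(gamma=0.97)
resumed.load_state_dict(state)     # resume where the previous run stopped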
Example #17
    def __init__(self, random_state, **hyper_parameters):
        self.model_ctor = linear_model.LogisticRegression
        self.random_state = random_state
        self.hp = HyperParameters(self.hyperparameter_space(),
                                  **hyper_parameters)
        self.model = None