Example #1
    def init_from_scratch(self) -> None:
        """
        Initialize ``self.model`` as a sklearn model from scratch with the parameters given in ``self.model_params``.

        Returns:
            None
        """
        log.info("Initializing model {} from scratch".format(self.model_class))
        model_function = cls_from_str(self.model_class)

        if model_function is None:
            raise ConfigError("Model with {} model_class was not found.".format(self.model_class))

        given_params = {}
        if self.model_params:
            available_params = self.get_function_params(model_function)
            for param_name in self.model_params.keys():
                if param_name in available_params:
                    try:
                        given_params[param_name] = cls_from_str(self.model_params[param_name])
                    except (AttributeError, ValueError, ConfigError):
                        given_params[param_name] = self.model_params[param_name]

        self.model = model_function(**given_params)
        return
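
All of the snippets on this page revolve around `cls_from_str`. As a rough mental model only (the real DeepPavlov helper may behave differently), it resolves a string such as `'sklearn.linear_model:LogisticRegression'` into the class object itself. A minimal sketch under that assumption:

import importlib

def cls_from_str_sketch(name: str) -> type:
    # Hypothetical re-implementation for illustration: split a
    # "<module path>:<class name>" string and import the class.
    module_name, _, cls_name = name.partition(':')
    if not cls_name:
        raise ValueError("expected '<module>:<class>', got {!r}".format(name))
    return getattr(importlib.import_module(module_name), cls_name)

# e.g. cls_from_str_sketch('sklearn.linear_model:LogisticRegression')
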
Example #2
    def init_from_scratch(self) -> None:
        """
        Initialize ``self.model`` as a sklearn model from scratch with the parameters given in ``self.model_params``.

        Returns:
            None
        """
        log.info("Initializing model {} from scratch".format(self.model_class))
        model_function = cls_from_str(self.model_class)

        if model_function is None:
            raise ConfigError(
                "Model with {} model_class was not found.".format(
                    self.model_class))

        given_params = {}
        if self.model_params:
            available_params = self.get_function_params(model_function)
            for param_name in self.model_params.keys():
                if param_name in available_params:
                    try:
                        given_params[param_name] = cls_from_str(
                            self.model_params[param_name])
                    except (AttributeError, ValueError, ConfigError):
                        given_params[param_name] = self.model_params[
                            param_name]

        self.model = model_function(**given_params)
        return
Example #3
    def __init__(self,
                 optimizer: str = 'AdamOptimizer',
                 clip_norm: float = None,
                 momentum: float = None,
                 **kwargs) -> None:
        TFModel.__init__(self, **kwargs)

        try:
            self._optimizer = cls_from_str(optimizer)
        except Exception:
            self._optimizer = getattr(tf.train, optimizer.split(':')[-1])
        if not issubclass(self._optimizer, tf.train.Optimizer):
            raise ConfigError("`optimizer` should be tensorflow.train.Optimizer subclass")
        self._clip_norm = clip_norm

        if (momentum is None) and\
                self._optimizer not in (tf.train.AdagradDAOptimizer,
                                        tf.train.AdagradOptimizer,
                                        tf.train.GradientDescentOptimizer,
                                        tf.train.ProximalGradientDescentOptimizer,
                                        tf.train.ProximalAdagradOptimizer):
            momentum = 0.9
        kwargs['momentum'] = momentum

        LRScheduledModel.__init__(self, **kwargs)
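
The `optimizer` argument above can be either a full `module:Class` path (handled by `cls_from_str`) or a bare class name looked up on `tf.train`. A small check of the fallback branch, assuming TensorFlow 1.x where `tf.train.AdamOptimizer` is still a class:

import tensorflow as tf

# Both spellings resolve to the same optimizer class via the
# getattr(tf.train, ...) fallback used above (TensorFlow 1.x assumed).
for name in ('AdamOptimizer', 'tensorflow.train:AdamOptimizer'):
    opt_cls = getattr(tf.train, name.split(':')[-1])
    assert issubclass(opt_cls, tf.train.Optimizer)
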
Example #4
def from_params(params: Dict, mode: str = 'infer', **kwargs) -> Component:
    """Builds and returns the Component from corresponding dictionary of parameters."""
    # what is passed in json:
    config_params = {k: _resolve(v) for k, v in params.items()}

    # get component by reference (if any)
    if 'ref' in config_params:
        try:
            return _refs[config_params['ref']]
        except KeyError:
            e = ConfigError(
                'Component with id "{id}" was referenced but not initialized'.
                format(id=config_params['ref']))
            log.exception(e)
            raise e

    elif 'config_path' in config_params:
        from deeppavlov.core.commands.infer import build_model_from_config
        deeppavlov_root = get_deeppavlov_root()
        refs = _refs.copy()
        _refs.clear()
        config = read_json(expand_path(config_params['config_path']))
        model = build_model_from_config(config)
        set_deeppavlov_root({'deeppavlov_root': deeppavlov_root})
        _refs.clear()
        _refs.update(refs)
        return model

    elif 'class' in config_params:
        cls = cls_from_str(config_params.pop('class'))
    else:
        cls_name = config_params.pop('name', None)
        if not cls_name:
            e = ConfigError(
                'Component config has no `name`, `ref` or `class` fields')
            log.exception(e)
            raise e
        cls = get_model(cls_name)

    # find the submodels params recursively
    config_params = {k: _init_param(v, mode) for k, v in config_params.items()}

    try:
        spec = inspect.getfullargspec(cls)
        if 'mode' in spec.args + spec.kwonlyargs or spec.varkw is not None:
            kwargs['mode'] = mode

        component = cls(**dict(config_params, **kwargs))
        try:
            _refs[config_params['id']] = component
        except KeyError:
            pass
    except Exception:
        log.exception("Exception in {}".format(cls))
        raise

    return component
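
For reference, the branches above correspond to the different ways a component can be described in a config. The snippet below is a hypothetical illustration: the key names come from the code above, while the component and module names are made up.

by_ref = {'ref': 'my_tokenizer'}                          # reuse an already initialized component by id
by_path = {'config_path': 'path/to/other_config.json'}    # build a nested pipeline from another config
by_class = {'class': 'my_package.my_module:MyComponent'}  # resolve the class via cls_from_str
by_name = {'name': 'some_registered_name', 'id': 'my_tokenizer'}  # look the class up via get_model
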
Example #5
def from_params(params: Dict, mode: str = 'infer', **kwargs) -> Component:
    """Builds and returns the Component from corresponding dictionary of parameters."""
    # what is passed in json:
    config_params = {k: _resolve(v) for k, v in params.items()}

    # get component by reference (if any)
    if 'ref' in config_params:
        try:
            return _refs[config_params['ref']]
        except KeyError:
            e = ConfigError('Component with id "{id}" was referenced but not initialized'
                            .format(id=config_params['ref']))
            log.exception(e)
            raise e

    elif 'config_path' in config_params:
        from deeppavlov.core.commands.infer import build_model_from_config
        deeppavlov_root = get_deeppavlov_root()
        refs = _refs.copy()
        _refs.clear()
        config = read_json(expand_path(config_params['config_path']))
        model = build_model_from_config(config, as_component=True)
        set_deeppavlov_root({'deeppavlov_root': deeppavlov_root})
        _refs.clear()
        _refs.update(refs)
        return model

    elif 'class' in config_params:
        cls = cls_from_str(config_params.pop('class'))
    else:
        cls_name = config_params.pop('name', None)
        if not cls_name:
            e = ConfigError('Component config has no `name`, `ref` or `class` fields')
            log.exception(e)
            raise e
        cls = get_model(cls_name)

    # find the submodels params recursively
    config_params = {k: _init_param(v, mode) for k, v in config_params.items()}

    try:
        spec = inspect.getfullargspec(cls)
        if 'mode' in spec.args+spec.kwonlyargs or spec.varkw is not None:
            kwargs['mode'] = mode

        component = cls(**dict(config_params, **kwargs))
        try:
            _refs[config_params['id']] = component
        except KeyError:
            pass
    except Exception:
        log.exception("Exception in {}".format(cls))
        raise

    return component
Example #6
    def __init__(self,
                 optimizer: str = 'AdamOptimizer',
                 clip_norm: float = None,
                 momentum: float = None,
                 **kwargs) -> None:
        TFModel.__init__(self, **kwargs)

        try:
            self._optimizer = cls_from_str(optimizer)
        except Exception:
            self._optimizer = getattr(tf.train, optimizer.split(':')[-1])
        if not issubclass(self._optimizer, tf.train.Optimizer):
            raise ConfigError("`optimizer` should be tensorflow.train.Optimizer subclass")
        self._clip_norm = clip_norm

        LRScheduledModel.__init__(self, momentum=momentum, **kwargs)
Example #7
    def __init__(self,
                 learning_rate: Union[float, Tuple[float, float]] = None,
                 learning_rate_decay: Union[DType, Tuple[DType,
                                                         Any]] = DecayType.NO,
                 learning_rate_decay_epochs: int = 0,
                 learning_rate_decay_batches: int = 0,
                 learning_rate_drop_div: float = 2.0,
                 learning_rate_drop_patience: int = None,
                 momentum: Union[float, Tuple[float, float]] = None,
                 momentum_decay: Union[DType, Tuple[DType,
                                                    Any]] = DecayType.NO,
                 momentum_decay_epochs: int = 0,
                 momentum_decay_batches: int = 0,
                 optimizer: str = 'AdamOptimizer',
                 clip_norm: float = None,
                 fit_batch_size: Union[int, str] = None,
                 fit_learning_rate: Tuple[float, float] = [1e-7, 100],
                 fit_learning_rate_div: float = 10.,
                 fit_beta: float = 0.98,
                 fit_min_batches: int = 10,
                 fit_max_batches: int = None,
                 *args,
                 **kwargs) -> None:
        if learning_rate_decay_epochs and learning_rate_decay_batches:
            raise ConfigError("isn't able to update learning rate every batch"
                              " and every epoch sumalteniously")
        if momentum_decay_epochs and momentum_decay_batches:
            raise ConfigError("isn't able to update momentum every batch"
                              " and every epoch sumalteniously")
        super().__init__(*args, **kwargs)

        try:
            self._optimizer = cls_from_str(optimizer)
        except Exception:
            self._optimizer = getattr(tf.train, optimizer.split(':')[-1])
        if not issubclass(self._optimizer, tf.train.Optimizer):
            raise ConfigError(
                "`optimizer` should be tensorflow.train.Optimizer subclass")

        start_val, end_val = learning_rate, None
        if isinstance(learning_rate, (tuple, list)):
            start_val, end_val = learning_rate
        dec_type, extra = learning_rate_decay, None
        if isinstance(learning_rate_decay, (tuple, list)):
            dec_type, extra = learning_rate_decay

        self._lr = start_val
        num_it, self._lr_update_on_batch = learning_rate_decay_epochs, False
        if learning_rate_decay_batches > 0:
            num_it, self._lr_update_on_batch = learning_rate_decay_batches, True

        self._lr_schedule = DecayScheduler(start_val=start_val,
                                           end_val=end_val,
                                           num_it=num_it,
                                           dec_type=dec_type,
                                           extra=extra)
        #self._lr_var = tf.placeholder(tf.float32, shape=[], name='learning_rate')
        self._lr_var = tf.Variable(self._lr or 0.,
                                   dtype=tf.float32,
                                   name='learning_rate')

        if (momentum is None) and\
                self._optimizer not in (tf.train.AdagradDAOptimizer,
                                        tf.train.AdagradOptimizer,
                                        tf.train.GradientDescentOptimizer,
                                        tf.train.ProximalGradientDescentOptimizer,
                                        tf.train.ProximalAdagradOptimizer):
            momentum = 0.9
        start_val, end_val = momentum, None
        if isinstance(momentum, (tuple, list)):
            start_val, end_val = momentum
        dec_type, extra = momentum_decay, None
        if isinstance(momentum_decay, (tuple, list)):
            dec_type, extra = momentum_decay

        self._mom = start_val
        num_it, self._mom_update_on_batch = momentum_decay_epochs, False
        if momentum_decay_batches > 0:
            num_it, self._mom_update_on_batch = momentum_decay_batches, True

        self._mom_schedule = DecayScheduler(start_val=start_val,
                                            end_val=end_val,
                                            num_it=num_it,
                                            dec_type=dec_type,
                                            extra=extra)
        # self._mom_var = tf.placeholder_with_default(0.9, shape=[], name='momentum')
        # self._mom_var = tf.placeholder(tf.float32, shape=[], name='momentum')
        self._mom_var = tf.Variable(self._mom or 0.,
                                    dtype=tf.float32,
                                    name='momentum')

        self._learning_rate_drop_patience = learning_rate_drop_patience
        self._learning_rate_drop_div = learning_rate_drop_div
        self._learning_rate_cur_impatience = 0.
        self._learning_rate_last_impatience = 0.
        self._learning_rate_cur_div = 1.
        self._clip_norm = clip_norm
        self._fit_batch_size = fit_batch_size
        self._fit_learning_rate = fit_learning_rate
        self._fit_learning_rate_div = fit_learning_rate_div
        self._fit_beta = fit_beta
        self._fit_min_batches = fit_min_batches
        self._fit_max_batches = fit_max_batches
        self._external_lr = False
        self._external_mom = False
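
Both `learning_rate` and `momentum` above accept either a scalar or a `(start, end)` pair, and the decay type may come with an extra value; the constructor normalizes each with the same pattern. A standalone sketch of that normalization (the helper name is hypothetical):

from typing import Any, Optional, Tuple, Union

def split_pair(value: Union[None, float, Tuple[float, float], list]) -> Tuple[Any, Optional[Any]]:
    # Mirrors the normalization above: a scalar becomes (value, None),
    # while a (start, end) tuple or list is unpacked as-is.
    first, second = value, None
    if isinstance(value, (tuple, list)):
        first, second = value
    return first, second

assert split_pair(0.9) == (0.9, None)
assert split_pair((1e-3, 1e-5)) == (1e-3, 1e-5)
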
Example #8
    def __init__(self,
                 learning_rate: Union[float, Tuple[float, float]] = None,
                 learning_rate_decay: Union[DType, Tuple[DType, Any]] = DecayType.NO,
                 learning_rate_decay_epochs: int = 0,
                 learning_rate_decay_batches: int = 0,
                 learning_rate_drop_div: float = 2.0,
                 learning_rate_drop_patience: int = None,
                 momentum: Union[float, Tuple[float, float]] = None,
                 momentum_decay: Union[DType, Tuple[DType, Any]] = DecayType.NO,
                 momentum_decay_epochs: int = 0,
                 momentum_decay_batches: int = 0,
                 optimizer: str = 'AdamOptimizer',
                 clip_norm: float = None,
                 fit_batch_size: Union[int, str] = None,
                 fit_learning_rate: Tuple[float, float] = [1e-7, 100],
                 fit_learning_rate_div: float = 10.,
                 fit_beta: float = 0.98,
                 fit_min_batches: int = 10,
                 fit_max_batches: int = None,
                 *args, **kwargs) -> None:
        if learning_rate_decay_epochs and learning_rate_decay_batches:
            raise ConfigError("isn't able to update learning rate every batch"
                              " and every epoch sumalteniously")
        if momentum_decay_epochs and momentum_decay_batches:
            raise ConfigError("isn't able to update momentum every batch"
                              " and every epoch sumalteniously")
        super().__init__(*args, **kwargs)

        try:
            self._optimizer = cls_from_str(optimizer)
        except Exception:
            self._optimizer = getattr(tf.train, optimizer.split(':')[-1])
        if not issubclass(self._optimizer, tf.train.Optimizer):
            raise ConfigError("`optimizer` should be tensorflow.train.Optimizer subclass")

        start_val, end_val = learning_rate, None
        if isinstance(learning_rate, (tuple, list)):
            start_val, end_val = learning_rate
        dec_type, extra = learning_rate_decay, None
        if isinstance(learning_rate_decay, (tuple, list)):
            dec_type, extra = learning_rate_decay

        self._lr = start_val
        num_it, self._lr_update_on_batch = learning_rate_decay_epochs, False
        if learning_rate_decay_batches > 0:
            num_it, self._lr_update_on_batch = learning_rate_decay_batches, True

        self._lr_schedule = DecayScheduler(start_val=start_val, end_val=end_val,
                                           num_it=num_it, dec_type=dec_type, extra=extra)
        #self._lr_var = tf.placeholder(tf.float32, shape=[], name='learning_rate')
        self._lr_var = tf.Variable(self._lr or 0., dtype=tf.float32, name='learning_rate')

        if (momentum is None) and\
                self._optimizer not in (tf.train.AdagradDAOptimizer,
                                        tf.train.AdagradOptimizer,
                                        tf.train.GradientDescentOptimizer,
                                        tf.train.ProximalGradientDescentOptimizer,
                                        tf.train.ProximalAdagradOptimizer):
            momentum = 0.9
        start_val, end_val = momentum, None
        if isinstance(momentum, (tuple, list)):
            start_val, end_val = momentum
        dec_type, extra = momentum_decay, None
        if isinstance(momentum_decay, (tuple, list)):
            dec_type, extra = momentum_decay

        self._mom = start_val
        num_it, self._mom_update_on_batch = momentum_decay_epochs, False
        if momentum_decay_batches > 0:
            num_it, self._mom_update_on_batch = momentum_decay_batches, True

        self._mom_schedule = DecayScheduler(start_val=start_val, end_val=end_val,
                                            num_it=num_it, dec_type=dec_type,
                                            extra=extra)
        # self._mom_var = tf.placeholder_with_default(0.9, shape=[], name='momentum')
        # self._mom_var = tf.placeholder(tf.float32, shape=[], name='momentum')
        self._mom_var = tf.Variable(self._mom or 0., dtype=tf.float32, name='momentum')

        self._learning_rate_drop_patience = learning_rate_drop_patience
        self._learning_rate_drop_div = learning_rate_drop_div
        self._learning_rate_cur_impatience = 0.
        self._learning_rate_last_impatience = 0.
        self._learning_rate_cur_div = 1.
        self._clip_norm = clip_norm
        self._fit_batch_size = fit_batch_size
        self._fit_learning_rate = fit_learning_rate
        self._fit_learning_rate_div = fit_learning_rate_div
        self._fit_beta = fit_beta
        self._fit_min_batches = fit_min_batches
        self._fit_max_batches = fit_max_batches
        self._external_lr = False
        self._external_mom = False