Example #1
    def stop_training(self, msg=None):
        """Stop training phase and start an execute phase with a target.

        The possible return types are None, y, (y, msg), (y, msg, target).
        For None nothing more happens; the training phase ends as for a
        standard MDP node.
        If a return value is given then an execute phase is started.

        This template method normally calls a _stop_training method from
        self._train_seq.

        If a stop_result was given in __init__ then it is used, but it can be
        overwritten by the returned _stop_training result or by the msg
        argument provided by the BiFlow.
        """
        # basic checks
        if self.is_training() and not self._train_phase_started:
            raise mdp.TrainingException("The node has not been trained.")
        if not self.is_training():
            err = "The training phase has already finished."
            raise mdp.TrainingFinishedException(err)
        # call stop_training
        if not msg:
            result = self._train_seq[self._train_phase][1]()
            target = None
        else:
            msg_id_keys = self._get_msg_id_keys(msg)
            target = self._extract_message_key("target", msg, msg_id_keys)
            method_name = self._extract_message_key("method", msg, msg_id_keys)
            default_method = self._train_seq[self._train_phase][1]
            method, target = self._get_method(method_name, default_method,
                                              target)
            msg, arg_dict = self._extract_method_args(method, msg, msg_id_keys)
            result = method(**arg_dict)
        # close the current phase
        self._train_phase += 1
        self._train_phase_started = False
        # check if we have some training phase left
        if self.get_remaining_train_phase() == 0:
            self._training = False
        # use stored stop message and update it with the result
        if self._stop_result:
            if self.has_multiple_training_phases():
                stored_stop_result = self._stop_result[self._train_phase - 1]
            else:
                stored_stop_result = self._stop_result
            # make sure that the original dict in stored_stop_result is not
            # modified (this could have unexpected consequences in some cases)
            stored_msg = stored_stop_result[0].copy()
            if msg:
                stored_msg.update(msg)
            msg = stored_msg
            if target is None:
                target = stored_stop_result[1]
        return self._combine_result(result, msg, target)
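The copy()/update() sequence at the end matters: the stored stop message must not be mutated in place, and keys supplied via the msg argument take precedence over the stored ones. A standalone sketch of that merge order (plain dicts, no bimdp dependency; the example values are made up):

# Sketch of the stop-message merge performed above: copy the stored dict so
# the original stays intact, let the caller-supplied msg override its keys,
# and fall back to the stored target only if none was extracted from msg.
stored_stop_result = ({"scale": 1.0, "phase": "stop"}, "next_node")

def merge_stop_msg(stored_stop_result, msg, target):
    stored_msg = stored_stop_result[0].copy()   # protect the stored dict
    if msg:
        stored_msg.update(msg)                  # msg keys win over stored keys
    if target is None:
        target = stored_stop_result[1]          # fall back to stored target
    return stored_msg, target

msg, target = merge_stop_msg(stored_stop_result, {"scale": 2.0}, None)
print(msg)     # {'scale': 2.0, 'phase': 'stop'}
print(target)  # next_node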
Example #2
    def train(self,
              v,
              l,
              n_updates=1,
              epsilon=0.1,
              decay=0.,
              momentum=0.,
              verbose=False):
        """Update the internal structures according to the visible data `v`
        and the labels `l`.
        The training is performed using Contrastive Divergence (CD).

        :param v: A binary matrix having different variables on different
            columns and observations on the rows.
        :type v: numpy.ndarray

        :param l: A binary matrix having different variables on different
            columns and observations on the rows. Only one value per row should
            be 1.
        :type l: numpy.ndarray

        :param n_updates: Number of CD iterations. Default value: 1
        :type n_updates: int

        :param epsilon: Learning rate. Default value: 0.1
        :type epsilon: float

        :param decay: Weight decay term. Default value: 0.
        :type decay: float

        :param momentum: Momentum term. Default value: 0.
        :type momentum: float

        :param verbose: Controls the verbosity.
        :type verbose: bool
        """

        if not self.is_training():
            errstr = "The training phase has already finished."
            raise mdp.TrainingFinishedException(errstr)

        x = numx.concatenate((v, l), axis=1)
        self._check_input(x)

        self._train_phase_started = True
        self._train_seq[self._train_phase][0](self._refcast(x),
                                              n_updates=n_updates,
                                              epsilon=epsilon,
                                              decay=decay,
                                              momentum=momentum,
                                              verbose=verbose)
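For orientation, a hedged usage sketch of calling this train method: the data shapes follow the docstring (binary v, one-hot l). The node class and its constructor arguments are assumptions (mdp.nodes.RBMWithLabelsNode with hidden_dim, labels_dim, visible_dim in that order), not something the snippet above confirms:

import numpy as np
import mdp

np.random.seed(0)
# v: binary observations, rows are samples and columns are visible units
v = (np.random.rand(100, 8) > 0.5).astype('d')
# l: one-hot label matrix, exactly one 1 per row
labels = np.random.randint(0, 2, size=100)
l = np.zeros((100, 2))
l[np.arange(100), labels] = 1.0

# Assumed node class / constructor order: (hidden_dim, labels_dim, visible_dim)
node = mdp.nodes.RBMWithLabelsNode(4, 2, 8)
for _ in range(10):   # a few CD sweeps over the same data
    node.train(v, l, n_updates=1, epsilon=0.1, decay=0.0, momentum=0.0)
node.stop_training()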
Example #3
    def train(self, x, n_updates=1, epsilon=0.1, decay=0., momentum=0.):
        """Update the parameters according to the input 'v' and context 'x'.
        The training is performed using Contrastive Divergence (CD).

            - v: a binary matrix having different variables on different columns and observations on the rows.
            - x: a matrix having different variables on different columns and observations on the rows.
            - n_updates: number of CD iterations. Default value: 1
            - epsilon: learning rate. Default value: 0.1
            - decay: weight decay term. Default value: 0.
            - momentum: momentum term. Default value: 0.
        """

        if not self.is_training():
            errstr = "The training phase has already finished."
            raise mdp.TrainingFinishedException(errstr)

        self._train_phase_started = True
        self._train(x, n_updates, epsilon, decay, momentum)
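The is_training() guard above means that once stop_training() has closed the last training phase, any further train() call raises mdp.TrainingFinishedException. A minimal illustration of that contract (PCANode is used here purely as an example of a trainable mdp node):

import numpy as np
import mdp

x = np.random.rand(50, 5)
node = mdp.nodes.PCANode(output_dim=3)
node.train(x)
node.stop_training()

try:
    node.train(x)   # the training phase is already closed
except mdp.TrainingFinishedException as exc:
    print("refused:", exc)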
Example #4
    def train(self,
              v,
              l,
              n_updates=1,
              epsilon=0.1,
              decay=0.,
              momentum=0.,
              verbose=False):
        """Update the internal structures according to the visible data `v`
        and the labels `l`.
        The training is performed using Contrastive Divergence (CD).

        :Parameters:
          v
            a binary matrix having different variables on different columns
            and observations on the rows
          l
            a binary matrix having different variables on different columns
            and observations on the rows. Only one value per row should be 1.
          n_updates
            number of CD iterations. Default value: 1
          epsilon
            learning rate. Default value: 0.1
          decay
            weight decay term. Default value: 0.
          momentum
            momentum term. Default value: 0.
          verbose
            controls the verbosity. Default value: False
        """

        if not self.is_training():
            errstr = "The training phase has already finished."
            raise mdp.TrainingFinishedException(errstr)

        x = numx.concatenate((v, l), axis=1)
        self._check_input(x)

        self._train_phase_started = True
        self._train_seq[self._train_phase][0](self._refcast(x),
                                              n_updates=n_updates,
                                              epsilon=epsilon,
                                              decay=decay,
                                              momentum=momentum,
                                              verbose=verbose)
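As in the variant above, v and l are fused into a single input by column-wise concatenation before being dispatched to the current training phase. A quick shape check of that step (plain numpy, independent of the node):

import numpy as np

v = np.zeros((100, 8))   # 100 observations, 8 visible units
l = np.zeros((100, 2))   # one-hot labels with 2 classes
x = np.concatenate((v, l), axis=1)
print(x.shape)           # (100, 10): labels become extra visible columns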
Example #5
    def train(self, x, msg=None):
        """Train and return None or more if the execution should continue.

        The possible return types are None, y, (y, msg), (y, msg, target).
        The last entry in a result tuple must not be None.
        y can be None if the result is a tuple.

        This template method normally calls the corresponding _train method,
        or another method as specified in the message (using the magic 'method'
        key).

        Note that the remaining msg and target values are only used if _train
        (or the requested method) returns something different from None
        (so an empty dict can be used to trigger continued execution).
        """
        # perform checks, adapted from Node.train
        if not self.is_trainable():
            raise mdp.IsNotTrainableException("This node is not trainable.")
        if not self.is_training():
            err = "The training phase has already finished."
            raise mdp.TrainingFinishedException(err)
        if msg is None:
            if x is None:
                err = "Both x and msg are None."
                raise BiNodeException(err)
            # no fall-back on Node.train because we might have a return value
            self._check_input(x)
            try:
                self._check_train_args(x)
            except TypeError:
                err = ("%s training seems to require " % str(self) +
                       "additional arguments, but none were given.")
                raise BiNodeException(err)
            self._train_phase_started = True
            x = self._refcast(x)
            return self._train_seq[self._train_phase][0](x)
        msg_id_keys = self._get_msg_id_keys(msg)
        target = self._extract_message_key("target", msg, msg_id_keys)
        method_name = self._extract_message_key("method", msg, msg_id_keys)
        default_method = self._train_seq[self._train_phase][0]
        method, target = self._get_method(method_name, default_method, target)
        msg, arg_dict = self._extract_method_args(method, msg, msg_id_keys)
        # perform specific checks
        if x is not None:
            if (not method_name) or (method_name == "train"):
                self._check_input(x)
                try:
                    self._check_train_args(x, **arg_dict)
                except TypeError:
                    err = ("The given additional arguments %s " %
                           str(list(arg_dict.keys())) +
                           "are not compatible with training %s." % str(self))
                    raise BiNodeException(err)
                self._train_phase_started = True
                x = self._refcast(x)
            elif method == self._inverse:
                self._pre_inversion_checks(x)
        result = method(x, **arg_dict)
        if result is None:
            return None
        result = self._combine_result(result, msg, target)
        if (isinstance(result, tuple) and len(result) == 2
                and result[0] is None):
            # drop the remaining msg, so that no manual clearing is required
            return None
        return result
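The final filtering step above (returning None when only a (None, msg) pair is left) can be isolated into a small standalone sketch of the same branch:

# Standalone sketch of the result filtering at the end of train(): a bare
# None or a 2-tuple whose y part is None both collapse to None, so no manual
# clearing of the leftover msg is required downstream.
def filter_train_result(result):
    if result is None:
        return None
    if (isinstance(result, tuple) and len(result) == 2
            and result[0] is None):
        return None
    return result

print(filter_train_result(None))                       # None
print(filter_train_result((None, {"phase": "done"})))  # None, msg dropped
print(filter_train_result((1.5, {"phase": "done"})))   # passed through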