예제 #1
0
 def opt(self, epoch=None, eps=None):
     """
     Main procedure of opt
     :param epoch : Maximum iteration ; default: 1000
     :param eps   : Tolerance         ; default: 1e-8
     :return      : x*, f*, n_iter, feva
     """
     if epoch is None:
         epoch = self._params["epoch"]
     if eps is None:
         eps = self._params["eps"]
     # Refresh the function cache at the starting point, then record the
     # initial loss and gradient
     self._func.refresh_cache(self._x)
     self._loss_cache, self._grad_cache = self.func(0), self.func(1)
     bar = ProgressBar(max_value=epoch, name="Opt")
     # FIX: the bar was updated but never started; the sibling implementation
     # of this method calls bar.start() right after construction — do the same
     bar.start()
     for _ in range(epoch):
         self.iter += 1
         # Promote numeric RuntimeWarnings (overflow, invalid value, ...) to
         # errors so a diverging run stops instead of silently producing NaNs
         with warnings.catch_warnings():
             warnings.filterwarnings("error")
             try:
                 # _core performs one step; a truthy return presumably signals
                 # convergence within eps — stop early
                 if self._core(eps):
                     break
                 self.log.append(self._loss_cache)
             except RuntimeWarning as err:
                 print("\n", err, "\n")
                 break
             # FIX: np.linalg.linalg is a deprecated private alias (removed in
             # NumPy 2.0); catch the public np.linalg.LinAlgError instead —
             # same class on older NumPy, so behavior is unchanged there
             except np.linalg.LinAlgError as err:
                 print("\n", err, "\n")
                 break
         bar.update()
     bar.update()
     bar.terminate()
     return self._x, self._loss_cache, self.iter, self.feva
예제 #2
0
 def opt(self, epoch=None, eps=None):
     """
     Main procedure of opt
     :param epoch : Maximum iteration ; default: 1000
     :param eps   : Tolerance         ; default: 1e-8
     :return      : x*, f*, n_iter, feva
     """
     if epoch is None:
         epoch = self._params["epoch"]
     if eps is None:
         eps = self._params["eps"]
     # Refresh the function cache at the starting point, then record the
     # initial loss and gradient
     self._func.refresh_cache(self._x)
     self._loss_cache, self._grad_cache = self.func(0), self.func(1)
     bar = ProgressBar(max_value=epoch, name="Opt")
     bar.start()
     for _ in range(epoch):
         self.iter += 1
         # Promote numeric RuntimeWarnings (overflow, invalid value, ...) to
         # errors so a diverging run stops instead of silently producing NaNs
         with warnings.catch_warnings():
             warnings.filterwarnings("error")
             try:
                 # _core performs one step; a truthy return presumably signals
                 # convergence within eps — stop early
                 if self._core(eps):
                     break
                 self.log.append(self._loss_cache)
             except RuntimeWarning as err:
                 print("\n", err, "\n")
                 break
             # FIX: np.linalg.linalg is a deprecated private alias (removed in
             # NumPy 2.0); catch the public np.linalg.LinAlgError instead —
             # same class on older NumPy, so behavior is unchanged there
             except np.linalg.LinAlgError as err:
                 print("\n", err, "\n")
                 break
         bar.update()
     bar.update()
     bar.terminate()
     return self._x, self._loss_cache, self.iter, self.feva
예제 #3
0
    def fit(self, x, y, sample_weight=None, lr=None, epoch=None, animation_params=None):
        """
        Train the perceptron by repeatedly correcting the most-violated sample.

        Hyper-parameters left as None fall back to self._params; training ends
        early once no sample has a positive (weighted) violation.
        """
        sample_weight = self._params["sample_weight"] if sample_weight is None else sample_weight
        lr = self._params["lr"] if lr is None else lr
        epoch = self._params["epoch"] if epoch is None else epoch
        *animation_properties, animation_params = self._get_animation_params(animation_params)

        x = np.atleast_2d(x)
        y = np.asarray(y)
        # Uniform weights by default; otherwise scale user weights by the sample count
        sample_weight = (np.ones(len(y)) if sample_weight is None
                         else np.asarray(sample_weight) * len(y))

        self._w = np.random.random(x.shape[1])
        self._b = 0.
        ims = []
        bar = ProgressBar(max_value=epoch, name="Perceptron")
        for step in range(epoch):
            raw_pred = self.predict(x, True)
            violations = -y * raw_pred * sample_weight
            worst = np.argmax(violations)
            if violations[worst] < 0:
                # Every sample sits on the correct side — converged
                bar.terminate()
                break
            norm = np.linalg.norm(self._w)
            delta = lr * y[worst] * sample_weight[worst] / norm
            self._w += delta * (x[worst] - raw_pred[worst] * self._w / norm ** 2)
            self._b += delta
            self._handle_animation(step, x, y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
예제 #4
0
    def fit(self,
            x,
            y,
            sample_weight=None,
            c=None,
            lr=None,
            optimizer=None,
            batch_size=None,
            epoch=None,
            tol=None,
            animation_params=None):
        """
        Train the linear SVM with mini-batch updates.

        Hyper-parameters left as None fall back to self._params; the loop
        stops early once the batch loss drops below tol.
        """
        # Resolve unspecified hyper-parameters from the stored defaults
        sample_weight, c, lr, batch_size, epoch, tol, optimizer = [
            self._params[key] if value is None else value
            for key, value in (
                ("sample_weight", sample_weight), ("c", c), ("lr", lr),
                ("batch_size", batch_size), ("epoch", epoch), ("tol", tol),
                ("optimizer", optimizer))
        ]
        *animation_properties, animation_params = self._get_animation_params(
            animation_params)
        x = np.atleast_2d(x)
        y = np.asarray(y, dtype=np.float32)
        # Uniform weights by default; otherwise scale by the sample count
        sample_weight = (np.ones(len(y)) if sample_weight is None
                         else np.asarray(sample_weight) * len(y))

        # Parameter vectors registered with the optimizer factory
        self._w = np.zeros(x.shape[1], dtype=np.float32)
        self._b = np.zeros(1, dtype=np.float32)
        self._model_parameters = [self._w, self._b]
        self._optimizer = OptFactory().get_optimizer_by_name(
            optimizer, self._model_parameters, lr, epoch)

        ims = []
        bar = ProgressBar(max_value=epoch, name="LinearSVM")
        repeat = self._get_train_repeat(x, batch_size)
        for step in range(epoch):
            self._optimizer.update()
            batch_loss = self._batch_training(
                x, y, batch_size, repeat, sample_weight, c)
            if batch_loss < tol:
                bar.terminate()
                break
            self._handle_animation(step, x, y, ims, animation_params,
                                   *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
예제 #5
0
    def fit(self,
            x,
            y,
            c=None,
            lr=None,
            batch_size=None,
            epoch=None,
            tol=None,
            optimizer=None,
            animation_params=None):
        """
        Train the TensorFlow linear SVM (hinge loss + l2 weight penalty).

        Hyper-parameters left as None fall back to self._params; training
        stops early once the batch loss drops below tol.
        """
        params = self._params
        c = params["c"] if c is None else c
        lr = params["lr"] if lr is None else lr
        batch_size = params["batch_size"] if batch_size is None else batch_size
        epoch = params["epoch"] if epoch is None else epoch
        tol = params["tol"] if tol is None else tol
        optimizer = params["optimizer"] if optimizer is None else optimizer
        *animation_properties, animation_params = self._get_animation_params(
            animation_params)
        x = np.atleast_2d(x)
        y = np.asarray(y)
        y_2d = y[..., None]

        # Build the computation graph: w, b, placeholders, raw margin & sign
        self._w = tf.Variable(np.zeros([x.shape[1], 1]),
                              dtype=tf.float32,
                              name="w")
        self._b = tf.Variable(0., dtype=tf.float32, name="b")
        self._tfx = tf.placeholder(tf.float32, [None, x.shape[1]])
        self._tfy = tf.placeholder(tf.float32, [None, 1])
        self._y_pred_raw = tf.matmul(self._tfx, self._w) + self._b
        self._y_pred = tf.sign(self._y_pred_raw)
        # Hinge loss summed over the batch, plus l2 regularization on w
        hinge = tf.nn.relu(1 - self._tfy * self._y_pred_raw)
        loss = tf.reduce_sum(hinge) + c * tf.nn.l2_loss(self._w)
        train_step = TFOptFac().get_optimizer_by_name(optimizer,
                                                      lr).minimize(loss)
        self._sess.run(tf.global_variables_initializer())

        ims = []
        bar = ProgressBar(max_value=epoch, name="TFLinearSVM")
        repeat = self._get_train_repeat(x, batch_size)
        for step in range(epoch):
            cur_loss = self._batch_training(x, y_2d, batch_size, repeat, loss,
                                            train_step)
            if cur_loss < tol:
                bar.terminate()
                break
            self._handle_animation(step, x, y, ims, animation_params,
                                   *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
예제 #6
0
        def fit(self,
                x,
                y,
                c=None,
                lr=None,
                batch_size=None,
                epoch=None,
                tol=None,
                optimizer=None,
                animation_params=None):
            """
            Train the PyTorch linear SVM with the configured optimizer.

            Hyper-parameters left as None fall back to self._params; training
            stops early once the batch loss drops below tol.
            """
            if c is None:
                c = self._params["c"]
            if lr is None:
                lr = self._params["lr"]
            if batch_size is None:
                batch_size = self._params["batch_size"]
            if epoch is None:
                epoch = self._params["epoch"]
            if tol is None:
                tol = self._params["tol"]
            if optimizer is None:
                optimizer = self._params["optimizer"]
            *animation_properties, animation_params = self._get_animation_params(
                animation_params)
            x = np.atleast_2d(x)
            y = np.asarray(y, dtype=np.float32)
            y_2d = y[..., None]

            # Trainable weight column vector plus a scalar bias
            self._w = Variable(torch.rand([x.shape[1], 1]), requires_grad=True)
            self._b = Variable(torch.Tensor([0.]), requires_grad=True)
            self._model_parameters = [self._w, self._b]
            self._optimizer = PyTorchOptFac().get_optimizer_by_name(
                optimizer, self._model_parameters, lr, epoch)

            x, y, y_2d = self._arr_to_variable(False, x, y, y_2d)

            def loss_function(_y, _y_pred):
                # Bind the regularization strength c into the model's loss
                return self._loss(_y, _y_pred, c)

            ims = []
            bar = ProgressBar(max_value=epoch, name="TorchLinearSVM")
            repeat = self._get_train_repeat(x, batch_size)
            for step in range(epoch):
                self._optimizer.update()
                cur_loss = self.batch_training(x, y_2d, batch_size, repeat,
                                               loss_function)
                if cur_loss < tol:
                    bar.terminate()
                    break
                self._handle_animation(step, x, y, ims, animation_params,
                                       *animation_properties)
                bar.update()
            self._handle_mp4(ims, animation_properties)
예제 #7
0
    def fit(self, x, y, sample_weight=None, c=None, lr=None, optimizer=None,
            batch_size=None, epoch=None, tol=None, animation_params=None):
        """
        Fit the linear SVM with mini-batch gradient updates.

        None-valued hyper-parameters are taken from self._params; the loop
        exits early when the training loss falls under tol.
        """
        params = self._params
        if sample_weight is None:
            sample_weight = params["sample_weight"]
        if c is None:
            c = params["c"]
        if lr is None:
            lr = params["lr"]
        if batch_size is None:
            batch_size = params["batch_size"]
        if epoch is None:
            epoch = params["epoch"]
        if tol is None:
            tol = params["tol"]
        if optimizer is None:
            optimizer = params["optimizer"]
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        x = np.atleast_2d(x)
        y = np.asarray(y, dtype=np.float32)
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            # Scale user-provided weights by the number of samples
            sample_weight = np.asarray(sample_weight) * len(y)

        # Initialize the parameters and hand them to the optimizer
        self._w = np.zeros(x.shape[1], dtype=np.float32)
        self._b = np.zeros(1, dtype=np.float32)
        self._model_parameters = [self._w, self._b]
        self._optimizer = OptFactory().get_optimizer_by_name(
            optimizer, self._model_parameters, lr, epoch)

        bar = ProgressBar(max_value=epoch, name="LinearSVM")
        ims = []
        n_repeat = self._get_train_repeat(x, batch_size)
        for epoch_idx in range(epoch):
            self._optimizer.update()
            cur_loss = self._batch_training(x, y, batch_size, n_repeat, sample_weight, c)
            if cur_loss < tol:
                bar.terminate()
                break
            self._handle_animation(epoch_idx, x, y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
예제 #8
0
        def fit(self, x, y, c=None, lr=None, batch_size=None, epoch=None, tol=None,
                optimizer=None, animation_params=None):
            """
            Train the PyTorch linear SVM.

            Hyper-parameters left as None default to self._params; training
            stops once the batch loss is below tol.
            """
            params = self._params
            c = params["c"] if c is None else c
            lr = params["lr"] if lr is None else lr
            batch_size = params["batch_size"] if batch_size is None else batch_size
            epoch = params["epoch"] if epoch is None else epoch
            tol = params["tol"] if tol is None else tol
            optimizer = params["optimizer"] if optimizer is None else optimizer
            *animation_properties, animation_params = self._get_animation_params(animation_params)
            x = np.atleast_2d(x)
            y = np.asarray(y, dtype=np.float32)
            y_2d = y[..., None]

            # Trainable weight column and scalar bias
            self._w = Variable(torch.rand([x.shape[1], 1]), requires_grad=True)
            self._b = Variable(torch.Tensor([0.]), requires_grad=True)
            self._model_parameters = [self._w, self._b]
            self._optimizer = PyTorchOptFac().get_optimizer_by_name(
                optimizer, self._model_parameters, lr, epoch)

            x, y, y_2d = self._arr_to_variable(False, x, y, y_2d)

            def loss_function(_y, _y_pred):
                # Bind the regularization strength c into the loss
                return self._loss(_y, _y_pred, c)

            bar = ProgressBar(max_value=epoch, name="TorchLinearSVM")
            ims = []
            n_repeat = self._get_train_repeat(x, batch_size)
            for epoch_idx in range(epoch):
                self._optimizer.update()
                cur_loss = self.batch_training(x, y_2d, batch_size, n_repeat, loss_function)
                if cur_loss < tol:
                    bar.terminate()
                    break
                self._handle_animation(epoch_idx, x, y, ims, animation_params, *animation_properties)
                bar.update()
            self._handle_mp4(ims, animation_properties)
예제 #9
0
    def fit(self,
            x,
            y,
            sample_weight=None,
            lr=None,
            epoch=None,
            animation_params=None):
        """
        Train the perceptron, each epoch correcting the most-violated sample.
        """
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        *animation_properties, animation_params = self._get_animation_params(
            animation_params)

        x = np.atleast_2d(x)
        y = np.asarray(y)
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            # Scale user-provided weights by the sample count
            sample_weight = np.asarray(sample_weight) * len(y)

        # Random weight initialization; bias starts at zero
        self._w = np.random.random(x.shape[1])
        self._b = 0.
        ims = []
        bar = ProgressBar(max_value=epoch, name="Perceptron")
        for epoch_idx in range(epoch):
            raw = self.predict(x, True)
            scores = -y * raw * sample_weight
            target = np.argmax(scores)
            if scores[target] < 0:
                # No positive violation left — all samples classified correctly
                bar.terminate()
                break
            norm = np.linalg.norm(self._w)
            step = lr * y[target] * sample_weight[target] / norm
            self._w += step * (x[target] - raw[target] * self._w / norm ** 2)
            self._b += step
            self._handle_animation(epoch_idx, x, y, ims, animation_params,
                                   *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
예제 #10
0
    def fit(self, x, y, c=None, lr=None, batch_size=None, epoch=None, tol=None,
            optimizer=None, animation_params=None):
        """
        Train the TensorFlow linear SVM (hinge loss + l2 weight penalty).
        """
        # Fall back to stored hyper-parameters for anything left as None
        if c is None:
            c = self._params["c"]
        if lr is None:
            lr = self._params["lr"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if epoch is None:
            epoch = self._params["epoch"]
        if tol is None:
            tol = self._params["tol"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        x = np.atleast_2d(x)
        y = np.asarray(y)
        y_2d = y[..., None]

        # Graph construction: parameters, placeholders, raw margin and sign
        self._w = tf.Variable(np.zeros([x.shape[1], 1]), dtype=tf.float32, name="w")
        self._b = tf.Variable(0., dtype=tf.float32, name="b")
        self._tfx = tf.placeholder(tf.float32, [None, x.shape[1]])
        self._tfy = tf.placeholder(tf.float32, [None, 1])
        self._y_pred_raw = tf.matmul(self._tfx, self._w) + self._b
        self._y_pred = tf.sign(self._y_pred_raw)
        # Summed hinge loss plus l2 regularization on the weights
        hinge = tf.nn.relu(1 - self._tfy * self._y_pred_raw)
        loss = tf.reduce_sum(hinge) + c * tf.nn.l2_loss(self._w)
        train_step = TFOptFac().get_optimizer_by_name(optimizer, lr).minimize(loss)
        self._sess.run(tf.global_variables_initializer())
        ims = []
        bar = ProgressBar(max_value=epoch, name="TFLinearSVM")
        n_repeat = self._get_train_repeat(x, batch_size)
        for epoch_idx in range(epoch):
            cur_loss = self._batch_training(x, y_2d, batch_size, n_repeat, loss, train_step)
            if cur_loss < tol:
                bar.terminate()
                break
            self._handle_animation(epoch_idx, x, y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
예제 #11
0
    def fit(self, x, y, sample_weight=None, kernel=None, epoch=None,
            x_test=None, y_test=None, metrics=None, animation_params=None, **kwargs):
        """
        Train the kernelized model.

        :param x                : Training samples; coerced to a 2-d array.
        :param y                : Training labels (training predictions are taken
                                  through np.sign, so +/-1 labels appear to be
                                  assumed — TODO confirm).
        :param sample_weight    : Optional per-sample weights; falls back to
                                  self._params["sample_weight"].
        :param kernel           : "poly" or "rbf"; any other value raises
                                  NotImplementedError.
        :param epoch            : Maximum number of training iterations.
        :param x_test / y_test  : Optional validation set used for metric logging.
        :param metrics          : Metric callables evaluated each epoch when
                                  logging is enabled.
        :param animation_params : Forwarded to the animation helpers.
        :param kwargs           : Kernel hyper-parameters ("p" for poly, "gamma"
                                  for rbf) plus name-matched overrides for
                                  self._fit_args.
        :return                 : logs — one list of metric values per epoch.
        """
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]  # type: list
        if kernel is None:
            kernel = self._params["kernel"]
        if epoch is None:
            epoch = self._params["epoch"]
        if x_test is None:
            x_test = self._params["x_test"]  # type: list
        if y_test is None:
            y_test = self._params["y_test"]  # type: list
        if metrics is None:
            metrics = self._params["metrics"]  # type: list
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        self._x, self._y = np.atleast_2d(x), np.asarray(y)
        # Resolve the kernel callable along with its display name/parameter text
        if kernel == "poly":
            _p = kwargs.get("p", self._params["p"])
            self._kernel_name = "Polynomial"
            self._kernel_param = "degree = {}".format(_p)
            self._kernel = lambda _x, _y: KernelBase._poly(_x, _y, _p)
        elif kernel == "rbf":
            # Default gamma follows the common 1 / n_features heuristic
            _gamma = kwargs.get("gamma", 1 / self._x.shape[1])
            self._kernel_name = "RBF"
            self._kernel_param = r"$\gamma = {:8.6}$".format(_gamma)
            self._kernel = lambda _x, _y: KernelBase._rbf(_x, _y, _gamma)
        else:
            raise NotImplementedError("Kernel '{}' has not defined".format(kernel))
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            # Scale user-provided weights by the sample count
            sample_weight = np.asarray(sample_weight) * len(y)

        # Dual coefficients, weights, and cached raw predictions — one per sample
        self._alpha, self._w, self._prediction_cache = (
            np.zeros(len(x)), np.zeros(len(x)), np.zeros(len(x)))
        # Train-vs-train Gram matrix under the chosen kernel
        self._gram = self._kernel(self._x, self._x)
        self._b = 0
        self._prepare(sample_weight, **kwargs)

        # Extra per-algorithm fit arguments; kwargs may override them by name
        fit_args, logs, ims = [], [], []
        for name, arg in zip(self._fit_args_names, self._fit_args):
            if name in kwargs:
                arg = kwargs[name]
            fit_args.append(arg)
        if self._do_log:
            if metrics is not None:
                self.get_metrics(metrics)
            test_gram = None
            # With a validation set, pre-compute its Gram matrix against the
            # training samples; otherwise metrics run on the training data
            if x_test is not None and y_test is not None:
                x_cv, y_cv = np.atleast_2d(x_test), np.asarray(y_test)
                test_gram = self._kernel(self._x, x_cv)
            else:
                x_cv, y_cv = self._x, self._y
        else:
            y_cv = test_gram = None

        if self._is_torch:
            # Torch backend works on tensors; convert once before the loop
            y_cv, self._x, self._y = self._torch_transform(y_cv)

        bar = ProgressBar(max_value=epoch, name=str(self))
        for i in range(epoch):
            # A truthy return from _fit signals an early stop (e.g. convergence)
            if self._fit(sample_weight, *fit_args):
                bar.terminate()
                break
            if self._do_log and metrics is not None:
                local_logs = []
                for metric in metrics:
                    if test_gram is None:
                        # Evaluate on the training set via the cached predictions
                        if self._is_torch:
                            local_y = self._y.data.numpy()
                        else:
                            local_y = self._y
                        local_logs.append(metric(local_y, np.sign(self._prediction_cache)))
                    else:
                        # Evaluate on the held-out set via its pre-computed Gram
                        if self._is_torch:
                            local_y = y_cv.data.numpy()
                        else:
                            local_y = y_cv
                        local_logs.append(metric(local_y, self.predict(test_gram, gram_provided=True)))
                logs.append(local_logs)
            self._handle_animation(i, self._x, self._y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
        return logs
예제 #12
0
    def fit(self, x, y, sample_weight=None, kernel=None, epoch=None,
            x_test=None, y_test=None, metrics=None, animation_params=None, **kwargs):
        """
        Main training procedure of the kernelized model.

        :param x                : Training samples (coerced to 2-d).
        :param y                : Training labels; predictions are compared after
                                  np.sign, which suggests binary +/-1 labels —
                                  TODO confirm against callers.
        :param sample_weight    : Optional per-sample weights (default from
                                  self._params).
        :param kernel           : Kernel name, "poly" or "rbf"; otherwise
                                  NotImplementedError is raised.
        :param epoch            : Maximum training iterations.
        :param x_test / y_test  : Optional validation data for metric logging.
        :param metrics          : Metric callables logged once per epoch when
                                  self._do_log is set.
        :param animation_params : Passed through to the animation helpers.
        :param kwargs           : Kernel hyper-parameters ("p", "gamma") and
                                  name-matched overrides of self._fit_args.
        :return                 : logs — per-epoch lists of metric values.
        """
        if sample_weight is None:
            sample_weight = self._params["sample_weight"]  # type: list
        if kernel is None:
            kernel = self._params["kernel"]
        if epoch is None:
            epoch = self._params["epoch"]
        if x_test is None:
            x_test = self._params["x_test"]  # type: list
        if y_test is None:
            y_test = self._params["y_test"]  # type: list
        if metrics is None:
            metrics = self._params["metrics"]  # type: list
        *animation_properties, animation_params = self._get_animation_params(animation_params)
        self._x, self._y = np.atleast_2d(x), np.asarray(y)
        # Select and bind the kernel function plus its display strings
        if kernel == "poly":
            _p = kwargs.get("p", self._params["p"])
            self._kernel_name = "Polynomial"
            self._kernel_param = "degree = {}".format(_p)
            self._kernel = lambda _x, _y: KernelBase._poly(_x, _y, _p)
        elif kernel == "rbf":
            # gamma defaults to 1 / n_features when not supplied
            _gamma = kwargs.get("gamma", 1 / self._x.shape[1])
            self._kernel_name = "RBF"
            self._kernel_param = r"$\gamma = {:8.6}$".format(_gamma)
            self._kernel = lambda _x, _y: KernelBase._rbf(_x, _y, _gamma)
        else:
            raise NotImplementedError("Kernel '{}' has not defined".format(kernel))
        if sample_weight is None:
            sample_weight = np.ones(len(y))
        else:
            # User weights are rescaled by the number of samples
            sample_weight = np.asarray(sample_weight) * len(y)

        # Per-sample dual coefficients, weights, and prediction cache
        self._alpha, self._w, self._prediction_cache = (
            np.zeros(len(x)), np.zeros(len(x)), np.zeros(len(x)))
        # Gram matrix of the training set against itself
        self._gram = self._kernel(self._x, self._x)
        self._b = 0
        self._prepare(sample_weight, **kwargs)

        # Collect algorithm-specific fit arguments, honoring kwargs overrides
        fit_args, logs, ims = [], [], []
        for name, arg in zip(self._fit_args_names, self._fit_args):
            if name in kwargs:
                arg = kwargs[name]
            fit_args.append(arg)
        if self._do_log:
            if metrics is not None:
                self.get_metrics(metrics)
            test_gram = None
            # Pre-compute the validation Gram matrix when a test set is given;
            # otherwise metrics will be computed on the training data itself
            if x_test is not None and y_test is not None:
                x_cv, y_cv = np.atleast_2d(x_test), np.asarray(y_test)
                test_gram = self._kernel(self._x, x_cv)
            else:
                x_cv, y_cv = self._x, self._y
        else:
            y_cv = test_gram = None

        if self._is_torch:
            # One-time conversion to torch tensors for the torch backend
            y_cv, self._x, self._y = self._torch_transform(y_cv)

        bar = ProgressBar(max_value=epoch, name=str(self))
        for i in range(epoch):
            # _fit returning truthy stops training early
            if self._fit(sample_weight, *fit_args):
                bar.terminate()
                break
            if self._do_log and metrics is not None:
                local_logs = []
                for metric in metrics:
                    if test_gram is None:
                        # Training-set evaluation using the cached predictions
                        if self._is_torch:
                            local_y = self._y.data.numpy()
                        else:
                            local_y = self._y
                        local_logs.append(metric(local_y, np.sign(self._prediction_cache)))
                    else:
                        # Validation-set evaluation via the pre-computed Gram
                        if self._is_torch:
                            local_y = y_cv.data.numpy()
                        else:
                            local_y = y_cv
                        local_logs.append(metric(local_y, self.predict(test_gram, gram_provided=True)))
                logs.append(local_logs)
            self._handle_animation(i, self._x, self._y, ims, animation_params, *animation_properties)
            bar.update()
        self._handle_mp4(ims, animation_properties)
        return logs