Example #1
 def fit(self, X, y):
     _X = copy.deepcopy(X)
     _y = copy.deepcopy(y)
     assert self.__check_valid(_X, _y), 'input is invalid.'
     # first call: build the label/index maps and randomly initialize W and b
     if self._is_trained is False:
         self._label2ix = {label: i for i, label in enumerate(np.unique(_y))}
         self._ix2label = {i: label for i, label in enumerate(np.unique(_y))}
         self._nFeat = _X.shape[1]
         self._nClass = len(np.unique(_y))
         assert self._nClass == 2, 'the number of classes must be 2.'
         W = np.random.uniform(-0.08, 0.08, (self._nFeat, 1))
         b = np.zeros(1)
         self._parameter_shape.append(W.shape)
         self._parameter_shape.append(b.shape)
         self._parameter = roll_parameter([W, b])
     # map the raw labels to 0/1 class indices
     _y = np.array([self._label2ix[label] for label in _y])
     nSize = _X.shape[0]
     assert nSize >= self._batch_size, 'batch size must be less than or equal to the number of samples in X.'
     # optimizer = StochasticGradientDescent(learning_rate=self._learning_rate, batch_size=self._batch_size,
     #                                       decay_strategy='anneal', max_iter=self._max_iter, is_plot_loss=True)
     # optimizer = MomentumSGD(learning_rate=self._learning_rate, batch_size=self._batch_size, momentum=0.9,
     #                         momentum_type='standard', max_iter=self._max_iter, is_plot_loss=True)
     optimizer = LBFGS(max_iter=self._max_iter)
     self._parameter = optimizer.optim(feval=self.feval, X=_X, y=_y, parameter=self._parameter)
     self._is_trained = True
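
The snippet above relies on two helpers that are not shown: roll_parameter, which packs a list of arrays into one flat vector, and unroll_parameter, which restores the arrays from the shapes recorded in self._parameter_shape. A minimal sketch of what such helpers could look like (an assumption about their behavior, not the project's actual code):

 import numpy as np

 def roll_parameter(param_list):
     # Flatten every array and concatenate into a single 1-D parameter vector.
     return np.concatenate([p.ravel() for p in param_list])

 def unroll_parameter(parameter, shapes):
     # Split the flat vector back into arrays with the recorded shapes.
     params, offset = [], 0
     for shape in shapes:
         size = int(np.prod(shape))
         params.append(parameter[offset:offset + size].reshape(shape))
         offset += size
     return params
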
Example #2
 def fit(self, X, y):
     _X = copy.deepcopy(X)
     _y = copy.deepcopy(y)
     assert self.__check_valid(_X, _y), 'input is invalid.'
     if self._is_trained is False:
         self._label2ix = {
             label: i
             for i, label in enumerate(np.unique(_y))
         }
         self._ix2label = {
             i: label
             for i, label in enumerate(np.unique(_y))
         }
         self._nFeat = _X.shape[1]
         self._nClass = len(np.unique(_y))
         assert self._nClass == 2, 'the number of classes must be 2.'
         W = np.random.uniform(-0.08, 0.08, (self._nFeat, 1))
         b = np.zeros(1)
         self._parameter_shape.append(W.shape)
         self._parameter_shape.append(b.shape)
         self._parameter = roll_parameter([W, b])
     _y = np.array([self._label2ix[label] for label in _y])
     nSize = _X.shape[0]
     assert nSize >= self._batch_size, 'batch size must be less than or equal to the number of samples in X.'
     # optimizer = StochasticGradientDescent(learning_rate=self._learning_rate, batch_size=self._batch_size,
     #                                       decay_strategy='anneal', max_iter=self._max_iter, is_plot_loss=True)
     # optimizer = MomentumSGD(learning_rate=self._learning_rate, batch_size=self._batch_size, momentum=0.9,
     #                         momentum_type='standard', max_iter=self._max_iter, is_plot_loss=True)
     optimizer = LBFGS(max_iter=self._max_iter)
     self._parameter = optimizer.optim(feval=self.feval,
                                       X=_X,
                                       y=_y,
                                       parameter=self._parameter)
     self._is_trained = True
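
Both fit variants hand self.feval to an in-house LBFGS optimizer whose implementation is not shown. The feval contract used here (flat parameter vector in, loss plus flat gradient out) is the same one SciPy's L-BFGS-B accepts when jac=True, so a self-contained toy objective can illustrate the call pattern; the quadratic objective below is for demonstration only and is not the project's optimizer:

 import numpy as np
 from scipy.optimize import minimize

 def feval(parameter, X, y):
     # Toy least-squares objective: returns the loss and its gradient
     # with respect to the flat parameter vector, like the methods above.
     residual = X.dot(parameter) - y
     loss = 0.5 * np.mean(residual ** 2)
     grad = X.T.dot(residual) / X.shape[0]
     return loss, grad

 X = np.random.randn(50, 3)
 y = X.dot(np.array([1.0, -2.0, 0.5]))
 result = minimize(feval, np.zeros(3), args=(X, y), jac=True,
                   method='L-BFGS-B', options={'maxiter': 100})
 print(result.x)  # converges towards [1.0, -2.0, 0.5]
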
Example #3
 def fit(self, X, y):
     _X = copy.deepcopy(X)
     _y = copy.deepcopy(y)
     assert self.__check_valid(_X, _y), 'input is invalid.'
     if self._normalize is True:
         _X, _y, self.Xmean, self.ymean, self.Xstd = normalize_data(_X, _y, inplace=True)
     if self._is_trained is False:
         self._nFeat = _X.shape[1]
     # iterative (gradient-based) solution
     if self._solve_type == 'numeric':
         if self._is_trained is False:
             W = np.random.uniform(-0.08, 0.08, (self._nFeat, 1))
             b = np.zeros(1)
             self._parameter_shape.append(W.shape)
             self._parameter_shape.append(b.shape)
             self._parameter = roll_parameter([W, b])
         nSize = _X.shape[0]
         assert nSize >= self._batch_size, 'batch size must be less than or equal to the number of samples in X.'
         optimizer = StochasticGradientDescent(learning_rate=self._learning_rate, batch_size=self._batch_size,
                                               max_iter=self._max_iter, is_plot_loss=self._is_plot_loss)
         # optimizer = ConjugateGradientDescent(max_iter=self._max_iter, is_plot_loss=self._is_plot_loss,
         #                                      epoches_record_loss=10)
         self._parameter = optimizer.optim(feval=self.feval, X=_X, y=_y, parameter=self._parameter)
         if self._normalize is True:
             self._norm_factor = self.ymean
         self._is_trained = True
     # closed-form least-squares solution
     elif self._solve_type == 'analytic':
         self._parameter['coef'] = analytic_solution(_X, _y)
         if self._normalize is True:
             self._parameter['coef'] /= self.Xstd
             self._norm_factor = self.ymean - np.dot(self.Xmean, self._parameter['coef'])
     else:
         raise ValueError('solve_type must be either "numeric" or "analytic".')
     self._is_trained = True
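
The 'analytic' branch delegates to analytic_solution, which is not shown either. Assuming it returns the ordinary least-squares coefficients (the normal-equation solution to X^T X w = X^T y), a drop-in stand-in could be:

 import numpy as np

 def analytic_solution(X, y):
     # Ordinary least squares via a numerically stable solver;
     # equivalent to solving the normal equations X^T X w = X^T y.
     coef, _residuals, _rank, _sv = np.linalg.lstsq(X, y, rcond=None)
     return coef

 # e.g. coef = analytic_solution(X_train, y_train)
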
Example #4
 def feval(self, parameter, X, y):
     y = np.reshape(y, (y.shape[0], 1))
     param_list = unroll_parameter(parameter, self._parameter_shape)
     W, b = param_list[0], param_list[1]
     nSize = X.shape[0]
     # linear prediction h = XW + b, with the bias tiled across the batch
     h = np.dot(X, W) + np.repeat(np.reshape(b, (1, b.shape[0])), X.shape[0], axis=0)
     loss = self._lossor.calculate(y, h)
     residual = h - y
     # batch-averaged gradients with respect to W and b
     grad_W = 1. / nSize * np.dot(X.T, residual)
     grad_b = 1. / nSize * np.sum(residual)
     grad_parameter = roll_parameter([grad_W, grad_b])
     return loss, grad_parameter
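
Because feval returns both the loss and a flat gradient, it is straightforward to validate with a central finite-difference check. A generic sketch that works for any (parameter, X, y) -> (loss, gradient) function with a flat parameter vector:

 import numpy as np

 def gradient_check(feval, parameter, X, y, eps=1e-6):
     # Compare the analytic gradient returned by feval against central
     # finite differences of the loss; returns the worst absolute error.
     _, grad = feval(parameter, X, y)
     numeric = np.zeros_like(parameter, dtype=float)
     for i in range(parameter.size):
         step = np.zeros_like(parameter, dtype=float)
         step[i] = eps
         loss_plus, _ = feval(parameter + step, X, y)
         loss_minus, _ = feval(parameter - step, X, y)
         numeric[i] = (loss_plus - loss_minus) / (2 * eps)
     return np.max(np.abs(np.ravel(grad) - numeric))

 # e.g. gradient_check(model.feval, model._parameter, X_batch, y_batch) should be ~1e-7 or smaller
 # (model here is hypothetical; any object exposing this feval signature works)
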
Example #5
 def feval(self, parameter, X, y):
     y = np.reshape(y, (y.shape[0], 1))
     param_list = unroll_parameter(parameter, self._parameter_shape)
     W, b = param_list[0], param_list[1]
     nSize = X.shape[0]
     proj = np.dot(X, W) + np.repeat(
         np.reshape(b, (1, b.shape[0])), X.shape[0], axis=0)
     # logistic activation turns the projection into class-1 probabilities
     h = sigmoid(proj)
     residual = h - y
     loss = self._lossor.calculate(y, h)
     grad_W = 1. / nSize * np.dot(X.T, residual)
     grad_b = 1. / nSize * np.sum(residual)
     grad_parameter = roll_parameter([grad_W, grad_b])
     return loss, grad_parameter
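
The only difference from the linear feval above is the sigmoid applied to the projection. Notably, when the loss is binary cross-entropy, the derivative through the sigmoid collapses to exactly h - y, which is why grad_W and grad_b keep the same residual form as in Example #4. The project's sigmoid helper is not shown; a numerically stable version might look like this (expects an ndarray such as the (nSize, 1) projection):

 import numpy as np

 def sigmoid(z):
     # Numerically stable logistic function: never exponentiates a large positive value.
     z = np.asarray(z, dtype=float)
     out = np.empty_like(z)
     pos = z >= 0
     out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
     exp_z = np.exp(z[~pos])
     out[~pos] = exp_z / (1.0 + exp_z)
     return out
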