def _d_predict_d_params(self, X, predicts):
     # sigmoid derivative: d p0 / d theta = p0 * (1 - p0) * x; the p1 row
     # is its negation, filled into out[:, 1, :] below
     X_affine = data_helper.affine_X(X)
     dp0 = X_affine * (predicts[:, 0] *
                       (1.0 - predicts[:, 0]))[:, np.newaxis]
     out = np.zeros((X_affine.shape[0], 2) + self._get_params()[0].shape,
                    dtype=np.float64)
     out[:, 0, :] = dp0
     out[:, 1, :] = -dp0
     return [out]
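Every snippet here leans on data_helper.affine_X. The repo's implementation isn't shown; a minimal sketch consistent with how the bias column is indexed later in grad_f_A (an assumption, not the actual helper):

import numpy as np

def affine_X(X):
    # Hypothetical sketch: append a constant column of ones so the last
    # parameter coordinate acts as a bias term.
    X = np.atleast_2d(X)
    return np.hstack([X, np.ones((X.shape[0], 1), dtype=X.dtype)])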
 def grad_f(params_dict, k, X):
     k_eq_0_out = f(params_dict, 0, X)
     X_affine = data_helper.affine_X(X)
     grad_k_eq_0_out = (k_eq_0_out *
                        (1 - k_eq_0_out))[:, np.newaxis] * X_affine
     return {
         "theta": grad_k_eq_0_out
     } if k == 0 else {
         "theta": -grad_k_eq_0_out
     }
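The p * (1 - p) factor in both gradient snippets is the sigmoid derivative evaluated at the prediction: the derivative of sigmoid(x . theta) with respect to theta_i is p * (1 - p) * x_i. A self-contained finite-difference check of that identity (illustrative, not from the repo):

import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
x = rng.normal(size=3)
theta = rng.normal(size=3)
p = _sigmoid(x @ theta)
analytic = p * (1.0 - p) * x  # the identity used by grad_f above
eps = 1e-6
numeric = np.array([(_sigmoid(x @ (theta + eps * np.eye(3)[i])) - p) / eps
                    for i in range(3)])
assert np.allclose(analytic, numeric, atol=1e-4)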
 def predict(self, X):
     # TODO: clean up the single-sample vs. batch vs. non-affine input handling
     if len(X.shape) == 1:
         X = np.array([X])
     assert (X.shape[1] == self.__theta.shape[0]
             or X.shape[1] == self.__theta.shape[0] - 1)
     if X.shape[1] != self.__theta.shape[0]:
         X = data_helper.affine_X(X)
     out = self.__s(np.dot(X, self.__theta), 0)
     return out[0] if X.shape[0] == 1 else out
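The input normalization the TODO refers to, shown in isolation (theta_len is an assumed stand-in for self.__theta.shape[0]):

import numpy as np

x = np.arange(3, dtype=np.float64)             # single sample, shape (3,)
X = np.array([x]) if len(x.shape) == 1 else x  # promote to a batch, shape (1, 3)
theta_len = 4                                  # assumed: theta holds d + 1 entries
if X.shape[1] == theta_len - 1:
    X = np.hstack([X, np.ones((X.shape[0], 1))])  # same effect as affine_X
assert X.shape == (1, 4)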
Example #4
 def _d_predict_d_theta(self, X, predicts):
     # chain rule through the kernel:
     # d p0 / d theta = p0 * (1 - p0) * d kernel / d theta
     X_affine = data_helper.affine_X(X)
     d_kernel_d_theta = self._d_kernel_d_v2(X_affine, self._theta,
                                            self.__bias, self.__degree)
     dp0 = d_kernel_d_theta * (predicts[:, 0] *
                               (1.0 - predicts[:, 0]))[:, np.newaxis]
     out = np.zeros((X.shape[0], 2) + self._theta.shape, dtype=np.float64)
     out[:, 0, :] = dp0
     out[:, 1, :] = -dp0
     return out
 def train(self, X, y, steps, step_size):
     X = data_helper.affine_X(X)
     self.__rand_init_theta(X.shape[1])
     unique_labels = np.unique(y)
     for step in range(steps):
         grad = self.__gradient(X, y, unique_labels)
         self.__theta -= step_size * grad
         if step % 1000 == 0:
             print("iter: ", step)
             print("expected gini: ", self.expected_gini(X, y))
             print("actual gini: ", self.gini(X, y))
             print("------------------------------------------")
        def grad_f_A(params_dict, k, X, t_X, d_f_0_out):
            X_affine = data_helper.affine_X(X)
            d_t_X = act_func.derivative_wrt_activation(t_X)

            X_affine_times_d_f_0_out = X_affine * d_f_0_out[:, np.newaxis]
            k_0_out = np.zeros((X.shape[0], ) + params_dict["A"].shape)
            # TODO: vectorize this loop if possible
            for k1 in range(k_0_out.shape[1]):
                k_0_out[:,k1,:] = params_dict["theta"][k1] * \
                    X_affine_times_d_f_0_out *\
                    d_t_X[:,k1][:,np.newaxis]

            k_0_out[:, :, k_0_out.shape[2] - 1] *= TRANSFORM_BIAS_DAMPEN
            k_0_out *= TRANSFORM_DAMPEN
            return k_0_out if k == 0 else -k_0_out
        def grad_f_A(params_dict, k, X, t_X, f_0_out):
            X_affine = data_helper.affine_X(X)
            d_t_X = act_func.derivative_wrt_activation(
                t_X)  #d_t_X[i,j] is the derivative of act_func w.r.t. t_X[i,j]
            k_0_out = np.zeros((X.shape[0], ) + params_dict["A"].shape)
            for k1 in range(k_0_out.shape[1]):  # k1 ranges over rows of A
                for r in range(k_0_out.shape[2]):
                    k_0_out[:, k1, r] = params_dict["theta"][k1] * \
                        X_affine[:, r] * \
                        (f_0_out * (1 - f_0_out)) * \
                        d_t_X[:, k1]

            k_0_out[:, :, k_0_out.shape[2] - 1] *= TRANSFORM_BIAS_DAMPEN_FACTOR
            k_0_out *= A_GRAD_DAMPEN_FACTOR
            return k_0_out if k == 0 else -k_0_out
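Both grad_f_A variants call act_func.derivative_wrt_activation(t_X), i.e. the activation derivative written in terms of the activation's own output. A sketch assuming tanh (the repo's actual act_func isn't shown):

import numpy as np

def act(z):
    # assumed activation, chosen purely for illustration
    return np.tanh(z)

def derivative_wrt_activation(a):
    # for tanh, d tanh(z)/dz = 1 - tanh(z)**2, so the derivative can be
    # computed from the already-available activation value a = tanh(z)
    return 1.0 - a ** 2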
 def f(params_dict, k, X):
     X_affine = data_helper.affine_X(X)
     k_eq_0_out = stable_func.sigmoid(
         np.dot(X_affine, params_dict["theta"]))
     return k_eq_0_out if k == 0 else 1 - k_eq_0_out
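stable_func.sigmoid presumably guards against overflow in np.exp; one standard numerically stable formulation (an assumption, not the repo's code):

import numpy as np

def sigmoid(z):
    # never exponentiate a large positive number: use exp(-z) where
    # z >= 0 and exp(z) / (1 + exp(z)) where z < 0
    z = np.asarray(z, dtype=np.float64)
    out = np.empty_like(z)
    pos = z >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    ez = np.exp(z[~pos])
    out[~pos] = ez / (1.0 + ez)
    return out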
 def t(params_dict, X):
     X_affine = data_helper.affine_X(X)
     A_Xs = np.dot(params_dict["A"], X_affine.T).T
     assert (A_Xs.shape[0] == X_affine.shape[0])
     return data_helper.affine_X(act_func.act(A_Xs))
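A shape walk-through of t under assumed sizes (n samples, d raw features, h rows in A):

import numpy as np

n, d, h = 4, 3, 2
X_affine = np.ones((n, d + 1))   # affine_X output: (n, d + 1)
A = np.zeros((h, d + 1))
A_Xs = np.dot(A, X_affine.T).T   # one transformed row per sample: (n, h)
assert A_Xs.shape == (n, h)
# after act and a second affine_X, t returns shape (n, h + 1)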
Example #10
 def predict(self, X):
     X_affine = data_helper.affine_X(X)
     p0 = stable_func.sigmoid(
         self._kernel(X_affine, self._theta, self.__bias, self.__degree))
     return np.column_stack([p0, 1 - p0])
 def predict(self, X):
     X_affine = data_helper.affine_X(X)
     p0 = stable_func.sigmoid(np.dot(X_affine, self._get_params()[0]))
     return np.column_stack([p0, 1 - p0])
Example #12
 def __dot_prepare_transform(self, transformed_X):
     return data_helper.affine_X(transformed_X)
Example #13
 def __transform_prepare_X(self, X):
     return data_helper.affine_X(X)
Example #14
import model.impurity.global_impurity3.node_model3_maker as node_model3_maker
from model.impurity.global_impurity3.global_impurity_node3 import GlobalImpurityNode3
import sklearn.datasets as datasets
import toolbox.data_helper as data_helper
import timeit
import numpy as np
from model.impurity.global_impurity3.global_impurity_model_tree3 import GlobalImpurityModelTree3

np.random.seed(42)
X, y = datasets.load_digits(return_X_y=True)  # alternative: datasets.load_iris(return_X_y=True)
FEATURES = range(X.shape[1])  # or a subset such as [0, 1]
X = X[:, FEATURES]
X = X.astype(np.float64)
X /= 16.0  # digits pixel values lie in [0, 16]
NUM_POINTS = X.shape[0]

X = X[0:NUM_POINTS, :]
y = y[:NUM_POINTS]
X = data_helper.affine_X(X)
unique_labels = np.unique(y)
tree = GlobalImpurityModelTree3(node_model3_maker.logistic_model_at_depth(X.shape[1]))


tree.train(X, y, 10.0, 100000, 1, 10, 0, 0, 5, iters_per_prune=5, print_progress_iters=5)