Example #1
# Only __init__ is shown in this example; the enclosing class and the
# Statistic helper are assumed context (the name NeuralNetwork is hypothetical).
class NeuralNetwork(object):

    def __init__(self, my_lambda=0, file_name="dump.txt"):
        super().__init__()
        # load the training set via the Statistic helper
        statistic = Statistic(output=file_name)
        x, y, self.m = statistic.get_training_set()

        self.X = x[:, :]
        self.Y = y[:]

        # number of training examples (recomputed from the labels)
        self.m = len(self.Y)
        self.output_layer_size = 3

        # alternative: load the data from a MATLAB file instead
        # (requires `from scipy.io import loadmat`)
        # data = loadmat('ex4data1.mat')
        # self.X = data['X'][0:500, :]
        # self.Y = data['y'][0:500, :]
        # self.m = len(self.Y)
        # self.output_layer_size = 10

        self.my_lambda = my_lambda
        self.input_layer_size = self.X.shape[1]
        self.internal_layer_size = 25

        # weight matrices and optimization state, filled in later
        self.theta1 = None
        self.theta2 = None
        self.w1 = None
        self.w2 = None
        self.weights = None
        self.j = []  # cost history
        self._randomize_thetas()
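
A minimal sketch of _randomize_thetas, which is called above but not shown in the snippet; it assumes the usual symmetry-breaking random initialization of the two weight matrices (the epsilon value is an assumption, not from the original code):

import numpy as np

def _randomize_thetas(self):
    # hypothetical sketch: small random weights in [-epsilon, epsilon];
    # the "+ 1" columns account for the bias unit feeding each layer
    epsilon = 0.12  # assumed initialization range
    self.theta1 = np.random.rand(
        self.internal_layer_size, self.input_layer_size + 1) * 2 * epsilon - epsilon
    self.theta2 = np.random.rand(
        self.output_layer_size, self.internal_layer_size + 1) * 2 * epsilon - epsilon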
Example #2
# Assumed context: numpy and scipy.optimize, plus the Statistic helper and a
# sigmoid function defined elsewhere in the project.
import numpy as np
import scipy.optimize as op

class LogisticRegression(object):

    def __init__(self, class_range=3, file_name="", my_lambda=1):
        self.class_range = class_range
        self.my_lambda = my_lambda
        # one cost trace per class
        self.cost_data = [[] for _ in range(class_range)]
        self.stat = Statistic(output=file_name)
        self.X, self.Y, self.m = self.stat.get_training_set()
        # one weight vector per class (one-vs-all)
        self.weights = np.zeros([self.class_range, self.X.shape[1]])

    def cost_function(self, initial_theta, X, y, i):
        # L2 regularization, excluding the bias term theta[0]
        zero_theta = initial_theta.copy()
        zero_theta[0] = 0
        reg = (self.my_lambda * np.power(zero_theta, 2).sum()) / 2
        # regularized cross-entropy cost, averaged over the m examples
        sig = sigmoid(np.matmul(X, initial_theta))
        cost = (-np.matmul(np.transpose(y), np.log(sig))
                - np.matmul(np.transpose(1 - y), np.log(1 - sig)) + reg) / self.m
        self.cost_data[i].append(cost)

        return cost

    def gradient(self, initial_theta, X, y, i):
        # regularized gradient; the bias term is again left unpenalized
        zero_theta = initial_theta.copy()
        zero_theta[0] = 0
        sig = sigmoid(np.matmul(X, initial_theta))
        reg = self.my_lambda * zero_theta
        grad = (np.matmul(np.transpose(X), sig - y) + reg) / self.m
        return grad

    def optimize_one(self, index):
        # one-vs-all: fit a binary classifier for class `index`
        initial_theta = np.zeros(len(self.weights[index]))
        y = (self.Y == index).astype(int)
        result = op.minimize(fun=self.cost_function,
                             x0=initial_theta,
                             args=(self.X, y, index),
                             method='TNC',
                             jac=self.gradient)
        print(result)
        return result.x

    def optimize(self):
        # fit one classifier per class and stack the learned weight vectors
        t = list()
        for i in range(self.class_range):
            t.append(self.optimize_one(i))

        self.weights = np.asarray(t)
        return self.weights

    def predict(self, x):
        # sigmoid is monotonic, so the argmax of the raw linear scores picks
        # the same class as the argmax of the probabilities
        w = np.matmul(self.weights, x).tolist()
        print(w, w.index(max(w)))
        return w.index(max(w))
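
A minimal usage sketch, assuming Statistic.get_training_set() returns a bias-augmented feature matrix, an integer label vector, and the example count, and that sigmoid is available (scipy.special.expit is one option); the file name "stats.txt" is hypothetical:

from scipy.special import expit as sigmoid  # assumed sigmoid implementation

model = LogisticRegression(class_range=3, file_name="stats.txt", my_lambda=1)
model.optimize()                 # fits one weight vector per class

x = model.X[0]                   # classify the first training example
print(model.predict(x), model.Y[0])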