Code example #1
    def computing_cost(self, W, X, Y):
        """

            Compute the regularised hinge-loss cost for the given weights W on the data (X, Y).

            Parameters:
                        W = Weights
                        X = Input Features
                        Y = Target Output


            Output:
                It returns the cost
        
        """
        W = Tensor(W, name="W")
        X = Tensor(X, name="X")
        Y = Tensor(Y, name="Y")

        N = X.shape[0]
        distances = Scalar(1).sub((Y.matmul(X.dot(W))))
        # distances = 1 - Y*(np.dot(X, W))
        # max(0, distance)
        distances[distances.less(Scalar(0))] = Scalar(0)
        # hinge-loss term: C * mean(max(0, 1 - y * (x . W)))
        loss = Scalar(self.regularisation_parameter).mul(sum(distances) / N)
        # total cost: 0.5 * ||W||^2 + hinge-loss term
        cost = Scalar(0.5).mul((W.dot(W))).add(loss)

        return cost
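
For readers more familiar with NumPy, the same cost can be sketched without the Tensor/Scalar wrappers. The sketch below is illustrative only; `C` stands for the regularisation parameter the class stores as `self.regularisation_parameter`:

    import numpy as np

    def hinge_cost(W, X, Y, C):
        # hinge distances: max(0, 1 - y_i * (x_i . W)) for every sample
        distances = np.maximum(0, 1 - Y * X.dot(W))
        # cost = 0.5 * ||W||^2 + C * mean hinge loss
        return 0.5 * np.dot(W, W) + C * np.mean(distances)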
Code example #2
    def calculate_cost_gradient(self, W, X_batch, Y_batch):
        """
        
        Compute the gradient of the cost function with respect to the weights.

        Parameters:
                    W = Weights
                    X_batch = Input features for the batch (or a single sample, depending on the gradient descent variant used)
                    Y_batch = Target values for the batch (or a single sample, depending on the gradient descent variant used)

        Output:
                Gradient of the cost with respect to W

        """
        W = Tensor(W, name="W")
        X_batch = Tensor(X_batch, name="X_batch")
        Y_batch = Tensor(Y_batch, name="Y_batch")

        # if type(Y_batch) == np.float64:
        #     Y_batch = np.array([Y_batch])
        #     X_batch = np.array([X_batch])

        distance = Scalar(1).sub((Y_batch.matmul(X_batch.dot(W))))
        dw = np.zeros(len(W))
        dw = Tensor(dw, name="dw")

        for ind, d in enumerate(distance.output):

            if Scalar(max(0, d)).equal(Scalar(0)):
                # sample lies outside the margin: only the regularisation term contributes
                di = W

            else:
                # sample violates the margin: subtract C * y_i * x_i from W
                di = W.sub(
                    Scalar(self.regularisation_parameter).mul(
                        Y_batch.output[ind].mul(X_batch.output[ind])))

            dw = dw.add(di)

        dw = dw.div(len(Y_batch))  # average over the batch

        return dw
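
Similarly, a plain-NumPy sketch of the same sub-gradient computation, again purely for illustration and with `C` standing for the regularisation parameter:

    import numpy as np

    def hinge_cost_gradient(W, X_batch, Y_batch, C):
        distances = 1 - Y_batch * X_batch.dot(W)
        dw = np.zeros_like(W)
        for i, d in enumerate(distances):
            if max(0, d) == 0:
                # outside the margin: only the regularisation term contributes
                dw += W
            else:
                # on the wrong side of the margin: subtract C * y_i * x_i
                dw += W - C * Y_batch[i] * X_batch[i]
        return dw / len(Y_batch)  # average over the batch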
Code example #3
    def predict(self, X):
        """
        Predict the raw decision scores (X . W) for the input features X.
        """
        X = Tensor(X, name="X")
        return X.dot(self._coefficients)
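
Assuming the model has been fitted and the targets are encoded as {-1, +1}, the raw scores returned by predict can be mapped to class labels with np.sign. The names model and X_test below are hypothetical:

    import numpy as np

    scores = model.predict(X_test)   # raw decision values X . W (a Tensor)
    labels = np.sign(scores.output)  # map scores to {-1, +1} class labels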