Example #1
    def train(self, X, y, iterations=10):
        self.clean()

        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        # Initialize params
        learning_rate = Scalar(self.learning_rate)
        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)),
                         name="weights")

        # 1. Predict
        y_pred = X.matmul(weights, name="y_pred")

        # 2. Compute cost
        cost = self.__compute_cost(y, y_pred, no_samples)

        # 3. Gradient descent - update the weights for a fixed number of steps
        for i in range(iterations):
            y_pred = X.matmul(weights, name="y_pred{}".format(i))
            # MSE gradient: X^T (y_pred - y), scaled by learning_rate / no_samples
            c = X.trans().matmul(y_pred.sub(y))
            d = learning_rate.div(no_samples)
            weights = weights.sub(c.elemul(d), name="weights{}".format(i))
            cost = self.__compute_cost(y,
                                       y_pred,
                                       no_samples,
                                       name="cost{}".format(i))

        return cost, weights
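
For comparison, here is the same batch gradient-descent loop in plain NumPy, without the RavOp op graph. This is a minimal sketch: the standalone function lr_train_numpy, its default arguments, and the cost expression are illustrative, not part of RavOp.

    import numpy as np

    def lr_train_numpy(X, y, learning_rate=0.01, iterations=10):
        # w <- w - (lr / n) * X^T (X w - y), the MSE gradient step
        n, size = X.shape
        weights = np.random.uniform(0, 1, size).reshape((size, 1))
        for _ in range(iterations):
            y_pred = X @ weights
            weights = weights - (learning_rate / n) * (X.T @ (y_pred - y))
        cost = np.sum((y_pred - y) ** 2) / (2 * n)
        return cost, weights

Each RavOp call in the example above (matmul, trans, sub, elemul, div) maps one-to-one onto a line of this loop; the RavOp version builds named ops rather than computing values immediately.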
Example #2
    def train(self, X, y, iterations=10):
        # Remove old ops and start from scratch
        self.clean()

        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        # Initialize params
        learning_rate = Scalar(self._learning_rate)
        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)), name="weights")

        # 1. Predict - Calculate y_pred
        y_pred = self.sigmoid(X.matmul(weights), name="y_pred")

        # 2. Compute cost
        cost = self.__compute_cost(y, y_pred, no_samples)

        # Gradient descent - update the weights for a fixed number of steps
        for i in range(iterations):
            y_pred = self.sigmoid(X.matmul(weights), name="y_pred{}".format(i))
            # Cross-entropy gradient: X^T (y_pred - y), scaled by learning_rate / no_samples
            weights = weights.sub(learning_rate.div(no_samples).elemul(X.trans().matmul(y_pred.sub(y))),
                                  name="weights{}".format(i))
            cost = self.__compute_cost(y=y, y_pred=y_pred, no_samples=no_samples, name="cost{}".format(i))

        return cost, weights
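
The snippet relies on a self.sigmoid helper that is not shown here. Below is a minimal NumPy sketch of the logistic function and of one update step of the loop above; the names sigmoid and logreg_step are illustrative, not taken from the source.

    import numpy as np

    def sigmoid(z):
        # Logistic function: maps X @ w into the (0, 1) range
        return 1.0 / (1.0 + np.exp(-z))

    def logreg_step(weights, X, y, learning_rate):
        # w <- w - (lr / n) * X^T (sigmoid(X w) - y)
        n = X.shape[0]
        y_pred = sigmoid(X @ weights)
        return weights - (learning_rate / n) * (X.T @ (y_pred - y))

Note that the update rule is identical to the linear-regression one; only the prediction is passed through the sigmoid.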
Example #3
    def computing_cost(self, W, X, Y):
        """

            It will calculate the optimal parameters for W and b parameters in order to minimise the cost function.

            Parameters:
                        W = Weights
                        X = Input Features
                        Y = Target Output


            Output:
                It returns the cost
        
        """
        W = Tensor(W, name="W")
        X = Tensor(X, name="X")
        Y = Tensor(Y, name="Y")

        N = X.shape[0]
        # Hinge distances: 1 - Y * (X @ W), elementwise in Y
        distances = Scalar(1).sub(Y.elemul(X.dot(W)))
        # max(0, distance): clamp negative distances to zero
        distances = Tensor(np.maximum(0, distances.output), name="distances")
        loss = Scalar(self.regularisation_parameter).mul(distances.matsum().div(Scalar(N)))
        # Cost = 0.5 * W.W + regularisation term
        cost = Scalar(0.5).mul(W.dot(W)).add(loss)

        return cost
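
In plain NumPy the same cost is one line per term. A sketch, assuming W is a 1-D weight vector and with C standing in for self.regularisation_parameter:

    import numpy as np

    def svm_cost_numpy(W, X, Y, C):
        # 0.5 * ||W||^2 + C * mean(max(0, 1 - Y * (X @ W)))
        distances = np.maximum(0, 1 - Y * (X @ W))
        return 0.5 * np.dot(W, W) + C * np.sum(distances) / X.shape[0]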
Example #4
    def train(self, X, y=None):
        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        row_count = X.shape[0]
        column_count = X.shape[1]
        val = np.mean(np.array(np.arange(row_count)))
        eval = float("inf")   # best split score so far (not updated in this excerpt)
        min_leaf = Scalar(5)  # minimum number of samples allowed in a leaf

        # Scan every feature column and every row value as a candidate split
        for c in range(column_count):
            x = X.output[:, c]  # all rows of feature column c
            x1 = Tensor(x)

            for r in range(row_count):
                r1 = Scalar(x[r])

                lhs = x1.less_equal(r1)
                rhs = x1.greater(r1)

                # Skip thresholds that leave fewer than min_leaf samples
                # on either side of the split
                a = lhs.matsum().less(min_leaf)
                b = rhs.matsum().less(min_leaf)
                if a.logical_or(b):
                    continue

        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)),
                         name="weights")

        y_pred = X.matmul(weights)
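
The inner loop above implements a split-feasibility check: a candidate threshold x[r] is only worth evaluating if it leaves at least min_leaf samples on both sides. A minimal NumPy sketch of the same test (the function name valid_split_rows is illustrative):

    import numpy as np

    def valid_split_rows(x, min_leaf=5):
        # Keep row indices whose value splits x into two sides
        # that each contain at least min_leaf samples
        return [r for r in range(len(x))
                if np.sum(x <= x[r]) >= min_leaf
                and np.sum(x > x[r]) >= min_leaf]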
Example #5
    def train(self, X, y=None):
        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)),
                         name="weights")

        y_pred = X.matmul(weights)
Example #6
    def calculate_cost_gradient(self, W, X_batch, Y_batch):
        """
        
        Calculating Cost for Gradient

        Parameters:
                    X_batch = Input features in batch or likewise depending on the type of gradient descent method used
                    Y_batch = Target features in batch or likewise depending on the type of gradient descent method used

        Output:
                Weights Derivatives

        """
        W = Tensor(W, name="W")
        X_batch = Tensor(X_batch, name="X_batch")
        Y_batch = Tensor(Y_batch, name="Y_batch")

        # Hinge distances: 1 - Y * (X @ W), elementwise in Y
        distance = Scalar(1).sub(Y_batch.elemul(X_batch.dot(W)))
        dw = Tensor(np.zeros(W.shape[0]), name="dw")

        for ind, d in enumerate(distance.output):
            if max(0, d) == 0:
                # Margin satisfied: only the regularisation term contributes
                di = W
            else:
                # Margin violated: subtract the regularised sample gradient
                di = W.sub(
                    Scalar(self.regularisation_parameter).mul(
                        Tensor(Y_batch.output[ind] * X_batch.output[ind])))

            dw = dw.add(di)

        dw = dw.div(Scalar(Y_batch.shape[0]))  # average over the batch

        return dw
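
For reference, the equivalent hinge-loss subgradient in plain NumPy (a sketch, with C standing in for self.regularisation_parameter):

    import numpy as np

    def svm_gradient_numpy(W, X_batch, Y_batch, C):
        # Per sample: dw_i = W if the margin is met, else W - C * y_i * x_i;
        # return the batch average
        distance = 1 - Y_batch * (X_batch @ W)
        dw = np.zeros_like(W)
        for ind, d in enumerate(distance):
            if max(0, d) == 0:
                dw += W
            else:
                dw += W - C * Y_batch[ind] * X_batch[ind]
        return dw / len(Y_batch)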