Example No. 1
    def __init__(self, X_train, Y_train, n_neighbours=None, n_classes=None):
        # store hyperparameters and wrap the training data as RavOp tensors
        self.k = n_neighbours
        self.n_c = n_classes
        self.n = len(X_train)
        self.X_train = Tensor(X_train)
        self.Y = Tensor(Y_train)
Example No. 2
    def train(self, X, y, iter=10):
        self.clean()

        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        # Initialize params
        learning_rate = Scalar(self.learning_rate)
        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)),
                         name="weights")

        # 1. Predict
        y_pred = X.matmul(weights, name="y_pred")

        # 2. Compute cost
        cost = self.__compute_cost(y, y_pred, no_samples)

        # 3. Gradient descent - Update weight values
        for i in range(iter):
            y_pred = X.matmul(weights, name="y_pred{}".format(i))
            c = X.trans().matmul(y_pred.sub(y))  # gradient: X^T (y_pred - y)
            d = learning_rate.div(no_samples)
            weights = weights.sub(c.elemul(d), name="weights{}".format(i))
            cost = self.__compute_cost(y,
                                       y_pred,
                                       no_samples,
                                       name="cost{}".format(i))

        return cost, weights
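For reference, the update this example computes is w ← w − (lr/n) · Xᵀ(ŷ − y). Below is a minimal NumPy-only sketch of the same batch gradient descent loop; the function name and defaults are illustrative, not part of the RavOp API:

    import numpy as np

    def linear_regression_gd(X, y, lr=0.01, iters=10):
        # X: (n_samples, n_features), y: (n_samples, 1)
        n, size = X.shape
        weights = np.random.uniform(0, 1, size).reshape((size, 1))
        for _ in range(iters):
            y_pred = X @ weights                # 1. predict
            grad = X.T @ (y_pred - y) / n       # gradient of the squared error
            weights = weights - lr * grad       # 2. update step
        return weights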
Example No. 3
    def train(self, X, y):
        # Convert input values to RavOp tensors
        self.X = Tensor(X, name="X")
        self.y = Tensor(y, name="y")

        # Initialize params
        self.no_features = Scalar(X.shape[1], name="no_features")
        self.no_samples = Scalar(X.shape[0], name="no_samples")
        self.W = Tensor(np.zeros((self.no_features.output, 1)), name="W")
        self.b = Scalar(0, name="b")
        # self.weights = Tensor(np.random.uniform(0, 1, self.no_features).reshape((self.no_features, 1)), name="weights")

        # gradient descent learning

        for i in range(self.iterations):
            self.update_weights()

        return self

Example No. 4
    def computing_cost(self, W, X, Y):
        """

            It will calculate the optimal parameters for W and b parameters in order to minimise the cost function.

            Parameters:
                        W = Weights
                        X = Input Features
                        Y = Target Output


            Output:
                It returns the cost
        
        """
        W = Tensor(W, name="W")
        X = Tensor(X, name="X")
        Y = Tensor(Y, name="Y")

        N = X.shape[0]
        distances = Scalar(1).sub((Y.matmul(X.dot(W))))
        # distances = 1 - Y*(np.dot(X, W))
        # max(0, distance)
        distances[distances.less(Scalar(0))] = Scalar(0)
        loss = Scalar(self.regularisation_parameter).mul(sum(distances) / N)
        # find cost
        cost = Scalar(0.5).mul((W.dot(W))).add(loss)

        return cost
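The cost above is the regularised hinge loss J(W) = ½‖W‖² + C · (1/N) Σ max(0, 1 − yᵢ·(xᵢ·W)). A plain NumPy sketch of the same computation, assuming W is a 1-D weight vector and C stands in for regularisation_parameter (an illustration, not the RavOp version):

    import numpy as np

    def computing_cost(W, X, Y, C=1.0):
        N = X.shape[0]
        distances = 1 - Y * (X @ W)            # margin distances
        distances[distances < 0] = 0           # hinge: max(0, distance)
        loss = C * (np.sum(distances) / N)
        return 0.5 * np.dot(W, W) + loss       # regulariser + average hinge loss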
Example No. 5
    def train(self, X, y=None):
        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        # 2. Train
        # 3. Accuracy

        row_count = X.shape[0]
        column_count = X.shape[1]
        val = np.mean(np.array(np.arange(row_count)))
        eval = float("inf")
        min_leaf = Scalar(5)

        for c in range(column_count):
            x = X.output[:, c]  # take the full column c

            for r in range(row_count):
                x1 = Tensor(x)
                r1 = Scalar(x[r])

                lhs = x1.less_equal(r1)
                rhs = x1.greater(r1)

                a = lhs.matsum().less(min_leaf)
                b = rhs.matsum().less(min_leaf)
                if a.logical_or(b):
                    continue

        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)),
                         name="weights")

        y_pred = X.matmul(weights)
Example No. 6
    def train(self, X, y, iter=10):
        # Remove old ops and start from scratch
        self.clean()

        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        # Initialize params
        learning_rate = Scalar(self._learning_rate)
        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)), name="weights")

        # 1. Predict - Calculate y_pred
        y_pred = self.sigmoid(X.matmul(weights), name="y_pred")

        # 2. Compute cost
        cost = self.__compute_cost(y, y_pred, no_samples)

        for i in range(iter):
            y_pred = self.sigmoid(X.matmul(weights), name="y_pred{}".format(i))
            weights = weights.sub(learning_rate.div(no_samples).elemul(X.trans().matmul(y_pred.sub(y))),
                                  name="weights{}".format(i))
            cost = self.__compute_cost(y=y, y_pred=y_pred, no_samples=no_samples, name="cost{}".format(i))

        return cost, weights
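For comparison, a NumPy-only sketch of the same logistic regression loop; the update is w ← w − (lr/n) · Xᵀ(σ(Xw) − y). Names and defaults are illustrative:

    import numpy as np

    def logistic_regression_gd(X, y, lr=0.1, iters=10):
        # X: (n_samples, n_features), y: (n_samples, 1) of 0/1 labels
        n, size = X.shape
        weights = np.random.uniform(0, 1, size).reshape((size, 1))
        for _ in range(iters):
            y_pred = 1.0 / (1.0 + np.exp(-(X @ weights)))    # sigmoid(Xw)
            weights = weights - (lr / n) * (X.T @ (y_pred - y))
        return weights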
Example No. 7
    def fit(self, X):
        self.n_q = len(X)
        self.X = Tensor(X)
        d_list = self.eucledian_distance(self.X)
        while d_list.status != "computed":
            pass
        # sort each row of distances and keep the k smallest
        fe = d_list.foreach(operation='sort')
        sl = fe.foreach(operation='slice', begin=0, size=self.k)
        while sl.status != "computed":
            pass
        li = sl.output.tolist()
        for i in range(self.n_q):
            row = R.gather(d_list, Tensor([i])).reshape(shape=[self.n])
            while row.status != 'computed':
                pass
            # map the k smallest distances back to training indices
            ind = R.find_indices(row, values=li[i])
            while ind.status != 'computed':
                pass
            ind = ind.foreach(operation='slice', begin=0, size=1)
            y_neighbours = R.gather(self.Y, ind)
            while y_neighbours.status != 'computed':
                pass
            print(y_neighbours)
Example No. 8
    def remove_less_significant_features(self, X, Y):
        """
        Removing Less significant features

        Parameters:
                    X=input features
                    Y=output
        Output:
                Less important features removed
        """

        X = Tensor(X, name="X")
        Y = Tensor(Y, name="Y")

        sl = 0.05
        regression_ols = None
        columns_dropped = Tensor([])
        for i in range(0, len(X.output.columns)):

            regression_ols = sm.OLS(Y.output, X.output).fit()
            max_col = regression_ols.pvalues.idxmax()
            max_val = regression_ols.pvalues.max()

            if max_val > sl:  # plain float comparison; a RavOp op is always truthy
                X.output.drop(max_col, axis='columns', inplace=True)
                columns_dropped.output = np.append(columns_dropped.output,
                                                   [max_col])
            else:
                break
        regression_ols.summary()

        return columns_dropped
Example No. 9
    def fit(self, X, y):
        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        # normal equation: coefficients = (X^T X)^-1 X^T y
        self._coefficients = R.inv(X.transpose().dot(X)).dot(
            X.transpose()).dot(y, name="coefficients")
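The same closed-form least-squares solution in plain NumPy (illustration only):

    import numpy as np

    def fit_normal_equation(X, y):
        # closed-form least squares: (X^T X)^-1 X^T y
        return np.linalg.inv(X.T @ X) @ X.T @ y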
Example No. 10
    def update_centroids(self):

        gather = self._points.gather(R.find_indices(self._label,
                                                    Tensor([0]))).mean(axis=1)
        for i in range(1, self.k):
            ind = R.find_indices(self._label, Tensor([i]))
            gat = R.gather(self._points, ind).mean(axis=1)
            gather = R.concat(gather, gat)
        self.centroids = gather.reshape(
            shape=[self.k, len(self._points.output[0])])
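Each new centroid is the mean of the points assigned to that cluster. A NumPy sketch of the same step, assuming every label 0..k-1 occurs at least once (the snippet below is illustrative):

    import numpy as np

    def update_centroids(points, labels, k):
        # centroid i = mean of the points whose label equals i
        return np.stack([points[labels == i].mean(axis=0) for i in range(k)])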
Example No. 11
    def fit(self, X, y, n_neighbours=None, n_classes=None):
        if n_neighbours is None or n_classes is None:
            raise ValueError("Required params: n_neighbours, n_classes")

        self._X = Tensor(X, name="X_train")
        self._y = Tensor(y, name="y_train")

        # Params
        self._k = n_neighbours
        self._n_c = n_classes
        self._n = len(X)
Example No. 12
    def fit(self, X, y, n_neighbours=None, n_classes=None):
        if n_neighbours is None or n_classes is None:
            raise ValueError("Required params: n_neighbours, n_classes")

        self._X = Tensor(X, name="X_train")
        self._y = Tensor(y, name="y_train")

        # Params
        self._k = n_neighbours
        self._n_c = n_classes
        self._n = R.shape(self._X)
        while self._n.status != 'computed':
            pass
        self._n = int(self._n.output[0])
Example No. 13
    def train(self, X, y=None):
        # Convert input values to RavOp tensors
        X = Tensor(X, name="X")
        y = Tensor(y, name="y")

        # 2. Train
        # 3. Accuracy

        size = X.shape[1]
        no_samples = Scalar(X.shape[0])
        weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)),
                         name="weights")

        y_pred = X.matmul(weights)
Example No. 14
    def Stochastic_gradient_descent(self, features, outputs):
        """
        
        SGD to calculate Gradients such that only a Single points are considered to update weights

        Parameters:
                    features = Input Features
                    outputs = outputs

        Output:
                weights


        """

        features = Tensor(features, name="features")
        outputs = Tensor(outputs, name="outputs")

        max_epochs = 5000
        weights = Tensor(np.zeros(features.shape[1]), name="weights")
        print(
            "\n\n----------------- STOCHASTIC GRADIENT DESCENT RUNNING -----------------\n\n"
        )

        nth = 0
        prev_cost = float("inf")
        print("\n Previous Cost:", prev_cost)
        cost_threshold = 0.01  # in percent
        # stochastic gradient descent
        for epoch in range(1, max_epochs):
            # shuffle the raw arrays to prevent repeating update cycles
            X, Y = shuffle(features.output, outputs.output)
            for ind, x in enumerate(X):
                ascent = self.calculate_cost_gradient(weights, x, Y[ind])
                weights = weights.sub(Scalar(self.learning_rate).mul(ascent))

            # convergence check on every 2^nth epoch (plain Python ints)
            if epoch == 2 ** nth or epoch == max_epochs - 1:
                cost = self.computing_cost(weights, features, outputs)
                while cost.status != "computed":
                    pass
                print("Epoch is: {} and Cost is: {}".format(epoch, cost.output))
                # stoppage criterion
                if abs(prev_cost - cost.output) < cost_threshold * prev_cost:
                    return weights
                prev_cost = cost.output
                nth += 1
        return weights
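A plain-Python sketch of the same SGD driver, with the convergence check run on every 2^nth epoch; cost_fn and grad_fn are hypothetical stand-ins for computing_cost and calculate_cost_gradient:

    import numpy as np
    from sklearn.utils import shuffle

    def sgd(features, outputs, cost_fn, grad_fn, lr=1e-6,
            max_epochs=5000, cost_threshold=0.01):
        weights = np.zeros(features.shape[1])
        prev_cost = float("inf")
        nth = 0
        for epoch in range(1, max_epochs):
            X, Y = shuffle(features, outputs)   # avoid repeating update cycles
            for ind, x in enumerate(X):
                weights = weights - lr * grad_fn(weights, x, Y[ind])
            if epoch == 2 ** nth or epoch == max_epochs - 1:
                cost = cost_fn(weights, features, outputs)
                if abs(prev_cost - cost) < cost_threshold * prev_cost:
                    return weights              # cost change below threshold
                prev_cost = cost
                nth += 1
        return weights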
Example No. 15
    def euclidean_distance(self, a, b):
        """ 
        
        Returns a scalar Euclidean Distance value between two points on a 2-D plane 
        
        Parameters:

                    a = Point_1 on the plane
                    b = Point_2 on the plane

        Output:

                Scalar Value for Distance between the two points.
        
        """
        a = Tensor(a, name="a")
        sq_cal = square_root(((a.sub(b)).pow(Scalar(2))).sum(axis=1))
        while sq_cal.status != "computed":
            pass
        # np.sqrt(sum((a-b)**2), axis = 1)
        # c = Scalar(2)
        # d = Scalar(4)
        # e = c.add(d)
        # print("\n\nOutput is : \n\n",e.output, "\n\n Status is : \n\n", e.status)
        # a = Tensor([[1]])
        # b = [[2.724]]
        # print("\n what is a \n", a)
        # distance = Tensor(b, name = "d_check")
        # inverse_distance = a.div(distance)
        # while inverse_distance.status != "computed":
        #     pass
        # print("\n inverse_distance_first created \n", inverse_distance)

        return sq_cal
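The equivalent one-liner in plain NumPy, for matching rows of points a and b:

    import numpy as np

    def euclidean_distance(a, b):
        # row-wise Euclidean distance between corresponding points
        return np.sqrt(np.sum((a - b) ** 2, axis=1))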
Example No. 16
    def fit(self, X, k, iter=5, batch_size=None):
        inform_server()
        self.points = Tensor(X)
        self.k = k
        self.iter = iter
        self.batch_size = batch_size
        self.centroids = self.initialize_centroids()
        #self.label=self.closest_centroids(self.points,self.centroids)
        points = self.Mini_batch(self.points, batch_size=batch_size)
        label = self.closest_centroids(points, self.centroids)
        self.centroids = self.update_centroids(points, label)
        inform_server()
        for i in range(iter):
            print('iteration', i)
            points = self.Mini_batch(self.points, batch_size=self.batch_size)
            label = self.closest_centroids(points, self.centroids)
            self.centroids = self.update_centroids(points, label)

            inform_server()

        self.label = self.closest_centroids(self.points, self.centroids)
        while self.label.status != "computed":
            pass
        return self.label
Example No. 17
    def update_centroids(self, points, label):
        while label.status != 'computed':
            pass
        if 0 in label.output:
            gather = R.gather(points, R.find_indices(label,
                                                     values=[0])).mean(axis=1)
        else:
            gather = R.gather(self.centroids, Tensor([0])).expand_dims(axis=0)

        for i in range(1, self.k):
            if i in label.output:
                ind = R.find_indices(label, values=[i])
                gat = R.gather(points, ind).mean(axis=1)
            else:
                gat = R.gather(self.centroids, Tensor([i])).expand_dims(axis=0)
            gather = R.concat(gather, gat)

            while gat.status != 'computed':
                pass
        return gather.reshape(shape=[self.k, len(self.points.output[0])])
Example No. 18
    def score(self, X_test, y_test):
        """ 
        Used to measure performance of our algorithm

        Parameters:
                    X_test = Test data
                    y_test = Target Test Data

        Output:
                Returns the Score Value
        """
        y_test = Tensor(y_test, name="y_test")
        print("\n Shape of y_test \n", y_test.shape)
        y_pred = Tensor(self.predict(X_test), name="y_pred")
        print("\n\n Prediction is ...\n\n", y_pred.output)

        # fraction of predictions that match the targets
        matches = y_pred.equal(y_test)
        while matches.status != "computed":
            pass
        return float(np.sum(matches.output)) / float(len(y_test.output))
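The score is simply the fraction of matching predictions. In plain NumPy (illustration):

    import numpy as np

    def score(y_pred, y_test):
        # fraction of predictions equal to the targets
        return float(np.sum(y_pred == y_test)) / float(len(y_test))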
Example No. 19
    def calculate_cost_gradient(self, W, X_batch, Y_batch):
        """
        
        Calculating Cost for Gradient

        Parameters:
                    X_batch = Input features in batch or likewise depending on the type of gradient descent method used
                    Y_batch = Target features in batch or likewise depending on the type of gradient descent method used

        Output:
                Weights Derivatives

        """
        W = Tensor(W, name="W")
        X_batch = Tensor(X_batch, name="X_batch")
        Y_batch = Tensor(Y_batch, name="Y_batch")

        # if type(Y_batch) == np.float64:
        #     Y_batch = np.array([Y_batch])
        #     X_batch = np.array([X_batch])

        # hinge distances: 1 - y_i * (x_i . W)
        distance = Scalar(1).sub((Y_batch.matmul(X_batch.dot(W))))
        while distance.status != "computed":
            pass
        dw = Tensor(np.zeros(len(W.output)), name="dw")

        for ind, d in enumerate(distance.output):
            if max(0, d) == 0:  # margin satisfied: only the regulariser term
                di = W
            else:
                di = W.sub(
                    Scalar(self.regularisation_parameter).mul(
                        Tensor(Y_batch.output[ind] * X_batch.output[ind])))
            dw = dw.add(di)

        dw = dw.div(Scalar(len(Y_batch.output)))  # average over the batch

        return dw
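The sub-gradient computed here is W when the margin is satisfied, and W − C·yᵢ·xᵢ otherwise, averaged over the batch. A NumPy sketch, assuming X_batch is 2-D and C stands in for regularisation_parameter:

    import numpy as np

    def calculate_cost_gradient(W, X_batch, Y_batch, C=1.0):
        distances = 1 - Y_batch * (X_batch @ W)
        dw = np.zeros(len(W))
        for ind, d in enumerate(distances):
            if max(0, d) == 0:
                di = W                           # margin satisfied
            else:
                di = W - C * Y_batch[ind] * X_batch[ind]
            dw += di
        return dw / len(Y_batch)                 # average over the batch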
Example No. 20
    def predict(self, X):
        n_q = len(X)
        X = Tensor(X)

        d_list = self.__euclidean_distance(X)
        fe = d_list.foreach(operation='sort')
        sl = fe.foreach(operation='slice', begin=0, size=self._k)
        label = R.Tensor([], name="label")

        for i in range(n_q):
            row = R.gather(d_list, Tensor([i])).reshape(shape=[self._n])
            values = sl.gather(Tensor([i])).reshape(shape=[self._k])
            ind = R.find_indices(row, values)
            ind = ind.foreach(operation='slice', begin=0, size=1)
            y_neighbours = R.gather(self._y, ind).reshape(shape=[self._k])
            label = label.concat(R.mode(y_neighbours))

        # Store labels locally
        self._labels = label

        return label
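The same prediction in plain NumPy, without the distributed ops: compute distances, take the k nearest, and vote. Illustrative only; assumes integer class labels:

    import numpy as np

    def knn_predict(X_train, y_train, X_query, k=5):
        labels = []
        for q in X_query:
            dists = np.sqrt(np.sum((X_train - q) ** 2, axis=1))
            nearest = np.argsort(dists)[:k]      # indices of the k smallest
            votes = np.bincount(y_train[nearest].astype(int))
            labels.append(np.argmax(votes))      # mode of the k neighbour labels
        return np.array(labels)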
Example No. 21
    def fit(self, X, y):

        n_samples, n_features = X.shape
        y_ = y
        # y_ = R.where(y <= 0, -1, 1)
        self.w = R.Tensor(np.zeros(n_features))
        self.b = Scalar(0)

        for epoch in range(self.n_iters):
            print("Epoch: ",  epoch)
            for idx, x_i in enumerate(X):
                x_i = Tensor(x_i)
                y_i = Tensor([y_[idx]])
                val = y_i *  (R.dot(x_i, self.w) - self.b)
                condition = R.greater_equal(val,  Scalar(1))
                while condition.status != 'computed':
                    pass
                if condition.output:
                    self.w = self.w - self.lr * (Scalar(2) * self.lambda_param * self.w)
                else:
                    self.w = self.w - self.lr * (Scalar(2) * self.lambda_param * self.w - R.mul(x_i, y_i))
                    self.b = self.b - (self.lr * y_i)
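A NumPy-only sketch of the same soft-margin SVM sub-gradient loop (hinge condition y_i·(x_i·w − b) ≥ 1; names mirror the snippet, hyperparameter values are illustrative):

    import numpy as np

    def svm_fit(X, y, lr=0.001, lambda_param=0.01, n_iters=100):
        n_samples, n_features = X.shape
        w = np.zeros(n_features)
        b = 0.0
        for _ in range(n_iters):
            for idx, x_i in enumerate(X):
                if y[idx] * (np.dot(x_i, w) - b) >= 1:
                    w -= lr * (2 * lambda_param * w)   # only the regulariser
                else:
                    w -= lr * (2 * lambda_param * w - y[idx] * x_i)
                    b -= lr * y[idx]
        return w, b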
Example No. 22
    def test_svm(self, X, Y, X_train, X_test, y_train, y_test, W):
        """
        Testing/Predict SVM

        Parameters:
                    X=input features
                    Y=output class
                    X_train = training input features
                    X_test = testing input features
                    y_train = training output
                    y_test = testing output
                    W=Weights trained

        Output:
                y_test_predicted = Predictions Made

        """

        print("**** TEST THE MODEL ****")

        y_train_predicted = np.array([])

        X = Tensor(X, name="X")
        Y = Tensor(Y, name="Y")
        X_train = Tensor(X_train, name="X_train")
        X_test = Tensor(X_test, name="X_test")
        y_train = Tensor(y_train, name="y_train")
        y_test = Tensor(y_test, name="y_test")

        for i in range(X_train.output.shape[0]):
            yp = np.sign((X_train.output.to_numpy()[i]).dot(W))
            y_train_predicted = np.append(y_train_predicted, yp)

        y_test_predicted = np.array([])
        for i in range(X_test.shape[0]):
            yp = np.sign((X_test.output.to_numpy()[i]).dot(W))
            y_test_predicted = np.append(y_test_predicted, yp)

        print("accuracy on test dataset: {}".format(
            accuracy_score(y_test, y_test_predicted)))
        print("recall on test dataset: {}".format(
            recall_score(y_test, y_test_predicted)))
        print("precision on test dataset: {}".format(
            recall_score(y_test, y_test_predicted)))

        return y_test_predicted
Example No. 23
    def __init__(self, X_train, y_train, id=None, **kwargs):
        """ 
        
        Called as soon as the object of KNN class is created.

        Parameters:

                    X_train = Input Training Data without the Label(Target)
                    y_train = Target Label from Training Data
                    n_neighbours = Number of Neighbours to be considered in KNN. Default Value is 5.
                    weights = Weights assigned to the distance based calculation. Default Value is "uniform"


        """

        super().__init__(id=id, **kwargs)
        self.__setup_logger()
        # defining hyperparameters
        self.X_train = Tensor(X_train, name="X_train")
        print("\n Shape of X_train \n", X_train.shape)

        # y_train is kept as a raw array for local indexing in predict()
        self.y_train = y_train
        print("\n Shape of y_train is \n", y_train.shape)

        self.n_neighbours = kwargs.get("n_neighbours", 5)
        self.weights = kwargs.get("weights", "uniform")
        self.n_classes = kwargs.get("n_classes", 3)
Example No. 24
    def train_svm(self, X, Y, X_train, X_test, y_train, y_test):
        """
        Training SVM

        Parameters:
                    X=input features
                    Y=output class
                    X_train = training input features
                    X_test = testing input features
                    y_train = training output
                    y_test = testing output

        Output:
                Trained Weights

        """
        X = Tensor(X, name="X")
        Y = Tensor(Y, name="Y")
        y_train = Tensor(y_train, name="y_train")
        y_test = Tensor(y_test, name="y_test")

        # insert 1 in every row for intercept b
        X_train.insert(loc=len(X_train.columns), column='intercept', value=1)
        X_test.insert(loc=len(X_test.columns), column='intercept', value=1)

        X_train = Tensor(X_train, name="X_train")
        X_test = Tensor(X_test, name="X_test")

        # train the model
        print("***** TRAINING IS STARTED ****")
        W = self.Stochastic_gradient_descent(X_train.output.to_numpy(),
                                             y_train.output.to_numpy())
        # above operation's aim is to return us the optimised weights for OPTIMISATION problem.
        print("**** TRAINING COMPLETED ****")
        print("WEIGHTS ARE AS FOLLOWS: {}".format(W))

        return X, Y, X_train, X_test, y_train, y_test, W
Example No. 25
    def fit(self, X_Train, Y_train):
        # wrap the training data as RavOp tensors
        self.X = Tensor(X_Train)
        self.Y = Tensor(Y_train)
Example No. 26
    def predict(self, X_test):
        """ 
        predict the data

        Parameters:
                    X_test = Data on which prediction has to be made

        Output:
                Gives you the Prediction

        """

        if self.weights == "uniform":
            neighbours = self.KNN_neighbours(X_test)
            print("\n neighbours \n", neighbours, "\n data type \n",
                  type(neighbours))
            # neighbours is a Tensor, use neighbours.output for converting to nd array
            # to understand bincount(), visit - https://i.stack.imgur.com/yAwym.png
            y_pred = ([
                np.argmax(np.bincount(self.y_train[neighbour]))
                for neighbour in neighbours.output
            ])

            print("\n y_pred \n", y_pred, "\n data type \n", type(y_pred))

            return y_pred

        if self.weights == "distance":

            # N nearest neighbours distance and indexes
            distance, neighbour_index = self.KNN_neighbours(
                X_test, return_distance=True)
            # X_test is array here not Tensor but returned variables are Tensors
            a = Scalar(1)
            print("\n Data type of distance before converting to Tensor \n",
                  type(distance))
            distance = Tensor(distance, name="distance tensor")
            print("\n distance being converetd to Tensor: \n", distance)
            print("\n\n Shape of New Tensor Created \n\n", distance.shape)
            inverse_distance = a.div(distance)
            while inverse_distance.status != "computed":
                pass
            print("\n inverse_distance_first created \n", inverse_distance)

            row_sums = inverse_distance.sum(axis=1)
            while row_sums.status != "computed":
                pass
            mean_inverse_distance = inverse_distance.div(
                Tensor(row_sums.output[:, np.newaxis]))
            while mean_inverse_distance.status != "computed":
                pass

            print("\n mean_inverse_distance", mean_inverse_distance,
                  "data type of mean_inverse_distance",
                  type(mean_inverse_distance))

            mean_inverse_distance = Tensor(mean_inverse_distance.output,
                                           name="mean_inverse_distance")

            proba = []

            # running loop on K nearest neighbours elements only and selecting train for them
            for i, row in enumerate(mean_inverse_distance.output):

                row_pred = self.y_train[neighbour_index.output[i]]
                print("\n row_pred \n", row_pred, " \n data type \n",
                      type(row_pred))

                for k in range(self.n_classes):
                    # compare row_pred to class k, wait for the op, then index locally
                    eq = Tensor(row_pred, name="row_pred").equal(Scalar(k))
                    while eq.status != "computed":
                        pass
                    indices = np.where(eq.output)
                    print("\n indices \n", indices, " \n data type \n",
                          type(indices))
                    prob_ind = sum(row[indices])
                    print("\n prob_ind", prob_ind, "\n data type \n",
                          type(prob_ind))
                    proba.append(prob_ind)
                    print(proba, "proba")

            predict_proba = Tensor(proba, name="proba").reshape(
                Scalar(X_test.shape[0]), self.n_classes)
            print("\n predict_proba \n", predict_proba, "\n data type \n",
                  type(predict_proba))
            y_pred = Tensor(
                [argmax(Scalar(item)) for item in predict_proba.output],
                name="y_pred")
            print("\n y_pred \n", y_pred, "\n data type \n", type(y_pred))

            return y_pred
Example No. 27
    def predict(self, X):
        # decision function: sign(X . w - b)
        X = Tensor(X)
        approx = R.dot(X, self.w) - self.b
        return R.sign(approx)
Example No. 28
    def predict(self, X):
        """
        Predict targets as X . coefficients
        """
        X = Tensor(X, name="X")
        return X.dot(self._coefficients)