Example #1
    def predict(self, X):

        """
        Predict the output class
        """

        MAPs = []

        for index, row in enumerate(X):
            
            joint_proba = {}

            for class_name, features in self.class_summary.items():
                total_features = len(features['summary'])
                likelihood = R.Scalar(1)

                for idx in range(total_features):
                    feature = R.Scalar(row[idx])
                    mean = features['summary'][idx]['mean']
                    stdev = features['summary'][idx]['std']
                    normal_proba = self.distribution(feature, mean, stdev)
                    likelihood = likelihood * normal_proba

                prior_proba = R.Scalar(features['prior_proba'])

                joint_proba[class_name] = prior_proba * likelihood
            MAPs.append(joint_proba)

        return MAPs
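predict returns a list of per-class joint-probability dictionaries rather than hard labels. A minimal sketch of the final argmax step, assuming the op status/output convention used throughout these examples:

def predict_labels(MAPs):
    labels = []
    for joint_proba in MAPs:
        # block until every per-class score has been computed remotely
        for op in joint_proba.values():
            while op.status != 'computed':
                pass
        # pick the class with the highest joint probability (MAP estimate)
        labels.append(max(joint_proba, key=lambda c: joint_proba[c].output))
    return labels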
Example #2
    def grow_tree(self, X, y, depth=0):
        pop_per_class = R.Tensor([])
        for c in range(self.num_classes):
            pop_per_class = pop_per_class.concat(
                R.sum(R.equal(y, R.Scalar(c))).expand_dims())
        predicted_class = R.argmax(pop_per_class)
        node = Node(predicted_class=predicted_class, depth=depth)
        node.samples = R.shape(y).gather(R.Scalar(0))

        if depth < self.max_depth:
            # hardcoded split for quick testing; self.find_split(X, y) would
            # perform the full search (see Example #14)
            col, threshold = 0, R.Tensor([12.895])
            decision = R.Scalar(col).logical_and(threshold)
            while decision.status != "computed":
                pass
            if decision.output == 1:
                indices_left = X.transpose().gather(
                    R.Scalar(col)).less(threshold)
                X_left, y_left = X.gather(indices_left), y.gather(indices_left)
                indices_right = X.transpose().gather(
                    R.Scalar(col)).greater_equal(threshold)
                X_right, y_right = X.gather(indices_right), y.gather(
                    indices_right)
                node.feature_index = col
                node.threshold = threshold
                node.left = self.grow_tree(X_left, y_left, depth + 1)
                node.left.leftbranch = True
                node.right = self.grow_tree(X_right, y_right, depth + 1)
                node.right.rightbranch = True
        return node
Example #3
 def fit(self, X, y):
     X = R.Tensor(X)
     y = R.Tensor(y)
     num_classes = y.unique().shape_().gather(R.Scalar(0))
     num_features = X.shape_().gather(R.Scalar(1))
     while num_classes.status != 'computed':
         pass
     while num_features.status != 'computed':
         pass
     self.num_classes = int(num_classes.output)
     self.num_features = int(num_features.output)
     self.tree = self.grow_tree(X, y)
Example #4
def categorical_hinge(y_true, y_pred):
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    # neg = max((1 - y_true) * y_pred), pos = sum(y_true * y_pred)
    neg = R.max(R.elemul(R.sub(R.Scalar(1), y_true), y_pred))
    pos = R.sum(R.elemul(y_true, y_pred))
    # loss = max(neg - pos + 1, 0); assumes two-argument R.max is elementwise
    loss = R.max(R.add(R.sub(neg, pos), R.Scalar(1)), R.Scalar(0))

    return loss
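A quick sanity check of the formula, assuming raw lists are accepted as above and results are fetched with the status/output convention used elsewhere: for y_true = [0, 1, 0] and y_pred = [0.3, 0.6, 0.1], neg = 0.3 and pos = 0.6, so the loss is max(0.3 - 0.6 + 1, 0) = 0.7.

loss = categorical_hinge([0, 1, 0], [0.3, 0.6, 0.1])
while loss.status != 'computed':
    pass
print(loss.output)  # expected: 0.7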
Example #5
def one_hot_cross_entropy(y_true, y_pred, with_logit=True):
    if with_logit:
        y_pred = softmax(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.div(R.Scalar(1), R.epsilon()))
    N = y_pred.shape_().gather(R.Scalar(0))
    while N.status != 'computed':
        pass
    # loss = -sum(y_true * log(y_pred)) / N
    loss = R.div(R.elemul(R.Scalar(-1),
                          R.sum(R.elemul(y_true, R.natlog(y_pred)))),
                 R.Scalar(int(N.output)))

    return loss
Example #6
def log_loss(y_true, y_pred, with_logit=True):
    if with_logit:
        y_pred = sigmoid(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.sub(R.Scalar(1), R.epsilon()))
    # loss = -mean(y_true * log(y_pred) + (1 - y_true) * log(1 - y_pred))
    loss = R.elemul(R.Scalar(-1), R.mean(R.add(
        R.elemul(y_true, R.natlog(y_pred)),
        R.elemul(R.sub(R.Scalar(1), y_true),
                 R.natlog(R.sub(R.Scalar(1), y_pred))))))

    return loss
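A worked value for reference, with with_logit=False so the predictions are used as probabilities directly (and assuming raw lists are accepted here): for y_true = [1, 0] and y_pred = [0.9, 0.1], both samples contribute log(0.9), so the loss is -mean(log 0.9, log 0.9) ≈ 0.1054.

loss = log_loss([1, 0], [0.9, 0.1], with_logit=False)
while loss.status != 'computed':
    pass
print(loss.output)  # expected: ≈ 0.1054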
Example #7
def sparse_cross_entropy(y_true, y_pred, with_logit=True):
    if with_logit:
        y_pred = softmax(y_pred)

    y_pred = R.clip(y_pred, R.epsilon(), R.div(R.Scalar(1), R.epsilon()))
    N = y_pred.shape_().gather(R.Scalar(0))
    C = y_pred.shape_().gather(R.Scalar(1))
    while N.status != 'computed' or C.status != 'computed':
        pass
    # loss = -sum(log(y_pred[i, y_true[i]])) / N, with the true-class
    # probability selected through a one-hot mask (as in Example #28)
    mask = R.one_hot_encoding(y_true, depth=int(C.output))
    loss = R.elemul(R.Scalar(-1),
                    R.div(R.sum(R.elemul(mask, R.natlog(y_pred))),
                          R.Scalar(int(N.output))))

    return loss
Example #8
    def distribution(self, x, mean, std):

        """
        Gaussian Distribution Function
        """ 
        numerator = R.square(x - mean)
        denominator = R.Scalar(2) * R.square(std)
        frac = R.div(numerator, denominator)
        exponent = R.exp(R.Scalar(-1) * frac)
        two_pi = R.Scalar(2) * R.pi()
        gaussian_denominator = R.square_root(two_pi) * std
        gaussian_func = R.div(exponent, gaussian_denominator)
        return gaussian_func
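A hypothetical spot check of the density, where model stands in for a fitted classifier instance: the standard normal density at x = 1 is exp(-1/2) / sqrt(2 * pi) ≈ 0.24197.

p = model.distribution(R.Scalar(1.0), R.Scalar(0.0), R.Scalar(1.0))
while p.status != 'computed':
    pass
print(p.output)  # expected: ≈ 0.24197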
Example #9
def huber(y_true, y_pred, d):
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    d = R.Scalar(d)
    x = R.abs(R.sub(y_true, y_pred))

    # Huber: 0.5 * x^2 where x <= d, else 0.5 * d^2 + d * (x - d)
    # (assumes less_equal mirrors the less/greater_equal ops used elsewhere)
    cond = x.less_equal(d)
    while cond.status != 'computed':
        pass
    if cond.output == 1:
        return R.elemul(R.Scalar(0.5), R.elemul(x, x))
    return R.add(R.elemul(R.Scalar(0.5), R.elemul(d, d)),
                 R.elemul(d, R.sub(x, d)))
Example #10
def train(x, Y, w1, w2, alpha=0.01, epoch=10):
    acc = []
    losses = []
    for j in range(epoch):
        loss_sum = R.Scalar(0)
        for i in range(len(x)):
            out = f_forward(x[i], w1, w2)
            loss_sum = loss_sum.add(loss(out, Y[i]))
            w1, w2 = back_prop(x[i], Y[i], w1, w2, alpha)
        print('Epoch : ', j + 1)
        acc.append(
            R.Scalar(1).sub(loss_sum.div(R.Scalar(len(x)))).multiply(
                R.Scalar(100)))
        losses.append(loss_sum.div(R.Scalar(len(x))))
    return (acc, losses, w1, w2)
Example #11
    def distribution(self, x, mean, std):

        """
        Gaussian Distribution Function
        exponent = np.exp(-((x-mean)**2 / (2*std**2)))
        gauss_func = exponent / (np.sqrt(2*np.pi)*std)
        """ 
        numerator = R.square(x - mean)
        denominator = R.Scalar(2) * R.square(std)
        frac = R.div(numerator, denominator)
        exponent = R.exp(R.Scalar(-1) * frac)
        two_pi = R.Scalar(2) * R.Scalar(3.141592653589793)
        gaussian_denominator = R.square_root(two_pi) * std
        gaussian_func = R.div(exponent, gaussian_denominator)
        return gaussian_func
Example #12
 def __init__(self, x_points, y_points, theta):
     self.raw_X = x_points
     self.raw_y = y_points
     self.m = R.Scalar(self.raw_y.shape[0])
     self.X = R.Tensor(self.raw_X.tolist())
     self.y = R.Tensor(self.raw_y.tolist())
     self.theta = R.Tensor(theta.tolist())
Example #13
    def fit(self, X, y):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        self.leny = len(y)
        Y = R.Tensor(y)
        # weights initialization
        self.theta = R.Tensor([0] * self.x_shape1)

        for i in range(self.num_iter):
            h = self.__sigmoid(X.dot(self.theta))
            while h.status != 'computed':
                pass
            w = X.transpose()
            while w.status != 'computed':
                pass
            self.theta = self.theta.sub(
                self.lr.multiply((w.dot(h.sub(Y)).div(R.Scalar(self.leny)))))
            while self.theta.status != 'computed':
                pass
            loss = self.__loss(self.__sigmoid(X.dot(self.theta)), Y)
            while loss.status != 'computed':
                pass
            if self.verbose:
                self.losses.append(loss)
            print('Iteration : ', i)
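__sigmoid and __loss are referenced above but not shown. Minimal sketches consistent with the ravop ops used in these examples (__loss here is the binary cross-entropy from Example #6):

    def __sigmoid(self, z):
        # 1 / (1 + exp(-z))
        return R.div(R.Scalar(1),
                     R.add(R.Scalar(1), R.exp(R.elemul(R.Scalar(-1), z))))

    def __loss(self, h, y):
        # -mean(y * log(h) + (1 - y) * log(1 - h))
        return R.elemul(R.Scalar(-1), R.mean(R.add(
            R.elemul(y, R.natlog(h)),
            R.elemul(R.sub(R.Scalar(1), y),
                     R.natlog(R.sub(R.Scalar(1), h))))))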
Example #14
    def grow_tree(self, X, y, depth=0):
        pop_per_class = R.Tensor([])
        for c in range(self.num_classes):
            pop_per_class = pop_per_class.concat(
                R.sum(R.equal(y, R.Scalar(c))).expand_dims())
        predicted_class = R.argmax(pop_per_class)
        while predicted_class.status != "computed":
            pass

        node = Node(predicted_class=predicted_class.output, depth=depth)
        node.samples = R.shape(y).gather(R.Scalar(0))
        if depth < self.max_depth:
            col, threshold = self.find_split(X, y)
            # find_split returns (None, None) when no valid split exists
            if col is not None and threshold is not None:
                while threshold.status != "computed":
                    pass
                indices_left = X.transpose().gather(
                    R.Scalar(col)).less(threshold)
                X_left = X.gather(
                    R.find_indices(indices_left, R.Tensor(
                        [1])).reshape(shape=R.sum(indices_left).expand_dims()))
                y_left = y.gather(
                    R.find_indices(indices_left, R.Tensor(
                        [1])).reshape(shape=R.sum(indices_left).expand_dims()))

                indices_right = X.transpose().gather(
                    R.Scalar(col)).greater_equal(threshold)
                X_right = X.gather(
                    R.find_indices(indices_right, R.Tensor([
                        1
                    ])).reshape(shape=R.sum(indices_right).expand_dims()))
                y_right = y.gather(
                    R.find_indices(indices_right, R.Tensor([
                        1
                    ])).reshape(shape=R.sum(indices_right).expand_dims()))
                node.feature_index = col
                node.threshold = threshold

                node.left = self.grow_tree(X_left, y_left, depth + 1)
                node.left.leftbranch = True
                node.right = self.grow_tree(X_right, y_right, depth + 1)
                node.right.rightbranch = True
        return node
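grow_tree only builds the tree. A hedged sketch of how one row could be routed through the resulting Node structure, using the fields assigned above and assuming each threshold's output unwraps to a single number:

    def predict_sample(self, row):
        node = self.tree
        while getattr(node, 'left', None) is not None:
            while node.threshold.status != 'computed':
                pass
            if row[node.feature_index] < node.threshold.output:
                node = node.left
            else:
                node = node.right
        return node.predicted_class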
Example #15
 def __init__(self, lr=0.01, num_iter=10, fit_intercept=True, verbose=True):
     self.lr = R.Scalar(lr)
     self.num_iter = num_iter
     self.fit_intercept = fit_intercept
     self.verbose = verbose
     self.x_shape1 = None
     self.losses = []
     self.preds = None
Example #16
def f1_score(true_labels, pred_labels):
    if not isinstance(true_labels, R.Tensor):
        true_labels = R.Tensor(true_labels)
    if not isinstance(pred_labels, R.Tensor):
        pred_labels = R.Tensor(pred_labels)
    pre = precision(true_labels, pred_labels)
    rec = recall(true_labels, pred_labels)
    return R.div(R.multiply(R.Scalar(2), R.multiply(pre, rec)),
                 R.add(pre, rec))
Example #17
def mean_squared_error(y_true, y_pred):
    """
    Mean Squared Error
    """
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    return R.mean(R.pow(R.sub(y_true, y_pred), R.Scalar(2)))
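For reference, using the call-style fetch from Example #27: the squared errors for the pair below are (0 + 0 + 1), so the mean is 1/3.

mse = mean_squared_error([1, 2, 3], [1, 2, 4])
print(mse())  # expected: ≈ 0.333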
Example #18
def root_mean_squared_error(y_true, y_pred):
    """
    Root Mean Squared Error
    """
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    return R.pow(mean_squared_error(y_true, y_pred), R.Scalar(0.5))
Example #19
def pearson_correlation(x, y):
    """
    Calculate linear correlation(pearson correlation)
    """

    if not isinstance(x, R.Tensor):
        x = R.Tensor(x)
    if not isinstance(y, R.Tensor):
        y = R.Tensor(y)

    sum_x, sum_y = R.sum(x), R.sum(y)
    sum_x2, sum_y2 = R.sum(R.square(x)), R.sum(R.square(y))
    sum_xy = R.sum(R.multiply(x, y))

    # n: number of observations, fetched once x's shape is computed
    n_op = x.shape_().gather(R.Scalar(0))
    while n_op.status != 'computed':
        pass
    n = R.Scalar(int(n_op.output))

    return R.div(
        R.sub(R.multiply(n, sum_xy), R.multiply(sum_x, sum_y)),
        R.multiply(
            R.square_root(R.sub(R.multiply(n, sum_x2), R.square(sum_x))),
            R.square_root(R.sub(R.multiply(n, sum_y2), R.square(sum_y)))))
Example #20
 def fit(self, X_train, y_train, alpha=0.01, epoch=1):
     self.x, self.y = self.preprocess(X_train, y_train)
     self.leny = len(self.y)
     print('Starting to Train...')
     for j in range(epoch):
         loss_sum = R.Scalar(0)
         for i in range(len(self.x)):
             out = self.f_forward(self.x[i], self.w1, self.w2)
             loss_sum = loss_sum.add(self.loss(out, self.y[i]))
             self.w1, self.w2 = self.back_prop(self.x[i], self.y[i],
                                               self.w1, self.w2, alpha)
         print('Epoch : ', j + 1)
         self.acc.append(
             R.Scalar(1).sub(loss_sum.div(R.Scalar(len(self.x)))).multiply(
                 R.Scalar(100)))
         self.losses.append(loss_sum.div(R.Scalar(len(self.x))))
     while self.w1.status != 'computed':
         pass
     while self.w2.status != 'computed':
         pass
     print('Training Complete!')
Example #21
def r2_score(y_true, y_pred):

  if not isinstance(y_true, R.Tensor):
      y_true = R.Tensor(y_true)
  if not isinstance(y_pred, R.Tensor):
      y_pred = R.Tensor(y_pred)

  scalar1 = R.Scalar(1)

  SS_res = R.sum(R.square(R.sub(y_true, y_pred)))
  SS_tot = R.sum(R.square(R.sub(y_true, R.mean(y_true))))

  return R.sub(scalar1, R.div(SS_res, R.add(SS_tot, R.epsilon())))
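A quick check: perfect predictions drive SS_res to 0, so the score approaches 1 (the R.epsilon() term keeps the division finite). Fetching with the call convention from Example #27:

score = r2_score([1.0, 2.0, 3.0], [1.0, 2.0, 3.0])
print(score())  # expected: ≈ 1.0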
Example #22
def mean_squared_log_error(y_true, y_pred):
    """
    Mean Squared Log Error
    """
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)

    return R.mean(
        R.pow(
            R.sub(R.natlog(R.add(y_true, R.one())),
                  R.natlog(R.add(y_pred, R.one()))), R.Scalar(2)))
Example #23
def back_prop(x, y, w1, w2, alpha):
    # x,y are Tensors
    # hidden layer
    z1 = x.dot(w1)  # input to the hidden layer
    a1 = sigmoid(z1)  # output of the hidden layer
    # output layer
    z2 = a1.dot(w2)  # input to the output layer
    a2 = sigmoid(z2)  # output of the output layer
    # error in output layer
    d2 = a2.sub(y)
    d3 = a1.multiply(R.Scalar(1).sub(a1))
    d4 = w2.dot(d2.transpose()).transpose()
    d1 = d3.multiply(d4)

    # Gradient for w1 and w2
    w1_adj = x.transpose().dot(d1)
    w2_adj = a1.transpose().dot(d2)

    # Updating parameters
    w1 = w1.sub(R.Scalar(alpha).multiply(w1_adj))
    w2 = w2.sub(R.Scalar(alpha).multiply(w2_adj))

    return (w1, w2)
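f_forward is used by train (Example #10) and mirrored inside back_prop; a minimal sketch consistent with the forward pass replicated above:

def f_forward(x, w1, w2):
    # hidden layer
    a1 = sigmoid(x.dot(w1))
    # output layer
    a2 = sigmoid(a1.dot(w2))
    return a2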
Example #24
 def gradient_descent(self, alpha, num_iters):
     alpha_ = R.Scalar(alpha)
     for e in range(num_iters):
         residual = self.X.dot(self.theta).sub(self.y)
         while residual.status != 'computed':
             pass
         temp = self.theta.sub((alpha_.div(self.m)).multiply(self.X.transpose().dot(residual)))
         while temp.status != 'computed':
             pass
         self.theta = temp
         print('Iteration : ', e)
     op_theta = self.theta()
     print('Theta found by gradient descent: intercept={0}, slope={1}'.format(op_theta[0],op_theta[1]))
     return self.theta, op_theta[0], op_theta[1]
Example #25
    def __init__(self, id=None, **kwargs):
        super().__init__(id=id, **kwargs)

        self.__setup_logger()

        # Define hyper-parameters
        self.learning_rate = R.Scalar(kwargs.get("learning_rate", 0.01), name="learning_rate")
        self.iterations = kwargs.get("iterations", 100)

        self.X = None
        self.y = None
        self.W = None
        self.b = None
        self.no_samples = None
        self.no_features = None
Example #26
def r2_score(y_true, y_pred):
    if not isinstance(y_true, (R.Tensor, R.Op)):
        y_true = R.Tensor(y_true, name="y_true")

    if not isinstance(y_pred, (R.Tensor, R.Op)):
        y_pred = R.Tensor(y_pred, name="y_pred")

    scalar1 = R.Scalar(1)

    SS_res = R.sum(R.square(R.sub(y_pred, y_true)), name="ss_res")
    SS_tot = R.sum(R.square(R.sub(y_true, R.mean(y_true))), name="ss_tot")

    return R.sub(scalar1, R.div(SS_res, SS_tot), name="r2_score")
Example #27
import time

import ravop.core as R
from ravcom import inform_server

a = R.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
b = R.Scalar(10)

c = R.add(a, b)
d = R.sub(a, b)
e = R.multiply(a, b)
f = R.mean(a)
g = R.median(a)

inform_server()

# Wait for 10 seconds
time.sleep(10)

print(c())
print(d())
print(e())
print(f())
print(g())
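Instead of a fixed sleep, the same results can be fetched with the status-polling pattern the other examples use; a sketch for the last op in the graph:

while g.status != 'computed':
    pass
print(g.output)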
Example #28
    def find_split(self, X, y):
        ideal_col = None
        ideal_threshold = None

        num_observations = y.shape_().gather(R.Scalar(0))
        while num_observations.status != 'computed':
            pass
        num_observations = int(num_observations.output)
        if num_observations <= 1:
            return ideal_col, ideal_threshold

        y = y.reshape(shape=[num_observations])
        count_in_parent = R.Tensor([])
        for c in range(self.num_classes):
            count_in_parent = count_in_parent.concat(
                R.sum(R.equal(y, R.Scalar(c))).expand_dims())
        gini = R.square(
            count_in_parent.foreach(operation='div', params=num_observations))
        best_gini = R.sub(R.Scalar(1.0), R.sum(gini))
        temp_y = y.reshape(shape=[num_observations, 1])

        for col in range(self.num_features):
            temp_X = R.gather(
                R.transpose(X),
                R.Scalar(col)).reshape(shape=[num_observations, 1])
            all_data = R.concat(temp_X, temp_y, axis=1)

            column = R.gather(R.transpose(X), R.Scalar(col))
            ind = column.find_indices(R.sort(R.unique(column)))
            while ind.status != "computed":
                pass
            inform_server()
            sorted_data = R.Tensor([])
            for i in ind.output:
                sorted_data = sorted_data.concat(all_data.gather(
                    R.Tensor(i)))  # need to find another way to sort
            sorted_data_tpose = sorted_data.transpose()
            thresholds = sorted_data_tpose.gather(R.Scalar(0)).gather(
                R.Scalar(0))
            obs_classes = sorted_data_tpose.gather(R.Scalar(1)).gather(
                R.Scalar(0))

            num_left = R.Tensor([0] * self.num_classes)  # need ops
            num_right = count_in_parent
            for i in range(1, num_observations):
                class_ = R.gather(obs_classes, R.Tensor([i - 1]))
                classencoding = R.one_hot_encoding(
                    class_, depth=self.num_classes).gather(R.Scalar(0))
                num_left = num_left.add(classencoding)
                num_right = num_right.sub(classencoding)

                gini_left = R.sub(
                    R.Scalar(1),
                    R.sum(
                        R.square(R.foreach(num_left, operation='div',
                                           params=i))))
                gini_right = R.sub(
                    R.Scalar(1),
                    R.sum(
                        R.square(
                            R.foreach(num_right,
                                      operation='div',
                                      params=num_observations - i))))
                gini = R.div(
                    R.add(
                        R.multiply(R.Scalar(i), gini_left),
                        R.multiply(R.Scalar(num_observations - i),
                                   gini_right)), R.Scalar(num_observations))

                decision1 = R.logical_and(thresholds.gather(R.Tensor([i])),
                                          thresholds.gather(R.Tensor([i - 1])))
                decision2 = gini.less(best_gini)
                while decision1.status != "computed" or \
                        decision2.status != "computed":
                    pass
                if decision2.output == 1 and decision1.output != 1:
                    best_gini = gini
                    ideal_col = col
                    ideal_threshold = R.div(
                        R.add(thresholds.gather(R.Tensor([i])),
                              thresholds.gather(R.Tensor([i - 1]))),
                        R.Scalar(2))
        print(ideal_col, ideal_threshold)
        return ideal_col, ideal_threshold
Example #29
  confusion = R.Tensor(confusion)

  if average=='macro':

    for i in confusion:
      TP, TN, FP, FN = i[0], i[1], i[2], i[3]
      Recall = R.div(TP, R.add(TP, FN))
      Precision = R.div(TP, R.add(TP, FP))

      while Precision.status != 'computed' or Recall.status != 'computed':
        pass
      if Precision.output == 0 or Recall.output == 0 \
          or np.isnan(Recall.output) or np.isnan(Precision.output):
        final.append(0)

      else:
        F1 = R.div(R.elemul(R.Scalar(2), R.elemul(Recall, Precision)),
                   R.add(Recall, Precision))
        final.append(F1)
        
    return R.mean(final)

  if average == 'micro':

    TP = R.sum(confusion, axis=0)[0]
    TN = R.sum(confusion, axis=0)[1]
    FP = R.sum(confusion, axis=0)[2]
    FN = R.sum(confusion, axis=0)[3]

    Recall = R.div(TP, R.add(TP, FN))
    Precision = R.div(TP, R.add(TP, FP))
    F1 = R.div(R.elemul(R.Scalar(2), R.elemul(Recall, Precision)),
               R.add(Recall, Precision))
    return F1
Example #30
 def compute_cost(self):
     residual = self.X.dot(self.theta).sub(self.y)
     while residual.status != 'computed':
         pass
     return (R.Scalar(1).div(R.Scalar(2).multiply(self.m))).multiply(
         residual.dot(residual.transpose()))