def categorical_hinge(y_true, y_pred):
    """
    Categorical hinge loss: max(neg - pos + 1, 0), where
    neg = max((1 - y_true) * y_pred) is the best wrong-class score and
    pos = sum(y_true * y_pred) is the true-class score.

    :param y_true: one-hot ground-truth labels (R.Tensor or array-like)
    :param y_pred: predicted scores (R.Tensor or array-like)
    :return: ravop op computing the loss
    """
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)
    # Mask out the true class with (1 - y_true); the original used
    # R.Scalar(-1), which computes (-1 - y_true) and inverts the mask.
    neg = R.max(R.elemul(R.sub(R.Scalar(1), y_true), y_pred))
    pos = R.sum(R.elemul(y_true, y_pred))
    # The original passed a tuple (neg - pos, 1) to R.max; the margin
    # term must be added to (neg - pos) before clamping at zero.
    loss = R.max(R.add(R.sub(neg, pos), R.Scalar(1)), R.Scalar(0))
    return loss
def log_loss(y_true, y_pred, with_logit=True):
    """
    Binary cross-entropy loss: -mean(y*log(p) + (1-y)*log(1-p)).

    :param y_true: ground-truth labels in {0, 1}
    :param y_pred: predicted probabilities, or raw logits if with_logit
    :param with_logit: when True, apply sigmoid to y_pred first
    :return: ravop op computing the loss
    """
    if with_logit:
        y_pred = sigmoid(y_pred)
    # Clip away exact 0/1 so the logarithms stay finite.
    y_pred = R.clip(y_pred, R.epsilon(), R.sub(R.Scalar(1), R.epsilon()))
    # Fix: the original passed the two terms as two separate arguments to
    # R.mean instead of summing them; combine with R.add before averaging.
    term_pos = R.elemul(y_true, R.natlog(y_pred))
    term_neg = R.elemul(R.sub(R.Scalar(1), y_true),
                        R.natlog(R.sub(R.Scalar(1), y_pred)))
    loss = R.elemul(R.Scalar(-1), R.mean(R.add(term_pos, term_neg)))
    return loss
def r2_score(y_true, y_pred):
    """
    Coefficient of determination: 1 - SS_res / (SS_tot + eps).
    Epsilon in the denominator guards against a constant y_true.
    """
    y_true = y_true if isinstance(y_true, R.Tensor) else R.Tensor(y_true)
    y_pred = y_pred if isinstance(y_pred, R.Tensor) else R.Tensor(y_pred)
    one = R.Scalar(1)
    residual_ss = R.sum(R.square(R.sub(y_true, y_pred)))
    total_ss = R.sum(R.square(R.sub(y_true, R.mean(y_true))))
    return R.sub(one, R.div(residual_ss, R.add(total_ss, R.epsilon())))
def normalize(x):
    """
    Min-max normalize a 1-D array into [0, 1].

    :param x: 1-D R.Tensor or array-like
    :raises Exception: if the input has more than one dimension
    :return: ravop op computing (x - min) / (max - min)
    """
    if not isinstance(x, R.Tensor):
        x = R.Tensor(x)
    if len(x.output.shape) > 1:
        raise Exception("Unsupported input type")
    # Renamed from `max`/`min`: the original shadowed the builtins.
    x_max = R.max(x)
    x_min = R.min(x)
    return R.div(R.sub(x, x_min), R.sub(x_max, x_min))
def huber(y_true, y_pred, d):
    """
    Huber loss with threshold d:
      0.5 * x^2                      if |x| <= d
      0.5 * d^2 + d * (|x| - d)      otherwise
    where x = y_true - y_pred.

    :param y_true: ground truth (R.Tensor or array-like)
    :param y_pred: prediction (R.Tensor or array-like)
    :param d: threshold (plain number)
    :return: ravop op computing the loss
    """
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)
    # Keep the Scalar in one variable; the original re-wrapped the already
    # wrapped Scalar via R.Scalar(d) inside the branches.
    delta = R.Scalar(d)
    x = R.sub(y_true, y_pred)
    # NOTE(review): comparing ravop ops with <= relies on the library's
    # operator overloading — original code did the same; confirm semantics.
    if R.abs(x) <= delta:
        # Quadratic region: the original used d * x^2 instead of 0.5 * x^2.
        return R.elemul(R.Scalar(0.5), R.elemul(x, x))
    # Linear region: the original used d^3 + d*(|x| - d) for the offset;
    # the standard Huber offset is 0.5 * d^2.
    return R.add(R.elemul(R.Scalar(0.5), R.elemul(delta, delta)),
                 R.elemul(delta, R.sub(R.abs(x), delta)))
def Poisson_loss(y_true, y_pred):
    """
    Poisson loss: y_pred - y_true * log(y_pred), with y_pred clipped
    into (eps, 1 - eps) to keep the logarithm finite.

    :param y_true: observed counts (R.Tensor or array-like)
    :param y_pred: predicted rates (R.Tensor or array-like)
    :return: ravop op computing the loss
    """
    if not isinstance(y_true, R.Tensor):
        y_true = R.Tensor(y_true)
    if not isinstance(y_pred, R.Tensor):
        y_pred = R.Tensor(y_pred)
    # Fix: original called R.Saclar (typo -> NameError) and used Python `-`
    # between ravop objects; use R.Scalar and R.sub like the rest of the file.
    y_pred = R.clip(y_pred, R.epsilon(), R.sub(R.Scalar(1), R.epsilon()))
    return R.sub(y_pred, R.elemul(y_true, R.natlog(y_pred)))
def mean_squared_log_error(y_true, y_pred):
    """
    Mean squared logarithmic error:
    mean((log(1 + y_true) - log(1 + y_pred))^2).
    """
    y_true = y_true if isinstance(y_true, R.Tensor) else R.Tensor(y_true)
    y_pred = y_pred if isinstance(y_pred, R.Tensor) else R.Tensor(y_pred)
    log_true = R.natlog(R.add(y_true, R.one()))
    log_pred = R.natlog(R.add(y_pred, R.one()))
    return R.mean(R.pow(R.sub(log_true, log_pred), R.Scalar(2)))
def mean_absolute_error(y_true, y_pred):
    """
    Mean absolute error: mean(|y_pred - y_true|).
    """
    y_true = y_true if isinstance(y_true, R.Tensor) else R.Tensor(y_true)
    y_pred = y_pred if isinstance(y_pred, R.Tensor) else R.Tensor(y_pred)
    absolute_errors = R.abs(R.sub(y_pred, y_true))
    return R.mean(absolute_errors)
def r2_score(y_true, y_pred):
    """
    Coefficient of determination: 1 - SS_res / SS_tot, built as named
    ravop ops ("ss_res", "ss_tot", "r2_score").

    :param y_true: ground truth (R.Tensor, R.Op, or array-like)
    :param y_pred: prediction (R.Tensor, R.Op, or array-like)
    :return: ravop op named "r2_score"
    """
    # isinstance accepts a tuple — flatter than the original if/pass/else.
    if not isinstance(y_true, (R.Tensor, R.Op)):
        y_true = R.Tensor(y_true, name="y_true")
    if not isinstance(y_pred, (R.Tensor, R.Op)):
        y_pred = R.Tensor(y_pred, name="y_pred")
    # Fix: removed leftover debug `print(type(y_true), type(y_pred))`.
    scalar1 = R.Scalar(1)
    SS_res = R.sum(R.square(R.sub(y_pred, y_true)), name="ss_res")
    SS_tot = R.sum(R.square(R.sub(y_true, R.mean(y_true))), name="ss_tot")
    return R.sub(scalar1, R.div(SS_res, SS_tot), name="r2_score")
def mean_squared_error(y_true, y_pred):
    """
    Mean squared error: mean((y_true - y_pred)^2).
    """
    y_true = y_true if isinstance(y_true, R.Tensor) else R.Tensor(y_true)
    y_pred = y_pred if isinstance(y_pred, R.Tensor) else R.Tensor(y_pred)
    squared_errors = R.pow(R.sub(y_true, y_pred), R.Scalar(2))
    return R.mean(squared_errors)
def standardize(x):
    """
    Standardize an array: (x - mean) / std.
    """
    if not isinstance(x, R.Tensor):
        x = R.Tensor(x)
    centered = R.sub(x, R.mean(x))
    return R.div(centered, R.std(x))
def pearson_correlation(x, y):
    """
    Pearson (linear) correlation coefficient:
    (n*Σxy - Σx*Σy) / sqrt((n*Σx² - (Σx)²) * (n*Σy² - (Σy)²)).

    :param x: first sample (R.Tensor or array-like)
    :param y: second sample of the same length
    :return: ravop op computing the coefficient
    """
    if not isinstance(x, R.Tensor):
        x = R.Tensor(x)
    if not isinstance(y, R.Tensor):
        y = R.Tensor(y)
    sum_x = R.sum(x)
    sum_y = R.sum(y)
    sum_x2 = R.sum(R.square(x))
    sum_y2 = R.sum(R.square(y))
    # Fix: n must be the number of observations; the original read the
    # shape of a scalar reduction (R.sum(R.square(x))) instead of x itself.
    n = x.output.shape[0]
    scalar_n = R.Scalar(n)
    numerator = R.sub(R.multiply(scalar_n, R.sum(R.multiply(x, y))),
                      R.multiply(sum_x, sum_y))
    # Fix: both radicals in the original subtracted R.square(b) — the
    # square of Σy² — instead of (Σx)² and (Σy)² respectively.
    denom_x = R.square_root(R.sub(R.multiply(scalar_n, sum_x2), R.square(sum_x)))
    denom_y = R.square_root(R.sub(R.multiply(scalar_n, sum_y2), R.square(sum_y)))
    return R.div(numerator, R.multiply(denom_x, denom_y))
def z_score(x, axis=None):
    """
    Z-score of an array: (x - mean) / std, optionally along an axis.
    """
    if not isinstance(x, R.Tensor):
        x = R.Tensor(x)
    reduce_kwargs = {} if axis is None else {"axis": axis}
    mean = R.mean(x, **reduce_kwargs)
    std = R.std(x, **reduce_kwargs)
    return R.div(R.sub(x, mean), std)
def find_split(self, X, y):
    """
    Search every feature column and threshold for the split that most
    reduces Gini impurity.

    Returns (ideal_col, ideal_threshold); both are None when there are
    fewer than two observations. ideal_threshold is a ravop op (midpoint
    of the two neighbouring sorted values), ideal_col a plain int.

    NOTE(review): this method busy-waits on distributed op status and
    depends on op-construction order — left byte-identical apart from
    comments.
    """
    ideal_col = None
    ideal_threshold = None
    # Number of rows in y, fetched from the distributed graph.
    num_observations = y.shape_().gather(R.Scalar(0))
    # Busy-wait until the remote op has actually been computed.
    while num_observations.status != 'computed':
        pass
    num_observations = int(num_observations.output)
    if num_observations <= 1:
        return ideal_col, ideal_threshold
    y = y.reshape(shape=[num_observations])
    # Per-class counts over the whole node, built by concatenation.
    count_in_parent = R.Tensor([])
    for c in range(self.num_classes):
        count_in_parent = count_in_parent.concat(
            R.sum(R.equal(y, R.Scalar(c))).expand_dims())
    # Gini impurity of the parent node: 1 - sum((count/n)^2).
    gini = R.square(
        count_in_parent.foreach(operation='div', params=num_observations))
    best_gini = R.sub(R.Scalar(1.0), R.sum(gini))
    temp_y = y.reshape(shape=[num_observations, 1])
    for col in range(self.num_features):
        # Column `col` of X as an (n, 1) tensor, paired with labels.
        temp_X = R.gather(
            R.transpose(X), R.Scalar(col)).reshape(shape=[num_observations, 1])
        all_data = R.concat(temp_X, temp_y, axis=1)
        column = R.gather(R.transpose(X), R.Scalar(col))
        # Indices that would sort this column (via sorted unique values).
        ind = column.find_indices(R.sort(R.unique(column)))
        while ind.status != "computed":
            pass
        inform_server()
        sorted_data = R.Tensor([])
        for i in ind.output:
            sorted_data = sorted_data.concat(all_data.gather(
                R.Tensor(i)))  # need to find another way to sort
        sorted_data_tpose = sorted_data.transpose()
        thresholds = sorted_data_tpose.gather(R.Scalar(0)).gather(
            R.Scalar(0))
        obs_classes = sorted_data_tpose.gather(R.Scalar(1)).gather(
            R.Scalar(0))
        # Running class counts on each side of the candidate split.
        num_left = R.Tensor([0] * self.num_classes)  # need ops
        num_right = count_in_parent
        for i in range(1, num_observations):
            # Move observation i-1 from the right partition to the left.
            class_ = R.gather(obs_classes, R.Tensor([i - 1]))
            classencoding = R.one_hot_encoding(
                class_, depth=self.num_classes).gather(R.Scalar(0))
            num_left = num_left.add(classencoding)
            num_right = num_right.sub(classencoding)
            # Gini impurity of each side (left has i rows, right n - i).
            gini_left = R.sub(
                R.Scalar(1),
                R.sum(
                    R.square(R.foreach(num_left, operation='div', params=i))))
            gini_right = R.sub(
                R.Scalar(1),
                R.sum(
                    R.square(
                        R.foreach(num_right, operation='div',
                                  params=num_observations - i))))
            # Weighted average impurity of the candidate split.
            gini = R.div(
                R.add(
                    R.multiply(R.Scalar(i), gini_left),
                    R.multiply(R.Scalar(num_observations - i), gini_right)),
                R.Scalar(num_observations))
            # decision1 presumably guards against equal adjacent
            # thresholds — TODO confirm logical_and semantics here.
            decision1 = R.logical_and(thresholds.gather(R.Tensor([i])),
                                      thresholds.gather(R.Tensor([i - 1])))
            decision2 = gini.less(best_gini)
            while decision2.status != "computed":
                pass
            print(decision2.output == 1)
            # NOTE(review): decision1 is a ravop op, so `decision1 != 1`
            # compares object identity-ish, not the computed value — verify.
            if decision2.output == 1 and decision1 != 1:
                best_gini = gini
                ideal_col = col
                ideal_threshold = R.div(
                    R.add(thresholds.gather(R.Tensor([i])),
                          thresholds.gather(R.Tensor([i - 1]))),
                    R.Scalar(2))
    print(ideal_col, ideal_threshold)
    return ideal_col, ideal_threshold
def __compute_cost(self, y, y_pred, no_samples, name="cost"):
    """Half mean squared error cost: sum((y_pred - y)^2) / (2 * n)."""
    scale = R.Scalar(1.0 / (2.0 * no_samples.output))
    squared_residuals = R.sum(R.square(R.sub(y_pred, y)))
    return R.multiply(scale, squared_residuals, name=name)
def tanh(x):
    """
    Tanh activation function: (e^x - e^-x) / (e^x + e^-x).

    :param x: ravop tensor/op
    :return: ravop op computing tanh(x)
    """
    # Hoist the exponentials: the original built R.exp(x) and
    # R.exp(-x) twice each, doubling the graph nodes for no benefit.
    exp_pos = R.exp(x)
    exp_neg = R.exp(R.mul(R.minus_one(), x))
    return R.div(R.sub(exp_pos, exp_neg), R.add(exp_pos, exp_neg))
def __eucledian_distance(self, X):
    """Euclidean distance from each row of X to each row of self.X_train."""
    expanded = R.expand_dims(X, axis=1, name="expand_dims")
    squared_diff = R.sub(expanded, self.X_train).pow(R.Scalar(2))
    return R.square_root(squared_diff.sum(axis=2))
def closest_centroids(self, points, centroids):
    """Index of the nearest centroid for every point."""
    expanded_centroids = R.expand_dims(centroids, axis=1)
    squared_dist = R.sum(R.square(R.sub(points, expanded_centroids)), axis=2)
    return R.argmin(R.square_root(squared_dist))
def closest_centroids(self, centroids):
    """
    Index of the nearest centroid for every point in self.points.

    Fix: `square_root` and `Scalar` were referenced without the `R.`
    prefix — unresolved names under `import ravop.core as R` and
    inconsistent with the sibling implementation.
    """
    centroids = R.expand_dims(centroids, axis=1)
    return R.argmin(
        R.square_root(
            R.sub(self.points, centroids).pow(R.Scalar(2)).sum(axis=2)))
import time
import ravop.core as R
from ravcom import inform_server

# Demo: build a small ravop compute graph of elementwise and reduction ops.
a = R.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
b = R.Scalar(10)
c = R.add(a, b)
d = R.sub(a, b)
e = R.multiply(a, b)
f = R.mean(a)
g = R.median(a)

# Notify the scheduler that ops are ready to be picked up.
inform_server()

# Wait for 10 seconds
# NOTE(review): fixed sleep assumes the distributed workers finish in
# time — results below may be incomplete on a slow cluster.
time.sleep(10)

# Calling an op presumably fetches its computed result — confirm API.
print(c())
print(d())
print(e())
print(f())
print(g())
def __euclidean_distance(self, X):
    """
    Euclidean distance from each row of X to each row of self._X.

    Fix: `Scalar` was referenced without the `R.` prefix, unlike the
    parallel implementation that uses `R.Scalar(2)`.
    """
    X = R.expand_dims(X, axis=1, name="expand_dims")
    return R.square_root(R.sub(X, self._X).pow(R.Scalar(2)).sum(axis=2))
def eucledian_distance(self, X, Y):
    """
    Euclidean distance between X and Y along axis 0:
    sqrt(sum((X - Y)^2)).

    Fix: `Scalar` was referenced without the `R.` prefix, unlike every
    other ravop call in this method.
    """
    return R.square_root(R.sub(X, Y).pow(R.Scalar(2)).sum(axis=0))