def _weights(self, dists, index, sigma_sq):
    """Compute normalized Gaussian weights over neighbors, discounted by age.

    :param dists:     distances of the neighbors to the query point.
    :param index:     dataset indices (timestamps) of the neighbors; newer
                      samples receive larger temporal weights.
    :param sigma_sq:  squared bandwidth of the distance kernel.
    :returns: 1-D float array of weights summing to 1 (uniform fallback
              when every kernel value underflows to zero).
    """
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just an alias for it).
    w = np.fromiter((gaussian_kernel(d, sigma_sq) for d in dists),
                    float, len(dists))
    # Weight by timestamp of samples to forget old values
    max_index = max(index)
    wt = np.fromiter((gaussian_kernel(max_index - idx, self.sigma_t_sq)
                      for idx in index), float, len(dists))
    w = w * wt
    wsum = w.sum()
    if wsum == 0:
        # Every kernel underflowed: fall back to uniform weights.
        return 1.0 / len(dists) * np.ones((len(dists),))
    else:
        # Drop negligible weights (relative threshold) to ignore outliers.
        eps = wsum * 1e-10 / self.dim_x
        return np.fromiter((w_i / wsum if w_i > eps else 0.0 for w_i in w),
                           float)
def _weights(self, dists, index, sigma_sq):
    """Compute normalized Gaussian weights over neighbor distances.

    :param dists:     distances of the neighbors to the query point.
    :param index:     neighbor indices — accepted for interface
                      compatibility with the time-weighted variant, but
                      unused here.
    :param sigma_sq:  squared bandwidth of the distance kernel.
    :returns: 1-D float array of weights summing to 1 (uniform fallback
              when every kernel value underflows to zero).
    """
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just an alias for it).
    w = np.fromiter((gaussian_kernel(d, sigma_sq) for d in dists),
                    float, len(dists))
    wsum = w.sum()
    if wsum == 0:
        # Every kernel underflowed: fall back to uniform weights.
        return 1.0 / len(dists) * np.ones((len(dists),))
    else:
        # Drop negligible weights (relative threshold) to ignore outliers.
        eps = wsum * 1e-10 / self.dim_x
        return np.fromiter((w_i / wsum if w_i > eps else 0.0 for w_i in w),
                           float)
def _weights(self, dists, sigma_sq):
    """Compute normalized Gaussian weights over dimension-scaled distances.

    :param dists:     distances of the neighbors to the query point;
                      each is divided by ``self.dim_x`` before the kernel.
    :param sigma_sq:  squared bandwidth of the distance kernel.
    :returns: 1-D float array of weights summing to 1 (uniform fallback
              when every kernel value underflows to zero).
    """
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just an alias for it).
    w = np.fromiter((gaussian_kernel(d / self.dim_x, sigma_sq)
                     for d in dists), float)
    # We eliminate the outliers
    # TODO : actually reduce w and index
    wsum = w.sum()
    if wsum == 0:
        # Every kernel underflowed: fall back to uniform weights.
        return 1.0 / len(dists) * np.ones((len(dists),))
    else:
        # Drop negligible weights (relative threshold) to ignore outliers.
        eps = wsum * 1e-15 / self.dim_x
        return np.fromiter((w_i / wsum if w_i > eps else 0.0 for w_i in w),
                           float)
def _weights(self, dists, sigma_sq):
    """Compute normalized Gaussian weights over dimension-scaled distances.

    :param dists:     distances of the neighbors to the query point;
                      each is divided by ``self.dim_x`` before the kernel.
    :param sigma_sq:  squared bandwidth of the distance kernel.
    :returns: 1-D float array of weights summing to 1 (uniform fallback
              when every kernel value underflows to zero).
    """
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just an alias for it).
    w = np.fromiter((gaussian_kernel(d / self.dim_x, sigma_sq)
                     for d in dists), float)
    # We eliminate the outliers
    # TODO : actually reduce w and index
    wsum = w.sum()
    if wsum == 0:
        # Every kernel underflowed: fall back to uniform weights.
        return 1.0 / len(dists) * np.ones((len(dists),))
    else:
        # Drop negligible weights (relative threshold) to ignore outliers.
        eps = wsum * 1e-15 / self.dim_x
        return np.fromiter((w_i / wsum if w_i > eps else 0.0 for w_i in w),
                           float)
def _weights(self, index, dists, sigma_sq, y_desired):
    """Compute normalized Gaussian weights of neighbors in output space.

    :param index:      dataset indices of the neighbors; their y values
                       are fetched from ``self.fmodel.dataset``.
    :param dists:      accepted for interface compatibility but ignored —
                       distances are recomputed against ``y_desired``.
    :param sigma_sq:   squared bandwidth of the distance kernel.
    :param y_desired:  target output against which distances are measured.
    :returns: 1-D float array of weights summing to 1 (uniform fallback
              when every kernel value underflows to zero).
    """
    # Recompute distances in output space; shadows the dists argument.
    dists = [dist(self.fmodel.dataset.get_y(idx), y_desired)
             for idx in index]  # could be optimized
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just an alias for it).
    w = np.fromiter((gaussian_kernel(d / self.fmodel.dim_y, sigma_sq)
                     for d in dists), float)
    # We eliminate the outliers
    # TODO : actually reduce w and index
    wsum = w.sum()
    if wsum == 0:
        # Every kernel underflowed: fall back to uniform weights.
        return 1.0 / len(dists) * np.ones((len(dists),))
    else:
        # Drop negligible weights (relative threshold) to ignore outliers.
        eps = wsum * 1e-15 / self.fmodel.dim_y
        return np.fromiter((w_i / wsum if w_i > eps else 0.0 for w_i in w),
                           float)
def _weights(self, index, dists, sigma_sq, y_desired):
    """Compute normalized Gaussian weights of neighbors in output space.

    :param index:      dataset indices of the neighbors; their y values
                       are fetched from ``self.fmodel.dataset``.
    :param dists:      accepted for interface compatibility but ignored —
                       distances are recomputed against ``y_desired``.
    :param sigma_sq:   squared bandwidth of the distance kernel.
    :param y_desired:  target output against which distances are measured.
    :returns: 1-D float array of weights summing to 1 (uniform fallback
              when every kernel value underflows to zero).
    """
    # Recompute distances in output space; shadows the dists argument.
    dists = [dist(self.fmodel.dataset.get_y(idx), y_desired)
             for idx in index]  # could be optimized
    # NOTE: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent (np.float was just an alias for it).
    w = np.fromiter((gaussian_kernel(d / self.fmodel.dim_y, sigma_sq)
                     for d in dists), float)
    # We eliminate the outliers
    # TODO : actually reduce w and index
    wsum = w.sum()
    if wsum == 0:
        # Every kernel underflowed: fall back to uniform weights.
        return 1.0 / len(dists) * np.ones((len(dists),))
    else:
        # Drop negligible weights (relative threshold) to ignore outliers.
        eps = wsum * 1e-15 / self.fmodel.dim_y
        return np.fromiter((w_i / wsum if w_i > eps else 0.0 for w_i in w),
                           float)