import gmpy2
import numpy as np


def normalized_sample(self, W):
    """Normalized sampling without division.

    Args:
        W: a set of weights from which to sample.

    Returns:
        an integer in [0, len(W)) corresponding to the index sampled.
    """
    t = gmpy2.fsum(W)  # total weight
    # cumulative weights: C[i] = W[0] + ... + W[i]
    C = [gmpy2.fsum(W[0:i + 1]) for i in range(len(W))]

    # Determine the smallest power of two strictly greater than t.
    i_max = 0
    while gmpy2.exp2(i_max) > t:
        i_max -= 1
    while gmpy2.exp2(i_max) <= t:
        i_max += 1

    # Rejection-sample a uniform value s in [0, t]: draw from [0, 2**i_max)
    # and redraw whenever the draw overshoots the total weight.
    s = gmpy2.exp2(i_max + 1)  # sentinel, guaranteed to be > t
    while s > t:
        s = self.get_random_value(i_max, self.context.precision)

    # Return the first index whose cumulative weight covers s.
    for i in range(len(W)):
        if C[i] >= s:
            return i
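# A minimal stand-alone sketch of the same rejection idea (assumption: this
# float/`random` variant is illustrative only; the method above instead draws
# its randomness from self.get_random_value at MPFR precision).
import random


def normalized_sample_sketch(W):
    t = sum(W)
    C = [sum(W[:i + 1]) for i in range(len(W))]
    i_max = 0
    while 2.0 ** i_max > t:
        i_max -= 1
    while 2.0 ** i_max <= t:
        i_max += 1
    s = 2.0 ** (i_max + 1)
    while s > t:  # rejection step: accept only draws that land under t
        s = random.uniform(0.0, 2.0 ** i_max)
    for i, c in enumerate(C):
        if c >= s:
            return i


# Quick empirical check: index 1 should appear roughly twice as often.
# from collections import Counter
# print(Counter(normalized_sample_sketch([1.0, 2.0, 1.0]) for _ in range(10000)))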
def hessian(x, y, t_1, t_2):
    # Observed-information matrix of the two-parameter logistic model: sums of
    # p*(1-p) terms, i.e. the negative curvature of the log-likelihood with
    # respect to (t_1, t_2). Note y is unused: the logistic Hessian does not
    # depend on the labels.
    psigmoid = [sigmoid(i, t_1, t_2) for i in x]
    u = [p * (1 - p) * q * q for (p, q) in zip(psigmoid, x)]
    d1 = gmpy2.fsum(u)  # sum of p*(1-p)*x^2
    v = [p * (1 - p) * q for (p, q) in zip(psigmoid, x)]
    d2 = gmpy2.fsum(v)  # sum of p*(1-p)*x
    w = [p * (1 - p) for p in psigmoid]
    d3 = gmpy2.fsum(w)  # sum of p*(1-p)
    H = np.array([[d1, d2], [d2, d3]])
    return H
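# The helper sigmoid(i, t_1, t_2) is not shown in this section. A minimal
# sketch consistent with the gradient/Hessian formulas here, assuming t_1 is
# the slope and t_2 the intercept of the logistic model (an assumption, not a
# confirmed signature):
def sigmoid(x, t_1, t_2):
    return 1 / (1 + gmpy2.exp(-(t_1 * x + t_2)))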
def logLikelihood(x, y, t_1, t_2):
    # Bernoulli log-likelihood: sum of y*log(p) + (1-y)*log(1-p).
    psigmoid = [sigmoid(i, t_1, t_2) for i in x]
    u = [
        q * gmpy2.log(p) + (1 - q) * gmpy2.log(1 - p)
        for (p, q) in zip(psigmoid, y)
    ]
    return gmpy2.fsum(u)
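# Toy evaluation (assumption: the data and parameter values below are
# illustrative only). With sigmoid defined as sketched above, the result is a
# single mpfr value, non-positive by construction.
# x_toy = [gmpy2.mpfr(v) for v in (-1.0, 0.5, 2.0)]
# y_toy = [0, 1, 1]
# print(logLikelihood(x_toy, y_toy, gmpy2.mpfr(1), gmpy2.mpfr(0)))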
def optimized_normalized_sample(self, W):
    """Optimized normalized sampling without division.

    Args:
        W: a set of weights from which to sample.

    Returns:
        an integer in [0, len(W)) corresponding to the index sampled.

    WARNING: introduces a timing channel for differing weight distributions.
    """
    t = gmpy2.fsum(W)  # total weight
    # cumulative weights: C[i] = W[0] + ... + W[i]
    C = [gmpy2.fsum(W[0:i + 1]) for i in range(len(W))]
    s = 0
    # Determine the smallest power of two strictly greater than t.
    log2t = 0
    while gmpy2.exp2(log2t) > t:
        log2t -= 1
    while gmpy2.exp2(log2t) <= t:
        log2t += 1
    j = log2t - 1
    remaining = [i for i in range(len(W))]
    if t < gmpy2.exp2(log2t):
        remaining.append(len(W))  # add a dummy value
        C.append(gmpy2.exp2(log2t))
    # Build s one random bit at a time, pruning indices whose weight interval
    # can no longer contain the final value of s.
    while len(remaining) > 1:
        r = self.rng()
        s = s + r * gmpy2.exp2(j)
        to_remove = []
        for i in remaining:  # check if each remaining index is still reachable
            if C[i] <= s:
                to_remove.append(i)
            if i > 0:
                if C[i - 1] >= s + gmpy2.exp2(j):
                    to_remove.append(i)
        for i in to_remove:
            remaining.remove(i)
        if len(remaining) == 1 and remaining[0] == len(W):
            # The dummy won: s landed in [t, 2**log2t). Reject and restart.
            s = 0
            j = log2t  # don't subtract 1, it's going to be decremented
            remaining = [i for i in range(len(W))]
            if t < gmpy2.exp2(log2t):
                remaining.append(len(W))  # re-add the dummy; C already ends with it
        j -= 1
    return remaining[0]
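# Assumption sketch: self.rng is presumed to return a single uniform random
# bit, since each draw contributes r * 2**j to the binary expansion of s. A
# cryptographically sourced stand-in:
import secrets


def rng():
    return secrets.randbits(1)  # 0 or 1, each with probability 1/2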
def dot(self, other):
    """Return the dot product of this MPFRVector with another MPFRVector."""
    if not isinstance(other, MPFRVector):
        raise ValueError(
            "cannot take dot product of MPFRVector with non-MPFRVector")
    if len(self.entries) != len(other.entries):
        raise ValueError(
            "cannot take dot product of MPFRVectors with different lengths")
    return gmpy2.fsum(x * y for x, y in zip(self.entries, other.entries))
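# Usage sketch (assumption: MPFRVector is constructible from an iterable and
# stores gmpy2.mpfr entries in .entries; the constructor is not shown here).
# u = MPFRVector([1.5, -2.0, 3.25])
# v = MPFRVector([4.0, 0.5, -1.0])
# print(u.dot(v))  # 1.75: exact, since all inputs are binary-representable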
def gradient(x, y, t_1, t_2):
    # Gradient of the log-likelihood: [sum (y-p)*x, sum (y-p)] as a 1x2 row.
    psigmoid = [sigmoid(i, t_1, t_2) for i in x]
    u = [q - p for (p, q) in zip(psigmoid, y)]
    v = [t * k for (t, k) in zip(u, x)]
    return np.array([[gmpy2.fsum(v), gmpy2.fsum(u)]])
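# Newton-ascent sketch (assumption: the driver loop is not part of this
# section; this is one plausible way the pieces combine). Because hessian()
# returns the negative log-likelihood curvature (all-positive p*(1-p) sums),
# maximizing the likelihood means *adding* the solved step. The float
# conversion below is for illustration only and discards MPFR precision.
def newton_step(x, y, t_1, t_2):
    g = gradient(x, y, t_1, t_2).astype(float)  # 1x2 row vector
    H = hessian(x, y, t_1, t_2).astype(float)   # symmetric 2x2
    d = np.linalg.solve(H, g.T)                 # solve H d = g^T
    return t_1 + d[0, 0], t_2 + d[1, 0]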
def norm_squared(self):
    """Return the squared Euclidean norm of this MPFRVector."""
    return gmpy2.fsum(map(gmpy2.square, self.entries))
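# Consistency check (same MPFRVector construction assumption as the dot
# example above): the squared norm equals the self dot product.
# w = MPFRVector([3.0, 4.0])
# assert w.norm_squared() == w.dot(w)  # both are exactly 25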