def Evaluate(pk, cl, ck, s={}):  # s is the secret key, used only for debugging
    (pk0, x, y) = pk
    cl0 = []
    for (i, j) in cl:
        cl0.append(i)
    c0 = SomeWhatEvaluate((pk0, x), cl0, ck)
    z = [None] * (Params.bigo + 1)
    for i in range(1, Params.bigo + 1):
        k = bigfloat.mul(c0, y[i], bigfloat.precision(Params.kappa + Params.gamma))
        z[i] = float(bigfloat.mod(k, 2, bigfloat.precision(Params.prec)))
    if debug:
        su = 0
        yo = 0
        for i in range(1, Params.bigo + 1):
            if s[i] == 1:
                yo = bigfloat.add(yo, y[i], bigfloat.precision(Params.kappa + Params.gamma))
                su = bigfloat.add(su, z[i], bigfloat.precision(Params.kappa + Params.gamma))
        print("Enc_sum%2=", bigfloat.mod(su, 8, bigfloat.precision(Params.prec + Params.gamma)))
        q = bigfloat.div(c0, globsk, bigfloat.precision(Params.kappa + Params.gamma))
        print("(c0/sk)=", q)
        q = bigfloat.mul(c0, yo, bigfloat.precision(Params.kappa + Params.gamma))
        print("(c0*yo)=", q)
        q = bigfloat.div(1, globsk, bigfloat.precision(Params.kappa + Params.gamma))
        print("(1/sk)=", q)
        print("(yo)=", yo)
        print("(c0*1/sk)=", bigfloat.mul(q, c0, bigfloat.precision(Params.prec + Params.gamma)))
        q = bigfloat.div(c0, globsk, bigfloat.precision(Params.prec + Params.gamma))
        print("(c0/sk)=", q)
    c = (c0, z)
    return c
def inverse_1_2_exact(small_matrix, dtype=f64):
    """Inverts small matrices of size 1x1 or 2x2 by explicit formulas

    Args:
        small_matrix (np.array): matrix for inverse searching

    Returns:
        np.array: inverse of small matrix

    Raises:
        Exception: An error occurred because matrix is not 1x1 or 2x2
        Exception: An error occurred because matrix of size 1x1 or 2x2 is singular
    """
    with bf.Context(precision=EXACT_PRECISION):
        if small_matrix.shape == (1, 1):
            if small_matrix[0, 0] == 0:
                raise Exception('Matrix of size 1x1 is singular')
            return np.array([[1.0 / small_matrix[0, 0]]], dtype=dtype)
        if small_matrix.shape != (2, 2):
            raise Exception("Matrix isn't a 2x2 matrix")
        a = small_matrix[0, 0]
        b = small_matrix[0, 1]
        c = small_matrix[1, 0]
        d = small_matrix[1, 1]
        det = bf.sub(bf.mul(a, d), bf.mul(b, c))  # det = a * d - b * c
        if det == 0:
            raise Exception('Matrix of size 2x2 is singular')
        inverse = np.zeros(small_matrix.shape, dtype=dtype)
        # adjugate formula: inv([[a, b], [c, d]]) = [[d, -b], [-c, a]] / det
        inverse[0, 0] = bf.div(d, det)
        inverse[0, 1] = bf.div(-b, det)
        inverse[1, 0] = bf.div(-c, det)
        inverse[1, 1] = bf.div(a, det)
        return inverse
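# A minimal usage sketch for inverse_1_2_exact. It assumes the module that defines the
# function also provides `bf` (import bigfloat as bf), `np`, EXACT_PRECISION and
# f64 = np.float64; the test matrix below is illustrative only.
import numpy as np

A = np.array([[4.0, 2.0],
              [2.0, 3.0]])                  # well-conditioned symmetric 2x2 matrix
A_inv = inverse_1_2_exact(A)
print(A_inv)                                # [[ 0.375 -0.25 ], [-0.25   0.5  ]]
print(np.allclose(A @ A_inv, np.eye(2)))    # expected: True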
def sigmoid(s):
    y = []
    for k in s:
        z = []
        for l in k:
            z.append(bigfloat.div(1., bigfloat.add(1, bigfloat.exp(-l, bigfloat.precision(precision)))))
        y.append(z)
    return np.array(y)
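# A quick elementwise check of sigmoid, assuming the snippet's module-level `precision`,
# `bigfloat` and `np` names; the precision value of 100 here is purely illustrative.
import numpy as np
import bigfloat

precision = 100   # assumed module-level precision read by sigmoid

out = sigmoid([[0.0, 2.0], [-2.0, 20.0]])
print(out)        # roughly [[0.5, 0.881], [0.119, 1.0]]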
def SomeWhatDecrypt(sk, c):
    t = sk // 2  # adding t before dividing rounds c/sk to the nearest integer
    if debug:
        x = int(bigfloat.div(c, sk, bigfloat.precision(1000)))
        y = c // sk
        print("SomeWhatDecrypt:x:", x)
        print("SomeWhatDecrypt:y:", y)
        print("SomeWhatDecrypt:c%sk:", c % sk)
        print("SomeWhatDecrypt:c-x*sk:", c - x * sk)
    return (c - (c + t) // sk) % 2
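# A tiny sanity check for SomeWhatDecrypt with a hypothetical odd secret key and a
# DGHV-style ciphertext c = q*sk + 2*r + m. The module-level `debug` flag the function
# reads is assumed and set to False here.
debug = False            # module-level flag read by SomeWhatDecrypt

sk = 10001               # hypothetical odd secret key
m = 1                    # plaintext bit
c = 7 * sk + 2 * 3 + m   # q = 7, noise r = 3
print(SomeWhatDecrypt(sk, c))   # expected: 1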
def tridiagonal_inversion_exact(tridiagonal, cell_sizes, dtype=np.float64):
    # Inverts a block-tridiagonal matrix cell by cell (cells of size 1 or 2), stored in
    # banded form. Assumed layout: row 0 = superdiagonal, row 1 = diagonal, row 2 = subdiagonal.
    with bf.Context(precision=EXACT_PRECISION):
        offset = 0
        inverse = np.zeros(tridiagonal.shape, dtype=dtype)
        for cs in cell_sizes:
            if cs == 1:
                inverse[1, offset] = bf.div(1, np.float64(tridiagonal[1, offset]))
            else:
                a = np.float64(tridiagonal[1, offset])
                b = np.float64(tridiagonal[0, offset + 1])
                c = np.float64(tridiagonal[2, offset])      # subdiagonal entry (assumed layout)
                d = np.float64(tridiagonal[1, offset + 1])
                det = bf.sub(bf.mul(a, d), bf.mul(b, c))    # det = a * d - b * c
                # adjugate formula: inv([[a, b], [c, d]]) = [[d, -b], [-c, a]] / det
                inverse[1, offset] = bf.div(d, det)
                inverse[0, offset + 1] = bf.div(-b, det)
                inverse[2, offset] = bf.div(-c, det)
                inverse[1, offset + 1] = bf.div(a, det)
            offset += cs
        return inverse
from fractions import Fraction

from bigfloat import (precision, log2, div, next_up,
                      RoundTowardNegative, RoundTowardPositive)


def logn2(n, p):
    """Best p-bit lower and upper bounds for log(2)/log(n), as Fractions."""
    with precision(p):
        extra = 10
        while True:
            with precision(p + extra):  # use extra precision for intermediate step
                log2upper = log2(n, RoundTowardPositive)
                log2lower = log2(n, RoundTowardNegative)
            lower = div(1, log2upper, RoundTowardNegative)
            upper = div(1, log2lower, RoundTowardPositive)
            # if lower and upper are adjacent (or equal) we're done
            if next_up(lower) == upper:
                return (Fraction(*lower.as_integer_ratio()),
                        Fraction(*upper.as_integer_ratio()))
            # otherwise, increase the precision and try again
            extra += 10
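# For example, logn2(10, 53) brackets log(2)/log(10) = log10(2) with the best 53-bit bounds.
lo, hi = logn2(10, 53)
print(float(lo), float(hi))   # two adjacent 53-bit values bracketing log10(2) ~ 0.3010299957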
def predict_dpi(x, s):
    num = 0
    den = 0
    for i in range(len(s)):
        y_i = s[i, len(x)]
        x_i = s[i, :len(x)]
        ex = bg.exp(-0.25 * (math.pow(euclidean_distance(x, x_i), 2)))
        num = bg.add(num, bg.mul(y_i, ex))
        den = bg.add(den, ex)
    return bg.div(num, den)
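# A sketch of calling predict_dpi, which is a Gaussian-kernel weighted average of the
# sample targets. euclidean_distance is not shown in the snippet, so a hypothetical
# implementation is supplied here, together with the `bg`/`math` imports the function uses.
import math
import numpy as np
import bigfloat as bg

def euclidean_distance(a, b):
    # hypothetical helper assumed by predict_dpi
    return float(np.linalg.norm(np.asarray(a) - np.asarray(b)))

# Each row of s is a sample: features first, target value in the last column.
s = np.array([[0.0, 0.0, 1.0],
              [1.0, 1.0, 3.0]])
print(float(predict_dpi([0.5, 0.5], s)))   # 2.0: both samples are equally distant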
def invert(lower: BigFloat, upper: BigFloat, precision_of_result: int) -> ExactRealProgram:
    context_down = bf.precision(precision_of_result) + bf.RoundTowardNegative
    context_up = bf.precision(precision_of_result) + bf.RoundTowardPositive

    # interval doesn't contain zero, so invert and flip: [1 / y2, 1 / y1]
    if (lower > 0 and upper > 0) or (lower < 0 and upper < 0):
        inv_lower = bf.div(1, upper, context_down)
        inv_upper = bf.div(1, lower, context_up)
        lw = [0, -float(inv_upper)**2]
        uw = [-float(inv_lower)**2, 0]
    # [lower, 0] -> [-infty, 1 / y1]
    elif lower < 0 and upper == 0:
        inv_lower = BigFloat('-inf')
        inv_upper = bf.div(1, lower, context_up)
        lw = [0, float('nan')]
        uw = [-float(inv_lower)**2, 0]
    # [0, upper] -> [1 / y2, infty]
    elif lower == 0 and upper > 0:
        inv_lower = bf.div(1, upper, context_down)
        inv_upper = BigFloat('inf')
        lw = [0, -float(inv_upper)**2]
        uw = [float('nan'), 0]
    # If the interval includes 0, just give up and return [-infty, infty].
    # Note: an alternative is to split up intervals, but that's too tricky for now.
    elif lower < 0 < upper:
        inv_lower = BigFloat('-inf')
        inv_upper = BigFloat('inf')
        lw = [0, float('nan')]
        uw = [float('nan'), 0]
    # Interval is probably malformed, i.e. lower is greater than upper
    else:
        raise ValueError("Input interval is invalid for division")
    return inv_lower, inv_upper, lw, uw
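# Example of the non-zero-straddling branch, assuming the defining module imports
# `bigfloat as bf`, `BigFloat`, and the ExactRealProgram type used in the annotation.
# Inverting [2, 4] gives the outward-rounded enclosure [1/4, 1/2] plus derivative weights.
from bigfloat import BigFloat

lo, hi, lw, uw = invert(BigFloat(2), BigFloat(4), precision_of_result=53)
print(lo, hi)   # 0.25 and 0.5
print(lw, uw)   # [0, -0.25] and [-0.0625, 0]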
def relative_likelihood_result_calculator(population):
    """
    Given a population, this function calculates the relative likelihood result
    (a way to make the likelihood result bigger) for every chromosome.

    Args:
        population : LIST[Chromosome(), Chromosome(), ...]
            A list filled with 'Chromosome' objects
    """
    with bf.quadruple_precision:
        total = sum_likelihood_result(population)
        for i in range(0, len(population)):
            log_likelihood_result = bf.exp(bf.BigFloat(str(population[i].get_log_likelihood_result())))
            population[i].set_relative_likelihood_result(float(bf.div(log_likelihood_result, total)))
import bigfloat
import numpy


def get_boltzmann_distribution(energy_by_arm):
    R = 8.3144621    # gas constant, J/(mol*K)
    T = 293.15       # room temperature, K
    factor = 4184.0  # joules per kcal
    boltzmann_distribution = []
    for dG in energy_by_arm:
        ps = []
        total = bigfloat.BigFloat(0)
        for energy in dG:
            p = bigfloat.exp((-energy * factor) / (R * T), bigfloat.precision(1000))
            ps.append(p)
            total = bigfloat.add(total, p)
        normal_ps = []
        for p in ps:
            normal_ps.append(float(bigfloat.div(p, total)))
        boltzmann_distribution.append(numpy.array(normal_ps))
    return boltzmann_distribution
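# A small check of get_boltzmann_distribution with made-up free energies (kcal/mol) for two
# arms: one arm with a clearly favoured state, and one with two degenerate states that
# should split 50/50.
dist = get_boltzmann_distribution([[-10.0, -8.0], [-5.0, -5.0]])
print(dist[0])   # the -10 kcal/mol state dominates (weight roughly 0.97)
print(dist[1])   # equal energies -> [0.5, 0.5]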
def Encrypt(pk, m, calcZ=True, s=None):  # s is the secret key, to be used for debugging purposes
    (pk0, x, y) = pk
    c0 = SomeWhatEncrypt((pk0, x), m)
    if calcZ:
        z = [None] * (Params.bigo + 1)
        for i in range(1, Params.bigo + 1):
            k = bigfloat.mul(c0, y[i], bigfloat.precision(Params.kappa + Params.gamma))
            z[i] = float(bigfloat.mod(k, 2.0, bigfloat.precision(Params.prec)))
            if z[i] >= 2.0:
                z[i] = 0
        c = (c0, z)
    else:
        c = c0
    if debug:
        su = 0
        for i in range(1, Params.bigo + 1):
            if s and s[i] == 1:
                su = bigfloat.add(su, z[i], bigfloat.precision(Params.kappa + Params.gamma))
        print("Enc_sum%2=", bigfloat.mod(su, 8, bigfloat.precision(Params.prec + Params.gamma)))
        q = bigfloat.div(c0, globsk, bigfloat.precision(Params.kappa + Params.gamma))
        print("(Enc_c/sk)%2=", bigfloat.mod(q, 8, bigfloat.precision(Params.prec + Params.gamma)))
        print("c0=", c0)
    return c
from bigfloat import sub, add, mul, div, sqr, sqrt, precision

# Quadratic formula for a*x**2 + b*x + c = 0, evaluated at 100-bit precision.
a = 1e-8
b = 10
c = 1e-8
p = 100

D = sub(sqr(b), mul(4, mul(a, c)), precision(p))
x1 = div(-add(b, sqrt(D, precision(p))), mul(2, a), precision(p))
x2 = div(-sub(b, sqrt(D, precision(p))), mul(2, a), precision(p))
print(x1, x2)
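# With a = c = 1e-8 and b = 10, the small-magnitude root x2 (roughly -1e-9) is lost to
# catastrophic cancellation in b - sqrt(D) when the same formula is evaluated in plain
# double precision, while the 100-bit computation above recovers it. A quick comparison:
import math

# In doubles, b*b - 4*a*c rounds to exactly 100.0, so b - sqrt(D) cancels to 0.0
# and the small root is lost entirely.
D_f = b * b - 4 * a * c
print(-(b - math.sqrt(D_f)) / (2 * a))   # -0.0 instead of roughly -1e-9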
def longest_repeat(d):
    context = bigfloat.precision(precision)
    result_float = bigfloat.div(1, d, context=context)
    result = str(result_float)[2:].strip('0')[:-fuzz]
    result_len = len(result)
    #print "d = {0}, result = {1}".format(d, result)
    longest = ''
    longest_len = 0
    found = set()
    for i in range(result_len):
        remaining = result[i:]
        for k in range(i + 1, result_len):
            substr = result[i:k]
            substr_len = len(substr)
            if substr == '0' * substr_len:
                continue
            new_substr = True
            for f in found:
                if substr == f:
                    new_substr = False
                elif is_repeating(f, substr):
                    new_substr = False
            if not new_substr:
                continue
            #print "new substring {0}".format(substr)
            repeats = is_repeating(substr, remaining)
            #print "substring {0} repeats {1} times".format(substr, repeats)
            if repeats >= min_repeats:
                #print "found repeating substring {0} (occurred {1} times)".format(substr, repeats, i=i, k=k)
                found.add(substr)
                if longest_len < substr_len:
                    #print "new longest substr!"
                    longest = substr
                    longest_len = substr_len
            if remaining[1:] == remaining[1] * len(remaining[1:]):
                #print "remaining string is all the same"
                break
        if found:
            #print "Already found repeating substrings, short-circuiting"
            break
        if remaining == remaining[0] * len(remaining):
            #print "remaining string is all the same"
            break
    if longest:
        #print "longest substring for d = {0} is {1}".format(d, longest)
        pass
    return longest
def KeyGen(Lambda):
    global globsk  # for debugging
    (sk0, pk0) = SomeWhatKeyGen(Lambda)
    globsk = sk0  # for debugging
    #Params.kappa = int(Params.gamma * Params.eta / Params.rho1)
    '''Tweak to approximate to nearest integer'''
    t = sk0 // 2 + 1
    xp = int((2**Params.kappa + t) // sk0)  # approximating to nearest integer
    S = []
    lenS = 0
    while lenS != Params.theta:
        i = random.randrange(1, Params.bigo)
        if i not in S:
            S.append(i)
            lenS += 1
    s = {}
    for i in range(1, Params.bigo + 1):
        if i in S:
            s[i] = 1
        else:
            s[i] = 0
    n = 2**(Params.kappa)
    m = 2**(Params.kappa + 1)
    u = {}
    approx = xp // Params.theta
    var = approx // Params.theta
    for i in range(1, Params.bigo + 1):
        u[i] = random.randrange(0, m)
    su = 0
    for i in S[:-1]:
        x = random.randrange(approx - var, approx + var)
        u[i] = x
        su += u[i]
    i = S[-1]
    u[i] = xp - su
    y = {}
    for i in range(1, Params.bigo + 1):
        y[i] = bigfloat.div(u[i], n, bigfloat.precision(Params.kappa + Params.gamma))
    #DEBUG
    if debug:
        su = 0
        su2 = 0
        for i in S:
            su2 += u[i]
            su = bigfloat.add(su, y[i], bigfloat.precision(Params.kappa + Params.gamma))
            inv = bigfloat.mul(n, y[i], bigfloat.precision(Params.kappa + Params.gamma))
            print(u[i])
            print(inv)
        print("sumxp=", su2)
        print("sumf=", su)
        print("xp=", xp)
        print("xp/n=", bigfloat.div(xp, n, bigfloat.precision(Params.kappa + Params.gamma)))
        print("m=", m)
        print(Params.theta)
        print(Params.bigo)
        print(S)
        print(s)
    #END DEBUG
    (pk1, x) = pk0
    pk = (pk1, x, y)
    return (s, pk)
def transmit(self, seq, max_channel_use=None, err_num=None, msg_len=4):
    # PMS settings
    self.msg_point = self.bin_to_real(seq)
    print("Message: {}, Px: {}".format(self.msg_point, self.XoverP))

    # hamming code settings
    self.msg_len = msg_len  # hamming code message length
    self.redundant_bits = HammingCode.calc_redundant_bits(msg_len)
    self.block_len = msg_len + self.redundant_bits
    print("Hamming Code ({}, {})".format(self.block_len, msg_len))
    self.undecodable = False

    #TODO
    # h_err_p = self.XoverP
    # h_err_p = hamming_err_prob(self.XoverP, self.msg_len, self.block_len)
    h_err_p = hamming_err_prob(self.XoverP / 2, self.msg_len, self.block_len)
    # estimated by leading order
    # h_err_p = hamming_LOEP(self.XoverP, self.block_len)
    # h_err_p = 0.1179648

    max_default_use = 500
    MCU = max_channel_use if max_channel_use is not None else max_default_use
    for i in range(MCU):
        # np.random.seed(i)  # debug mode

        # split probability tree, figure out which block msg belongs to
        msg_pmf = self.tree.PMF(self.msg_point)
        msg_seq, msg_order = self.real_to_bin(msg_pmf, msg_len)
        self.X = msg_seq
        # print("X: {}".format(self.X))

        # hamming encoding:
        h = HammingCode(self.X)
        U = h.encode()  # msg to be sent thru channel
        # print("U: {}".format(U))

        # Binary Symmetric Channel transmission:
        v = self.channel_transmit(U, err_num)
        # print("V: {}".format(v))

        # hamming decoding:
        err_pos = h.detectError(v)  # reverse order
        if err_pos == 0:  # no error
            self.Y = self.X
        elif err_pos <= len(v):  # able to recover u from v
            err_pos = len(v) - err_pos
            lost_bit = '1' if v[err_pos] == '0' else '0'  # flip the error bit
            correct_v = v[:err_pos] + lost_bit + v[err_pos + 1:]
            self.Y = h.decode(correct_v[::-1])
        else:
            #TODO
            # print("Hamming code can't correct error in {} with error position".format(v, err_pos))
            self.undecodable = True
            # self.Y = h.decode(v)
            continue
        # print("Y: {}".format(self.Y))

        '''
        Update probability: scale up the prob block msg belongs to, and scale down
        the other prob blocks. Divide the tree into three parts by the lower/upper
        bounds of Y's interval: the left part, the middle part, and the right part.
        Assume the crossover probability is a, so we should scale up P([lb, ub]) by a,
        and scale down P([0, lb]), P([ub, 1]) by 1 - a. The procedure consists of two steps:
        1. Scale down the left part and scale up the other parts.
        2. Scale up the middle part and scale down the right part.
        Note that if either the left part or the right part is empty, the situation
        is the same as the standard posterior matching scheme.
        '''
        # probability lower/upper bounds of Y's interval
        Y_order, Y_pmf_lb, Y_pmf_ub = self.find_interval(self.Y)
        if Y_order == 0:  # left part is empty
            Y_node_ub = self.tree.quantile(Y_pmf_ub)
            self.peak = Y_node_ub.start_value
            self.tree = Y_node_ub.parent.rotate()
            self.tree.left.p, self.tree.right.p = 1 - h_err_p, h_err_p
        elif Y_order == 2**self.msg_len - 1:  # right part is empty
            Y_node_lb = self.tree.quantile(Y_pmf_lb)
            self.peak = Y_node_lb.start_value
            self.tree = Y_node_lb.parent.rotate()
            self.tree.left.p, self.tree.right.p = h_err_p, 1 - h_err_p
        else:
            # nodes of lower/upper bounds of Y's interval
            Y_node_lb = self.tree.quantile(Y_pmf_lb)
            Y_node_ub = self.tree.quantile(Y_pmf_ub)
            # number of intervals in the left/right part
            left_num = Y_order
            right_num = 2**self.msg_len - 1 - Y_order
            unit_prob = bf.div(h_err_p, (2**self.msg_len - 1))
            self.peak = (Y_node_lb.start_value + Y_node_ub.start_value) / 2
            # step 1
            self.tree = Y_node_lb.parent.rotate()
            self.tree.left.p = unit_prob * left_num
            self.tree.right.p = 1 - self.tree.left.p
            # step 2
            sub = self.tree.right
            sub.parent = None
            self.tree.right = None
            sub = Y_node_ub.parent.rotate()
            sub_total = 1 - h_err_p + unit_prob * right_num
            sub.left.p = bf.BigFloat(1 - h_err_p) / sub_total
            sub.right.p = 1 - sub.left.p
            self.tree.right = sub
            sub.parent = self.tree

        # print("-"*80)
        # self.tree.visualize()
        if self.check_ending():
            bin_seq, _ = self.real_to_bin(self.peak, len(self.seq))
            return bin_seq, i + 1, self.block_len

    bin_seq, _ = self.real_to_bin(self.peak, len(self.seq))
    print("You have reached the maximum expected channel use!")
    return bin_seq, MCU, self.block_len
from bigfloat import div

# The MINUTES_PER_WEEK, MINUTES_PER_DAY, MINUTES_PER_HOUR and MINUTES_PER_YEAR constants,
# and the input `minutes`, are assumed to be defined earlier in the script.
original_minutes = minutes

if minutes > MINUTES_PER_WEEK:
    weeks = minutes // MINUTES_PER_WEEK
    minutes -= weeks * MINUTES_PER_WEEK
else:
    weeks = 0
print(minutes)

if minutes > MINUTES_PER_DAY:
    days = minutes // MINUTES_PER_DAY
    minutes -= days * MINUTES_PER_DAY
else:
    days = 0
print(minutes)

if minutes > MINUTES_PER_HOUR:
    hours = minutes // MINUTES_PER_HOUR
    minutes -= hours * MINUTES_PER_HOUR
else:
    hours = 0

print("%i weeks %i days %i hours %i minutes" % (weeks, days, hours, minutes))

uptime = div((MINUTES_PER_YEAR - original_minutes), (MINUTES_PER_YEAR)) * 100
print("If you entered total downtime per year in minutes, that's an uptime of ")
print(uptime)
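# Worked example, assuming MINUTES_PER_YEAR = 365 * 24 * 60 = 525600: an annual downtime of
# 500 minutes gives an uptime of 100 * (525600 - 500) / 525600, i.e. roughly 99.90%.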