def numeric_discovery_measures(model):
    nrules = model.number_rules
    #nrows = len(model.target_values)
    kl_supp, kl_usg, wkl_supp, wkl_usg, wkl_sum = np.zeros(nrules), np.zeros(
        nrules), np.zeros(nrules), np.zeros(nrules), np.zeros(nrules)
    wacc_supp, wacc_usg = np.zeros(nrules), np.zeros(nrules)
    support, usage = np.zeros(nrules), np.zeros(nrules)
    std_rules = [stat["variance"]**0.5 for stat in model.statistic_rules]
    std_rulesalternative = []
    tid_covered = mpz()
    for r in range(nrules):
        tid_support = model.bitset_rules[r]
        tid_usage = model.bitset_rules[r] & ~tid_covered
        tid_covered = tid_covered | tid_support
        aux_bitset = xmpz(tid_support)
        idx_bits = list(aux_bitset.iter_set())
        values_support = model.target_values[idx_bits]
        aux_bitset = xmpz(tid_usage)
        idx_bits = list(aux_bitset.iter_set())
        values_usage = model.target_values[idx_bits]
        support[r] = len(values_support)
        usage[r] = len(values_usage)
        kl_supp[r], wkl_supp[r] = kullbackleibler_gaussian_paramters(
            model, values_support)
        kl_usg[r], wkl_usg[r] = kullbackleibler_gaussian_paramters(
            model, values_usage)
        std_rulesalternative.append(np.std(values_usage))
        wacc_supp[r] = wracc_numeric(model, values_support)
        wacc_usg[r] = wracc_numeric(model, values_usage)
    #print(wkl_usg)
    wkl_sum = sum(wkl_usg)
    # Average them all
    measures = dict()
    measures["avg_supp"] = np.mean(support)
    measures["kl_supp"] = np.mean(kl_supp)
    measures["wkl_supp"] = np.mean(wkl_supp)
    measures["avg_usg"] = np.mean(usage)
    measures["kl_usg"] = np.mean(kl_usg)
    measures["wkl_usg"] = np.mean(wkl_usg)
    measures["wacc_supp"] = np.mean(wacc_supp)
    measures["wacc_usg"] = np.mean(wacc_usg)
    uptm = np.triu_indices(nrules - 1, 1)
    measures["jacc_avg"] = np.sum(model.jaccard_matrix) / len(uptm[0])
    measures["n_rules"] = model.number_rules
    measures["avg_items"] = sum([len(ant) for ant in model.antecedent_raw]) / model.number_rules
    measures["wkl_sum"] = wkl_sum
    measures["std_rules"] = np.mean(std_rules)
    measures["top1_std"] = std_rules[0]
    measures["length_orig"] = model.length_original
    measures["length_final"] = model.length_data + model.length_model
    measures["length_ratio"] = model.length_ratio
    return measures
def nominal_discovery_measures(default_prob_per_class, subgroup_bitarray, X, Y):
    nrules = len(subgroup_bitarray)
    nusage_fail = 0  # number of rules whose usage is empty
    nrows = X.shape[0]
    data_prob_class = default_prob_per_class
    wkl_supp, wkl_usg, wkl_sum = np.zeros(nrules), np.zeros(nrules), np.zeros(nrules)
    wacc_supp, wacc_usg = np.zeros(nrules), np.zeros(nrules)
    support, usage = np.zeros(nrules), np.zeros(nrules)
    tid_covered = mpz()
    list_bitsets = []
    number_targets = len(data_prob_class)
    for r, bitarray in enumerate(subgroup_bitarray):
        tid_support = bitarray
        list_bitsets.append(tid_support)
        tid_usage = tid_support & ~tid_covered
        tid_covered = tid_covered | tid_support
        aux_bitset = xmpz(tid_support)
        idx_bits = list(aux_bitset.iter_set())
        values_support = Y.iloc[idx_bits, :].values
        aux_bitset = xmpz(tid_usage)
        idx_bits = list(aux_bitset.iter_set())
        values_usage = Y.iloc[idx_bits, :].values
        support[r] = values_support.shape[0]
        usage[r] = values_usage.shape[0]
        wkl_supp[r], wacc_supp[r] = wkl_wracc(data_prob_class, values_support,
                                              nrows, number_targets)
        if usage[r] != 0:
            wkl_usg[r], wacc_usg[r] = wkl_wracc(data_prob_class, values_usage,
                                                nrows, number_targets)
        else:
            nusage_fail += 1
    wkl_sum = sum(wkl_usg)
    # Average them all
    measures = dict()
    measures["avg_supp"] = np.mean(support)
    measures["wkl_supp"] = np.mean(wkl_supp)
    measures["avg_usg"] = np.sum(usage) / (nrules - nusage_fail)
    measures["wkl_usg"] = np.sum(wkl_usg) / (nrules - nusage_fail)
    measures["wacc_supp"] = np.mean(wacc_supp)
    measures["wacc_usg"] = np.sum(wacc_usg) / (nrules - nusage_fail)
    measures["jacc_avg"], jaccard_matrix = jaccard_index_model(list_bitsets)
    measures["n_rules"] = nrules - nusage_fail
    #measures["avg_items"] = sum([len(sg.pattern) for sg in rulelist.subgroups]) / rulelist.number_rules
    measures["wkl_sum"] = wkl_sum
    measures["wkl_sum_norm"] = wkl_sum / X.shape[0]
    measures["wacc_supp_sum"] = np.sum(wacc_supp)
    measures["wacc_usg_sum"] = np.sum(wacc_usg)
    return measures
def nominal_discovery_measures(rulelist, X, Y):
    nrules = rulelist.number_rules
    nrows = X.shape[0]
    data_prob_class = rulelist.default_rule_statistics.prob_per_classes
    wkl_supp, wkl_usg, wkl_sum = np.zeros(nrules), np.zeros(nrules), np.zeros(nrules)
    wacc_supp, wacc_usg = np.zeros(nrules), np.zeros(nrules)
    support, usage = np.zeros(nrules), np.zeros(nrules)
    tid_covered = mpz()
    list_bitsets = []
    number_targets = len(rulelist.default_rule_statistics.prob_per_classes)
    for r in range(nrules):
        tid_support = rulelist.subgroups[r].bitarray
        list_bitsets.append(tid_support)
        tid_usage = tid_support & ~tid_covered
        tid_covered = tid_covered | tid_support
        aux_bitset = xmpz(tid_support)
        idx_bits = list(aux_bitset.iter_set())
        values_support = Y.iloc[idx_bits, :].values
        aux_bitset = xmpz(tid_usage)
        idx_bits = list(aux_bitset.iter_set())
        values_usage = Y.iloc[idx_bits, :].values
        support[r] = values_support.shape[0]
        usage[r] = values_usage.shape[0]
        wkl_supp[r], wacc_supp[r] = wkl_wracc(data_prob_class, values_support,
                                              nrows, number_targets)
        wkl_usg[r], wacc_usg[r] = wkl_wracc(data_prob_class, values_usage,
                                            nrows, number_targets)
    wkl_sum = sum(wkl_usg)
    # Average them all
    measures = dict()
    measures["avg_supp"] = np.mean(support)
    measures["wkl_supp"] = np.mean(wkl_supp)
    measures["avg_usg"] = np.mean(usage)
    measures["wkl_usg"] = np.mean(wkl_usg)
    measures["wacc_supp"] = np.mean(wacc_supp)
    measures["wacc_usg"] = np.mean(wacc_usg)
    measures["jacc_avg"], jaccard_matrix = jaccard_index_model(list_bitsets)
    measures["n_rules"] = rulelist.number_rules
    measures["avg_items"] = sum([len(sg.pattern) for sg in rulelist.subgroups]) / rulelist.number_rules
    measures["wkl_sum"] = wkl_sum
    measures["wkl_sum_norm"] = wkl_sum / X.shape[0]
    measures["wacc_supp_sum"] = np.sum(wacc_supp)
    measures["wacc_usg_sum"] = np.sum(wacc_usg)
    measures["length_orig"] = rulelist.length_original
    measures["length_final"] = rulelist.length_data + rulelist.length_model
    measures["length_ratio"] = rulelist.length_ratio
    return measures
def countTime():
    millis = 0
    for n in mersenne:
        millis = int(round(time.time() * 1000000000000))
        p = gmpy2.xmpz(n)
        s = gmpy2.xmpz(2)
        # (s**p)-1
        millis = int(round(time.time() * 1000000000000)) - millis
        print("time: %d" % millis)
        with open('file.txt', 'a') as f:
            f.write("%d\n" % (n))
        with open('mersenne.txt', 'a') as f:
            f.write("%d\n" % (millis))
def plotMillerTime(p):
    multiVar = 10000000000
    mills = int(round(time.time() * multiVar))
    n = (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p)) + (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p) - 1)
    while not miller_rabin.millerRabin(n, 2):
        n = (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p)) + (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p) - 1)
    mills = int(round(time.time() * multiVar)) - mills
    return (mills, bit_length(n), totalDigits(n))
def tetration_mod(a: int, height: int, digits: int) -> int:
    """Gets the rightmost digits of a tetration operation

    Notes:
        1. Wikipedia's entry on [Graham's number](https://en.wikipedia.org/wiki/Graham%27s_number)
           describes an algorithm for getting the rightmost digits of a tetration operation:

           > A simple algorithm for computing these digits may be described as follows:
           > let x = 3, then iterate, d times, the assignment x = 3**x mod 10^d. Except for omitting
           > any leading 0s, the final value assigned to x (as a base-ten numeral) is then composed
           > of the d rightmost decimal digits of 3↑↑n, for all n > d. (If the final value of x
           > has fewer than d digits, then the required number of leading 0s must be added.)

        2. Native Python is painfully slow; using [GMP](http://gmplib.org) for integer operations
           is way faster. See also https://gmpy2.readthedocs.io/en/latest/index.html

    Args:
        a (int): Argument
        height (int): Height
        digits (int): Number of rightmost digits to capture

    Returns:
        int: Rightmost digits
    """
    result = gmpy2.xmpz(a)
    for i in range(1, min(height, digits + 1)):
        result = (a**result) % (10**digits)
    return result
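# A quick sanity check of tetration_mod above (a hypothetical usage sketch, not part of the
# original source): 3↑↑3 = 3**27 = 7625597484987, so its 8 rightmost digits are 97484987.
assert tetration_mod(3, 3, 8) == 97484987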
def mk_bitarray(self, index, partition='all'):
    """Produces a bitarray representing the presence / absence of families in the organism using the provided index
    The bitarray is stored in the :attr:`bitarray` attribute and is a :class:`gmpy2.xmpz` type.

    :param index: The index computed by :func:`ppanggolin.pangenome.Pangenome.getIndex`
    :type index: dict[:class:`ppanggolin.genome.Organism`, int]
    """
    self.bitarray = gmpy2.xmpz()  # pylint: disable=no-member
    if partition == 'all':
        logging.getLogger().debug(f"all")
        for fam in self.families:
            self.bitarray[index[fam]] = 1
    elif partition == 'persistent':
        logging.getLogger().debug(f"persistent")
        for fam in self.families:
            if fam.namedPartition in ['persistent']:
                self.bitarray[index[fam]] = 1
    elif partition in ['shell', 'cloud']:
        logging.getLogger().debug(f"shell, cloud")
        for fam in self.families:
            if fam.namedPartition == partition:
                self.bitarray[index[fam]] = 1
    elif partition == 'accessory':
        logging.getLogger().debug(f"accessory")
        for fam in self.families:
            if fam.namedPartition in ['shell', 'cloud']:
                self.bitarray[index[fam]] = 1
    else:
        raise Exception("There is not any partition corresponding please report a github issue")
def P_generic(m, _x, debug=False):
    """
    Used for finding s0, this is a generic implementation because m might be any value
    and optimizations aren't needed anyway since this is called once per number.

    Algorithm found in:
    https://vixra.org/pdf/1303.0195v1.pdf

    Test data can be verified using:
    # for checking s0 for k=2001 b=2
    https://www.wolframalpha.com/input/?i=2+*+chebyshevT%282*2001%2F2%2C+chebyshevT%282%2F2%2C+2%29%29
    """
    print("M {} X {}".format(m, _x))
    m = mpz(m)
    x = mpz(_x)
    a = mpfr(mpfr(2)**-m)
    inner = pow(x, mpz(2))
    inner -= mpz(4)
    inner = sqrt(inner)
    #inner = x - (sqrt4 / x)  # potential replacement in cases x >= 5
    x += inner
    print("x before pow: {:.64f}".format(x))
    print("a before pow: {:.64f}".format(a))
    x **= m
    x *= a
    result = xmpz(round_away(x))
    if debug:
        print("P_gen: {} {}".format(m, _x))
    return result
def generate_p_q(L, N):
    g = N  # g >= 160
    n = (L - 1) // g  # n is the quotient of 1023/160 = 6
    b = (L - 1) % g   # b is the remainder of 1023/160 = 63
    while True:
        # generate q
        while True:
            s = xmpz(randrange(1, 2**(g)))  # randrange returns a random number between 1 and 2^160
            a = sha1(to_binary(s)).hexdigest()  # convert s to binary and take its SHA-1 hash in hexadecimal
            zz = xmpz((s + 1) % (2**g))  # add 1 to s and keep the result within 2^160
            z = sha1(to_binary(zz)).hexdigest()  # convert zz to binary and take its SHA-1 hash in hexadecimal
            U = int(a, 16) ^ int(z, 16)  # convert both hashes to base-16 integers and XOR them
            mask = 2**(N - 1) + 1  # build a mask one bit below 2^160, i.e. (2^159) + 1
            q = U | mask  # OR U with the mask
            if is_prime(q, 20):  # check whether q is prime (20 rounds)
                break
        # generate p
        i = 0  # counter
        j = 2  # offset
        while i < 4096:
            V = []
            for k in range(n + 1):
                arg = xmpz((s + j + k) % (2**g))
                zzv = sha1(to_binary(arg)).hexdigest()
                V.append(int(zzv, 16))
            W = 0
            for qq in range(0, n):
                W += V[qq] * 2**(160 * qq)
            W += (V[n] % 2**b) * 2**(160 * n)
            X = W + 2**(L - 1)
            c = X % (2 * q)
            p = X - c + 1  # p = X - (c - 1)
            if p >= 2**(L - 1):
                if is_prime(p, 10):
                    return p, q
            i += 1
            j += n + 1
def generate_g(p, q):
    while True:
        h = randrange(2, p - 1)
        exp = xmpz((p - 1) // q)
        g = powmod(h, exp, p)
        if g > 1:
            break
    return g
def generate_g(self, p, q):
    # generate g: g = h^((p-1)/q) mod p, with 1 < h < p-1
    while True:
        h = randrange(2, p - 1)  # randomly pick an integer h with 1 < h < p-1 such that h^((p-1)/q) mod p > 1
        exp = xmpz((p - 1) // q)
        g = powmod(h, exp, p)
        if g > 1:
            break
    return g  # the generated g = h^((p-1)/q) mod p
def sieve(limit=1000000):
    sieve_limit = gmpy2.isqrt(limit) + 1
    limit += 1
    bitmap = gmpy2.xmpz(3)
    bitmap[4:limit:2] = -1
    for p in bitmap.iter_clear(3, sieve_limit):
        bitmap[p * p:limit:p + p] = -1
    return bitmap.iter_clear(2, limit)
def apply_bitmask_to_num(mask: str, num: int) -> int:
    num = xmpz(num)
    for i in range(36):
        if mask[i] == '1':
            num[i] = 1
        elif mask[i] == '0':
            num[i] = 0
    return int(num)
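# A brief usage sketch for apply_bitmask_to_num above (hypothetical, not from the original
# source); it assumes the 36-character mask is ordered least-significant bit first, i.e.
# mask[0] corresponds to bit 0, which is how the xmpz indexing in the function reads.
mask = "10" + "X" * 34   # force bit 0 to 1, bit 1 to 0, leave the rest ('X') unchanged
assert apply_bitmask_to_num(mask, 8) == 9   # 0b1000 -> 0b1001
assert apply_bitmask_to_num(mask, 3) == 1   # 0b0011 -> 0b0001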
def MonPro2(X, Y):
    # Montgomery multiplication calc. (a_ * b_) * r_ mod n
    global n
    global s
    #global n_
    #global r
    n11 = modinv(n, 2)  # r = 2
    n11_ = 2 - n11
    A = 0
    m = s  # (n.bit_length() or 1)
    x = gmpy2.xmpz(X)
    y = gmpy2.xmpz(Y)
    for k in range(0, m):
        a = gmpy2.xmpz(A)
        q = mod((a[0] + x[k] * y[0]) * n11_, 2)  # pow(2, k)
        A = (A + x[k] * Y + q * n) // 2
    if (A >= n):  # final conditional subtraction
        A = A - n
    return A
def mkBitarray(self, index):
    """Produces a bitarray representing the presence / absence of the family in the pangenome using the provided index
    The bitarray is stored in the :attr:`bitarray` attribute and is a :class:`gmpy2.xmpz` type.

    :param index: The index computed by :func:`ppanggolin.pangenome.Pangenome.getIndex`
    :type index: dict[:class:`ppanggolin.genome.Organism`, int]
    """
    self.bitarray = gmpy2.xmpz(0)  # pylint: disable=no-member
    for org in self.organisms:
        self.bitarray[index[org]] = 1
def ModExp(a, e, n):
    # Modular exponentiation with MonPro
    global r
    a_ = mod(a * r, n)
    x_ = mod(1 * r, n)
    j = (e.bit_length() or 1)
    e = gmpy2.xmpz(e)
    for i in range(j - 1, -1, -1):  # loop on i from j-1 downto 0
        x_ = MonPro(x_, x_)
        if (e[i] == 1):
            x_ = MonPro(x_, a_)
    return MonPro(x_, 1)  # mod(x_ * r_, n)
def create_organisms_bits(dic_cluster_nb):
    print("CREATE BITS...")
    dic_organism_cluster = {}
    for c in dic_cluster_nb:
        for p in dic_cluster_nb[c]:
            org = p.split("|")[0]
            if org not in dic_organism_cluster:
                dic_organism_cluster[org] = xmpz(0)
            dic_organism_cluster[org][int(c)] = 1
    return dic_organism_cluster
def update_tid_bitsets(model, data, attributes, tid_bitsets):
    index_not_consider = xmpz(model.bitset_covered)
    index_not_consider = list(index_not_consider.iter_set())
    for i_at in attributes:
        if attributes[i_at]["type"] == "numeric":
            # delete cut points from tidbitset and attributes
            for ncut in range(1, attributes[i_at]["ncutpoints"] + 1):
                del attributes[i_at][(i_at, ncut)]
                del attributes[i_at][(i_at, -ncut)]
                del tid_bitsets[(i_at, ncut)]
                del tid_bitsets[(i_at, -ncut)]
            init_bitset_numeric(data, attributes, i_at, tid_bitsets, *index_not_consider)
def generate_p_q(self):
    g = self.N
    n = (self.L - 1) // g
    b = (self.L - 1) % g
    while True:
        while True:
            s = xmpz(randrange(1, 2**(g)))
            a = self.get_hash(s)
            zz = xmpz((s + 1) % (2**g))
            z = self.get_hash(zz)
            U = int(a, 16) ^ int(z, 16)
            mask = 2**(self.N - 1) + 1
            q = U | mask
            if is_prime(q, 20):
                break
        i, j = 0, 2
        while i < 4096:
            V = []
            for k in range(n + 1):
                arg = xmpz((s + j + k) % (2**g))
                zzv = self.get_hash(arg)
                V.append(int(zzv, 16))
            W = 0
            for qq in range(n):
                W += V[qq] * 2**(160 * qq)
            W += (V[n] % 2**b) * 2**(160 * n)
            X = W + 2**(self.L - 1)
            c = X % (2 * q)
            p = X - c + 1
            if p >= 2**(self.L - 1):
                if is_prime(p, 10):
                    return p, q
            i += 1
            j += n + 1
def generate_p_q(L, N):
    g = N  # g >= 160
    n = (L - 1) // g
    b = (L - 1) % g
    while True:
        # generate q
        while True:
            s = xmpz(randrange(1, 2**(g)))
            a = sha1(to_binary(s)).hexdigest()
            zz = xmpz((s + 1) % (2**g))
            z = sha1(to_binary(zz)).hexdigest()
            U = int(a, 16) ^ int(z, 16)
            mask = 2**(N - 1) + 1
            q = U | mask
            if is_prime(q, 20):
                break
        # generate p
        i = 0  # counter
        j = 2  # offset
        while i < 4096:
            V = []
            for k in range(n + 1):
                arg = xmpz((s + j + k) % (2**g))
                zzv = sha1(to_binary(arg)).hexdigest()
                V.append(int(zzv, 16))
            W = 0
            for qq in range(0, n):
                W += V[qq] * 2**(160 * qq)
            W += (V[n] % 2**b) * 2**(160 * n)
            X = W + 2**(L - 1)
            c = X % (2 * q)
            p = X - c + 1  # p = X - (c - 1)
            if p >= 2**(L - 1):
                if is_prime(p, 10):
                    return p, q
            i += 1
            j += n + 1
def compute_statistic_numeric(model, tid):
    statistic = dict()
    # pattern related part
    aux_bitset = xmpz(tid)
    idx_bits = list(aux_bitset.iter_set())
    values = model.target_values[idx_bits]
    statistic["usage"] = values.size
    if statistic["usage"] > 1:
        #statistic["mean"], closest2, diff2 = compute_mean_and_twopoints(values, model.default_statistic["mean"])
        statistic["mean"] = compute_mean(values)
        closest2, diff2 = find2points(values, model.default_statistic["mean"])
        statistic["mean2"] = compute_mean(closest2)
        statistic["variance2"] = compute_RSS(closest2, statistic["mean2"]) / 2
        statistic["RSS2"] = compute_RSS(closest2, model.default_statistic["mean"])
        statistic["variance"] = compute_RSS(values, statistic["mean"]) / statistic["usage"]
        statistic["RSS_default_pattern"] = compute_RSS(values, model.default_statistic["mean"])
    else:
        statistic["mean"] = 0
        statistic["variance"] = 0
        statistic["variance2"] = 0
        statistic["RSS2"] = 0
        statistic["RSS_default_pattern"] = 0
    # last rule related part
    bitset_default = xmpz(model.bitset_uncovered & ~tid)
    idx_bitsdef = list(bitset_default.iter_set())
    values_uncovered = model.target_values[idx_bitsdef]
    statistic["usage_default"] = len(values_uncovered)
    if model.task == "discovery":
        if statistic["usage_default"]:
            statistic["RSS_default_uncovered"] = compute_RSS(values_uncovered, model.default_statistic["mean"])
        else:
            statistic["RSS_default_uncovered"] = 0
    elif model.task == "classification":
        statistic["mean_default"] = np.mean(values_uncovered)
        statistic["variance_default"] = np.var(values_uncovered)
    return statistic
def generate_p_q(self, L, N):
    # generate the primes p and q
    g = N
    n = (L - 1) // g
    b = (L - 1) % g
    while True:
        # generate q
        while True:
            s = xmpz(randrange(1, 2**(g)))  # pick a random seed s below 2^g
            a = sha1(to_binary(s)).hexdigest()
            zz = xmpz((s + 1) % (2**g))
            z = sha1(to_binary(zz)).hexdigest()
            # the SHA-1 digests are 160 bits; q is built by combining the two hashes with a mask
            U = int(a, 16) ^ int(z, 16)
            mask = 2**(N - 1) + 1
            q = U | mask
            if is_prime(q, 20):  # is_prime checks whether q is prime
                break
        # generate p
        i = 0  # counter
        j = 2  # offset
        while i < 4096:
            V = []
            for k in range(n + 1):
                arg = xmpz((s + j + k) % (2**g))
                zzv = sha1(to_binary(arg)).hexdigest()
                V.append(int(zzv, 16))
            W = 0
            for qq in range(0, n):
                W += V[qq] * 2**(160 * qq)
            W += (V[n] % 2**b) * 2**(160 * n)
            X = W + 2**(L - 1)
            c = X % (2 * q)
            p = X - c + 1  # p = X - (c - 1)
            if p >= 2**(L - 1):
                if is_prime(p, 10):
                    # returns a prime p with 2^(L-1) < p < 2^L (L a multiple of 64)
                    # and a prime factor q of p-1 with 2^159 < q < 2^160
                    return p, q
            i += 1
            j += n + 1
def generate_g(p, q):
    while True:
        h = randrange(2, p - 1)  # this is H in our diagram, i.e. the random base
        exp = xmpz((p - 1) // q)  # this is simply the quotient (P-1)/Q in our diagram, e.g. with 1024/160
        #exp = ((p - 1) // q)
        g = powmod(h, exp, p)  # here G is computed, i.e. H^exp mod p
        if g > 1:
            break
    return g
def decrypt(_c, _lambda, _m, _d, _mu, _n):
    c = gmpy2.xmpz(_c)
    lmda = gmpy2.xmpz(_lambda)
    m = gmpy2.xmpz(_m)
    d = gmpy2.xmpz(_d)
    mu = gmpy2.xmpz(_mu)
    n = gmpy2.xmpz(_n)
    b1 = f_mod(pow((f_mod(mul((((pow(c, lmda) % (pow(m, 2)) - 1)) / m), mu), m)), d), n)
    return b1
def decrypt(_c, _lambda, _m, _d, _mu, _n):
    """ (M) = (((C^lambda mod (m^2)-1)/m)*mu mod m)^d mod n """
    c = gmpy2.xmpz(_c)
    lmda = gmpy2.xmpz(_lambda)
    m = gmpy2.xmpz(_m)
    d = gmpy2.xmpz(_d)
    mu = gmpy2.xmpz(_mu)
    n = gmpy2.xmpz(_n)
    b1 = f_mod(pow((f_mod(mul((((pow(c, lmda) % (pow(m, 2)) - 1)) / m), mu), m)), d), n)
    return b1
def encrypt(_g, _s, _e, _n, _m):
    r = gmpy2.xmpz(1)
    g = gmpy2.xmpz(_g)
    s = gmpy2.xmpz(_s)
    e = gmpy2.xmpz(_e)
    n = gmpy2.xmpz(_n)
    m = gmpy2.xmpz(_m)
    b1 = f_mod(e, n)
    b1 = pow(g, pow(s, b1))
    b1 = mul(b1, f_mod(pow(r, m), pow(m, 2)))
    return b1
def encrypt(_g, _s, _e, _n, _m):
    """C = (g^(M^(e mod n)))*((r^m)*mod (m^2))"""
    r = gmpy2.xmpz(1)
    g = gmpy2.xmpz(_g)
    s = gmpy2.xmpz(_s)
    e = gmpy2.xmpz(_e)
    n = gmpy2.xmpz(_n)
    m = gmpy2.xmpz(_m)
    b1 = f_mod(e, n)
    b1 = pow(g, pow(s, b1))
    b1 = mul(b1, f_mod(pow(r, m), pow(m, 2)))
    return b1
def computepi():
    # N: number of decimals
    f = StringIO()
    w = xmpz(0)
    k = 1
    n1 = xmpz(4)
    n2 = xmpz(3)
    d = xmpz(1)
    f10 = xmpz(10)
    n10 = xmpz(-10)
    i = 0
    URL = request.args.get('URL', type=str)
    N = request.args.get('N', default=10, type=int)
    lenght = request.args.get('lenght', default=1, type=int)
    while True:
        # digit
        u = int(div(n1, d))
        v = int(div(n2, d))
        if u == v:
            f.write(chr(48 + u))
            i += 1
            if i % 10 == 0:
                f.write("\t:%d\n" % i)
            if i == N:
                break
            # extract
            u = mul(d, mul(n10, u))
            n1 = mul(n1, f10)
            n1 = add(n1, u)
            n2 = mul(n2, f10)
            n2 = add(n2, u)
        else:
            # produce
            k2 = k << 1
            u = mul(n1, k2 - 1)
            v = add(n2, n2)
            w = mul(n1, k - 1)
            n1 = add(u, v)
            u = mul(n2, k + 2)
            n2 = add(w, u)
            d = mul(d, k2 + 1)
            k += 1
    if lenght != 0:
        contents = urllib.request.urlopen(URL + "?N=" + str(N) + "&lenght=" + str(lenght - 1)).read()
    return (f.getvalue())
def apply_power_bitmask_to_num(mask: str, num: int) -> list:
    queue = [xmpz(num)]
    for i in range(36):
        if mask[i] == '1':
            for j in range(len(queue)):
                queue[j][i] = 1
        elif mask[i] == 'X':
            new_queue = []
            for ele in queue:
                ele1 = ele.copy()
                ele1[i] = 0
                new_queue.append(ele1)
                ele2 = ele.copy()
                ele2[i] = 1
                new_queue.append(ele2)
            queue = new_queue
    return [int(x) for x in queue]
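# Hypothetical usage of apply_power_bitmask_to_num above (same LSB-first mask assumption as
# before): '1' forces a bit to 1, 'X' floats the bit into both values, anything else leaves it.
mask = "X1" + "0" * 34   # bit 1 forced to 1, bit 0 floating
assert apply_power_bitmask_to_num(mask, 4) == [6, 7]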
def computepi():
    # N: number of decimals
    f = StringIO()
    w = xmpz(0)
    k = 1
    n1 = xmpz(4)
    n2 = xmpz(3)
    d = xmpz(1)
    f10 = xmpz(10)
    n10 = xmpz(-10)
    i = 0
    N = request.args.get('N', default=10, type=int)
    while True:
        # digit
        u = int(div(n1, d))
        v = int(div(n2, d))
        if u == v:
            f.write(chr(48 + u))
            i += 1
            if i % 10 == 0:
                f.write("\t:%d\n" % i)
            if i == N:
                break
            # extract
            u = mul(d, mul(n10, u))
            n1 = mul(n1, f10)
            n1 = add(n1, u)
            n2 = mul(n2, f10)
            n2 = add(n2, u)
        else:
            # produce
            k2 = k << 1
            u = mul(n1, k2 - 1)
            v = add(n2, n2)
            w = mul(n1, k - 1)
            n1 = add(u, v)
            u = mul(n2, k + 2)
            n2 = add(w, u)
            d = mul(d, k2 + 1)
            k += 1
    return (f.getvalue())
def add_rule(self, subgroup2add, tid_bitsets, attributes):
    self.number_rules += 1
    tid_cand = subgroup2add.bitset
    self.bitset_covered = self.bitset_covered | tid_cand
    self.support_covered = popcount(self.bitset_covered)
    self.bitset_uncovered = self.bitset_uncovered & ~tid_cand
    self.support_uncovered = popcount(self.bitset_uncovered)
    self.bitset_rules.append(tid_cand)
    self.antecedent_raw.append(subgroup2add.pattern)
    self.statistic_rules.append(subgroup2add.statistic)
    # IN CLASSIFICATION CASE EVERYTHING HAS TO BE UPDATED!
    self.default_statistic["usage"] = self.support_uncovered
    if self.task == "classification":
        aux_tid = xmpz(self.bitset_uncovered)
        idx_bitsdef = list(aux_tid.iter_set())
        values_default = self.target_values[idx_bitsdef]
        self.default_mean["mean"] = np.mean(values_default)
        self.default_variance["variance"] = np.var(values_default)
    # SHOULD BE REMOVED LATER ON
    support = popcount(tid_cand)
    self.support_rules.append(support)
    self.add_pattern4prediction(subgroup2add.pattern, attributes)
    self.consequent_description.append(self.add_description_consequent(subgroup2add))
    self.consequent_lastrule_description = self.add_consequent_lastrule()
    self.add_description_antecedent(subgroup2add.pattern, attributes)
    self.length_model = compute_length_model(self)
    self.length_data = compute_length_data[self.target_type](self)
    self.constant = delta_data_const[self.target_type](self)
    if self.length_original > 0:
        self.length_ratio = (self.length_data + self.length_model) / self.length_original
    elif self.length_original < 0:
        self.length_ratio = self.length_original / (self.length_data + self.length_model)
    return self
def main():
    # CSF - Use gmpy2's divmod instead of the Python built-in, it's slightly faster
    divmod = f_divmod
    bprint = sys.stdout.buffer.write
    N = xmpz(int(sys.argv[1]))
    # CSF - Used by bprint below to save a few usec off each print
    line = "{:010d}\t:{}\n".format
    # CSF - Not very PEP friendly, but the runtime on this benchmark is low, and
    # this is faster than multiple single line assignments
    n, a, d, t, u, i, k, ns, k1 = map(xmpz, (1, 0, 1, 0, 0, 0, 0, 0, 1))
    while True:
        k += 1
        t = n << 1
        n *= k
        a += t
        k1 += 2
        a *= k1
        d *= k1
        if a >= n:
            t, u = divmod(n * 3 + a, d)
            u += n
            if d > u:
                ns = ns * 10 + t
                i += 1
                if not i % 10:  # CSF - faster way of saying if i % 10 == 0
                    bprint(line(ns, i).encode())
                    ns = 0
                if i >= N:
                    break
                a -= d * t
                a *= 10
                n *= 10
def mkBitarray(self, index, partition='all'):
    """Produces a bitarray representing the presence / absence of the family in the pangenome using the provided index
    The bitarray is stored in the :attr:`bitarray` attribute and is a :class:`gmpy2.xmpz` type.

    :param index: The index computed by :func:`ppanggolin.pangenome.Pangenome.getIndex`
    :type index: dict[:class:`ppanggolin.genome.Organism`, int]
    :param partition: partition used to compute bitarray
    :type partition: str
    """
    self.bitarray = gmpy2.xmpz(0)  # pylint: disable=no-member
    if partition == 'all':
        logging.getLogger().debug(f"all")
        for org in self.organisms:
            self.bitarray[index[org]] = 1
    elif partition in ['shell', 'cloud']:
        logging.getLogger().debug(f"shell, cloud")
        if self.namedPartition == partition:
            for org in self.organisms:
                self.bitarray[index[org]] = 1
    elif partition == 'accessory':
        logging.getLogger().debug(f"accessory")
        if self.namedPartition in ['shell', 'cloud']:
            for org in self.organisms:
                self.bitarray[index[org]] = 1
def sieve_gmpy2_iter(limit):
    """ Returns a list of the prime numbers up to limit (from 0 to limit).
    A bit faster
    https://gmpy2.readthedocs.io/en/latest/advmpz.html
    """
    # Increment by 1 to account for the fact that slices do not include
    # the last index value but we do want to include the last value for
    # calculating a list of primes.
    sieve_limit = gmpy2.isqrt(limit) + 1
    limit += 1
    # Mark bit positions 0 and 1 as not prime.
    bitmap = gmpy2.xmpz(3)
    # Process 2 separately. This allows us to use p+p for the step size
    # when sieving the remaining primes.
    bitmap[4:limit:2] = -1
    # Sieve the remaining primes.
    for p in bitmap.iter_clear(3, sieve_limit):
        bitmap[p * p:limit:p + p] = -1
    return bitmap.iter_clear(2, limit)
def sieve(limit=1000000):
    '''credit to: https://stackoverflow.com/questions/2897297/speed-up-bitstring-bit-operations-in-python
    Returns a generator that yields the prime numbers up to limit.'''
    # Increment by 1 to account for the fact that slices do not include
    # the last index value but we do want to include the last value for
    # calculating a list of primes.
    sieve_limit = gmpy2.isqrt(limit) + 1
    limit += 1
    # Mark bit positions 0 and 1 as not prime.
    bitmap = gmpy2.xmpz(3)
    # Process 2 separately. This allows us to use p+p for the step size
    # when sieving the remaining primes.
    bitmap[4 : limit : 2] = -1
    # Sieve the remaining primes.
    for p in bitmap.iter_clear(3, sieve_limit):
        bitmap[p*p : limit : p+p] = -1
    return bitmap.iter_clear(2, limit)
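# Quick check of the sieve generators above (a hypothetical snippet, not part of the original
# sources): the clear bits yielded by iter_clear are exactly the primes up to the limit.
assert list(sieve(30)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert sum(1 for _ in sieve(1000000)) == 78498   # pi(10**6), the number of primes below one million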
def mkBitarray(self, index):
    """ produces a bitarray representing the presence / absence of the family in the pangenome"""
    self.bitarray = gmpy2.xmpz(0)
    for org in self.organisms:
        self.bitarray[index[org]] = 1
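# A hypothetical illustration (names made up, not from ppanggolin) of what these
# presence/absence bitarrays enable: with two families indexed over the same organisms,
# a bitwise AND plus popcount gives the number of organisms carrying both families.
import gmpy2

index = {"org1": 0, "org2": 1, "org3": 2}   # toy organism index

fam_a = gmpy2.xmpz(0)
fam_b = gmpy2.xmpz(0)
for org in ("org1", "org2"):    # family A present in org1 and org2
    fam_a[index[org]] = 1
for org in ("org2", "org3"):    # family B present in org2 and org3
    fam_b[index[org]] = 1

assert gmpy2.popcount(fam_a & fam_b) == 1   # only org2 carries both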
def plotTime(p):
    n = (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p)) + (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p) - 1)
    while not miller_rabin.millerRabin(n, 2):
        n = (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p)) + (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p) - 1)
    print("number: %d and bit: %d" % (n, bit_length(n)))
for n in mersenne:
    millis = int(round(time.time() * 1000000000000))
    p = gmpy2.xmpz(n)
    s = gmpy2.xmpz(2)
    # (s**p)-1
    millis = int(round(time.time() * 1000000000000)) - millis
    print("time: %d" % millis)
    with open('file.txt', 'a') as f:
        f.write("%d\n" % (n))
    with open('mersenne.txt', 'a') as f:
        f.write("%d\n" % (millis))

#countTime()
a = gmpy2.xmpz(1)  # use 4 for good result
b = gmpy2.xmpz(500)
env.digitParameter = a
env.sample_string = " "

# Step 1
env._p = 29
env._q = 31
env._r = 37
env._s = 41
env.allNUmbers = []

# Step 2
env._n = 0
env._m = 0
env._phi = 0
env._lambda = 0
def generateLargePrime(p):
    n = (gmpy2.xmpz(getRandom())**gmpy2.xmpz(p)) + (gmpy2.xmpz(getRandom())**gmpy2.xmpz(p) - 1)
    while not miller_rabin.millerRabin(n, 2):
        n = (gmpy2.xmpz(getRandom())**gmpy2.xmpz(p)) + (gmpy2.xmpz(getRandom())**gmpy2.xmpz(p) - 1)
    return n
    if algorithmRabin_Miller(n, 64) == 1:
        return 1
    return 0

# print(algorithmRabin_Miller(887, 10))

if __name__ == '__main__':
    count_numbers = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    i = 0
    start = timeit.default_timer()
    start_total = start
    while True:
        if i == 100:
            break
        randbytes = os.urandom(384)
        randnumber = int.from_bytes(randbytes, 'big')
        randnumber = xmpz(randnumber)
        randnumber[0] = 1
        randnumber[3071] = 1
        # print(randnumber)
        if advanceRabin_Miller(randnumber) == 0:
            i += 1
            stop = timeit.default_timer()
            took_time = stop - start
            start = timeit.default_timer()
            print(took_time)
            if 0 < took_time and took_time <= 1:
                count_numbers[0] += 1
            elif 1 < took_time and took_time <= 2:
                count_numbers[1] += 1
# The Computer Language Benchmarks Game
# http://benchmarksgame.alioth.debian.org/
#
# contributed by Rene Bakker
# fixed by Isaac Gouy

import sys
from io import StringIO
from gmpy2 import xmpz, div, mul, add

N = int(sys.argv[1])

f = StringIO()
w = xmpz(0)
k = 1
n1 = xmpz(4)
n2 = xmpz(3)
d = xmpz(1)
f10 = xmpz(10)
n10 = xmpz(-10)

i = 0
while True:
    # digit
    u = int(div(n1, d))
    v = int(div(n2, d))
    if u == v:
        f.write(chr(48 + u))
        i += 1
        if i % 10 == 0:
# end of sqr_n_mult
a1 = bit_width(2)
print(a1)

if __name__ == "__main__":
    MM = 3, 7, 11, 29, 59, 107, 239
    random.seed()
    for M in range(0, 2**15):
        if (M % 2 == 0):
            continue
        a = random.randint(0, M - 1)
        e = random.randint(0, M - 1)
        j = (e.bit_length() or 1)
        e = gmpy2.xmpz(e)
        x = sqr_n_mult2(a, e, M)
        orig = pow(a, e, M)
        if x != orig:
            print("Error-B: n={0}; a={1}; e={2}; x={3}; orig={4};".format(M, a, e, x, orig))

    # for n in range(pow(radix, start_bit), pow(radix, finish_bit)):
    #     if (n % 2 == 0):
    #         continue
    #     print("Output: n={0}".format(n))

#http://younglinux.info/python/task/even-odd
#https://habrahabr.ru/post/122538/
#http://forum.sources.ru/index.php?showtopic=348429
# partial unit test for gmpy2 xmpz functionality
# relies on Tim Peters' "doctest.py" test-driver

import gmpy2 as _g, doctest, sys, operator, gc

__test__ = {}
a = _g.xmpz(123)
b = _g.xmpz(456)
aa = _g.mpz(123)
bb = _g.mpz(456)

__test__['index'] = \
r'''
>>> a=_g.xmpz(123)
>>> b=_g.xmpz(456)
>>> range(333)[a]
123
>>> range(333)[b]
Traceback (innermost last):
  ...
IndexError: range object index out of range
'''

__test__['elemop'] = \
r'''
>>> a=_g.xmpz(123)
>>> b=_g.xmpz(456)
>>> a+b
mpz(579)
>>> a-b
mpz(-333)
>>> a*b