Example #1
def shorfactor(n, p_alg=findp, seed=None):
    random.seed(seed)
    keepgoing = True
    guess1 = None
    guess2 = None
    global steps
    g = 2
    while (keepgoing):
        steps += 1
        g += 1  #random.randint(2,n-1)
        if VERBOSE: print("g is ", g)
        common_den = np.gcd(g, n)
        if (common_den != 1):
            if VERBOSE: print("g shares a factor with n, breaking out")
            result = common_den, n // common_den
            guessG(n, result[1])
            return result
        p = p_alg(n, g)
        guess1 = g**(p // 2) + 1
        if VERBOSE: print(guess1)
        guess2 = g**(p // 2) - 1
        #print(guess2)
        if (p % 2 == 1):
            continue
        if (guess1 % n == 0):
            continue
        if (guess2 % n == 0):
            continue
        keepgoing = False
    factor1 = np.gcd(guess1, n)
    result = (factor1, n // factor1)
    guessG(n, result[1])
    return (factor1, n // factor1)
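The identity this loop relies on is easy to check in isolation. A minimal self-contained sketch (period(), n and g below are illustrative stand-ins; findp, guessG, VERBOSE and steps belong to the surrounding project and are not shown):

import numpy as np

def period(g, n):
    # brute-force multiplicative order of g modulo n (illustration only)
    p, acc = 1, g % n
    while acc != 1:
        acc = (acc * g) % n
        p += 1
    return p

n, g = 15, 7
p = period(g, n)                       # 4
if p % 2 == 0:
    print(np.gcd(g**(p // 2) - 1, n))  # 3
    print(np.gcd(g**(p // 2) + 1, n))  # 5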
Example #2
def comp_periodicity_spatial(self):
    """Compute the (anti)-periodicities of the machine in space domain

    Parameters
    ----------
    self : Machine
        A Machine object

    Returns
    -------
    pera : int
        Number of spatial periodicities of the machine over 2*pi
    is_apera : bool
        True if an anti-periodicity is possible after the periodicities
    """

    p = self.get_pole_pair_number()

    # Get stator (anti)-periodicity in spatial domain
    pera_s, is_antipera_s = self.stator.comp_periodicity_spatial()

    # Get rotor (anti)-periodicities in spatial domain
    pera_r, is_antipera_r = self.rotor.comp_periodicity_spatial()

    # Get machine spatial periodicity
    pera = int(gcd(gcd(pera_s, pera_r), p))

    # Get machine time and spatial anti-periodicities
    is_apera = bool(is_antipera_s and is_antipera_r)

    return pera, is_apera
Example #3
def poly_template_transform(data, var_names, run_ids=None, growth_pruning=False, num_cross=None, max_deg=2, var_degs=None, v=False, max_relative_degree=None, max_poly_const_terms=1, problem_number=0, drop_high_order_consts=False, limit_poly_terms_to_unique_vars=False):
    data_dict = {var_names[i]:data[:, i] for i in range(1, len(var_names))}

    terms_dict = get_poly_template(data, var_names, growth_pruning, run_ids=run_ids, num_cross=num_cross, max_deg=max_deg, var_degs_assigned=var_degs, v=v, max_relative_degree=max_relative_degree, max_poly_const_terms=max_poly_const_terms, limit_poly_terms_to_unique_vars=limit_poly_terms_to_unique_vars, drop_high_order_consts=drop_high_order_consts)
    if v:
        print("filtered poly terms: {}".format(", ".join(terms_dict.keys())))


    new_data, new_var_names = [], []
    term_tuple_list = sorted([(k, v) for k, v in terms_dict.items()])
    for i in range(len(term_tuple_list)):
        term_str, term_degs = term_tuple_list[i]
        new_var_names.append(term_str)
        new_col = np.ones(len(data), dtype=np.float64)
        for var, var_deg in term_degs.items():
            new_col *= data_dict[var] ** var_deg
        new_data.append(new_col)
    if problem_number in ('egcd2', 'egcd3', 'lcm1', 'lcm2'):
        new_data.append(np.gcd(data_dict['x'].astype(int), data_dict['y'].astype(int)))
        new_data.append(np.gcd(data_dict['a'].astype(int), data_dict['b'].astype(int)))
        new_var_names += ['GCD(x,y)', 'GCD(a,b)']
    elif problem_number in ('fermat1', 'fermat2'):
        new_data.append(data_dict['u'].astype(int) % 2)
        new_data.append(data_dict['v'].astype(int) % 2)
        new_var_names += ['(% u 2)', '(% v 2)']
    new_data = np.asarray(new_data).transpose()
    return new_data, new_var_names
Example #4
def getinvmodmat2(A, M):
    """
    :param A: the matrix
    :param M: the modulus
    :return: the inverse of A modulo M
    """
    N = len(A)
    E = np.eye(N, dtype=int)  # identity matrix
    # print(E)
    A = np.hstack((A, E))  # augment A with the identity
    print(A)
    for j in range(N):
        for i in range(j, N):
            if np.gcd(A.item(i, j), M) > 1:
                print(np.gcd(A.item(i, j), M))
                continue
            tmp = mul_inv(A.item(i, j), M)
            print(j, i, tmp)
            A[i] *= tmp
            A[i] %= M
            if i != j:
                A[[i, j], :] = A[[j, i], :]
            break
        for i in range(N):
            if i != j:
                A[i] -= (A.item(i, j) * A[j] % M)
                # A[i]=A[i]*A.item(j,j)-A[j]*A.item(i,j)
                A[i] %= M
        print(A)
    return A[:, N:N * 2]
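A hypothetical usage check, assuming mul_inv(a, M) in the original module returns the modular inverse of the scalar a modulo M:

import numpy as np

A = np.array([[1, 2], [3, 4]])
inv = getinvmodmat2(A, 7)   # prints intermediate matrices, returns [[5, 1], [5, 3]]
print((A @ inv) % 7)        # identity matrix, confirming A * inv ≡ I (mod 7)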
Example #5
def shor(N):
    if isprime(N): 
        #print('{} is a prime'.format(N))
        return -1
    if N%2==0: 
        #print('{}: N is even'.format(N))
        return 2
    for i in range(3,int(np.log2(N))):
        j=i
        while(j<=N):
            if j==N: 
                #print(str(N)+': trivially, N={}^a for some int a'.format(i))
                return i
            j*=i
    while(1):
        x=random.randint(2,int(np.sqrt(N)))
        if (np.gcd(x,N)!=1): 
            #print('{}: by randoming, got factor {}'.format(N,np.gcd(x,N)))
            return(np.gcd(x,N))
        #print('Attempting: N={}, x={}'.format(N,x))
        r=classical_find_r_U(x,N)
        #print('found r={}'.format(r))
        if (r%2==1): continue
        #print('(N,x,r)=({},{},{})'.format(N,x,r))
        a=np.gcd((x**(int(r/2))-1)%N,N)
        #print('a={}, ie {}'.format(a, int(a)))
        if (a==1 or a==N): continue
        print('{}: Found (N,x,r)={},{},{} with factor {}'.format(N,N,x,r,np.gcd(int(a),N)))
        return np.gcd(int(a), N)
Example #6
    def mask_sphere(self, radius, cx, cy, cz):
        """
        Create a mask for a sphere with radius=radius, centered at cx, cy, cz.

        Args:
            radius: (float) radius of the mask (in Angstroms)
            cx, cy, cz: (float) the fractional coordinates of the center of the sphere
        """
        dx, dy, dz = (
            np.floor(radius / np.linalg.norm(self.X)).astype(int),
            np.floor(radius / np.linalg.norm(self.Y)).astype(int),
            np.floor(radius / np.linalg.norm(self.Z)).astype(int),
        )
        gcd = max(np.gcd(dx, dy), np.gcd(dy, dz), np.gcd(dx, dz))
        sx, sy, sz = dx // gcd, dy // gcd, dz // gcd
        r = min(dx, dy, dz)

        x0, y0, z0 = int(np.round(self.NX * cx)), int(np.round(self.NY * cy)), int(np.round(self.NZ * cz))

        centerx, centery, centerz = self.NX // 2, self.NY // 2, self.NZ // 2
        a = np.roll(self.data, (centerx - x0, centery - y0, centerz - z0))

        i, j, k = np.indices(a.shape, sparse=True)
        a = np.sqrt((sx * i - sx * centerx) ** 2 + (sy * j - sy * centery) ** 2 + (sz * k - sz * centerz) ** 2)

        indices = a > r
        a[indices] = 0

        return a
Example #7
    def makeslab(self, miller_index, length=-1.0, layer=-1, method="bf", origin_shift=0.0, vacuum=15.0):
        '''
        self is the unit cell
        '''
        P=np.mat(np.eye(3, dtype=np.float64))
        h,k,l=miller_index
        e=np.gcd(h, np.gcd(k,l))
        if e>1:
            h=h//e
            k=k//e
            l=l//e
            print("Using reduced Miller index ( ",h,k,l," ) instead.")

        if h==0 and k==0 and l==0:
            print("miller_index cannot be 0 0 0!")
            raise AssertionError
        elif h==0 and k==0:
            P[2,2]=layer
        elif k==0 and l==0: # slab not along z
            P[0,0]=layer
        elif h==0 and l==0:
            P[1,1]=layer
        else:
            p,q, _ =ext_euclid(k,l)
            c1,c2,c3=self.cell
    
            k1=np.dot(p*(k*c1-h*c2)+q*(l*c1-h*c3), l*c2-k*c3)
            k2=np.dot(l*(k*c1-h*c2)-k*(l*c1-h*c3), l*c2-k*c3)
            if np.fabs(k2) > self.eps1:
                i=-int(round(k1/k2))
                p,q=p+i*l,q-i*k
            
            P[0]=(p * k + q * l, -p * h, -q * h)
            P[1]=np.array((0, l, -k)) // abs(np.gcd(l, k))
            P[2]=(h,k,l)
            P[2]*=layer
            P=P.T
      
        if np.linalg.det(P)<0:
            P[0,2]*=-1
            P[1,2]*=-1
            P[2,2]*=-1
        print("P1 = ", P)

        slab=CELL.cell2supercell(self,P)
        slab.cell_redefine()
        slab.add_vacuum(vacuum)

        reduced_slab=CELL.reduce_slab(slab)
        print("reduced slab cell \n", reduced_slab.cell)
        ang_B=fan(reduced_slab.cell[0],reduced_slab.cell[1])
        edg_a=np.linalg.norm(reduced_slab.cell[0])
        edg_c=np.linalg.norm(reduced_slab.cell[1])
        print("reduced slab No. of atoms: ", reduced_slab.nat)
        print("slab and vacuum length: ", abs(reduced_slab.cell[2,2])-reduced_slab.get_vac(), reduced_slab.get_vac(), "Ang.")
        print("inplane edge and angle: ", edg_a, edg_c," Ang. ", ang_B," degree.")
        print("reduced slab cell area: ", np.sin(ang_B/180*np.pi)*edg_a*edg_c, " Ang^2.")

        return reduced_slab
Example #8
def eratosthenes(n, lim):
    Beta = [-1, 2]
    for num in PRIMES[1:]:
        if gmpy2.legendre(n, num) == 1:
            Beta.append(num)
            if len(Beta) == lim - 1:
                break
    m = int(math.sqrt(n))
    df = pd.DataFrame({
        'x': interval,
        'x + m': [x + m for x in interval],
        'b': [q(x, m, n) for x in interval]
    })
    df['lg|b|'] = [math.log(abs(b), 10) for b in df['b']]
    for p in Beta[1:]:
        X = []
        for x in congruence(m, n, p):
            X = find_x('+', X, x, p, lim)
            X = find_x('-', X, x, p, lim)
        if X == []:
            Beta.remove(p)
        else:
            df[f'lg{p}'] = [(1 if df['x'][i] in X else 0)
                            for i in range(len(interval))]
            if sum(df[f'lg{p}'].values) == 1:
                del df[f'lg{p}']
                Beta.remove(p)
    diff = []
    for i in range(len(interval)):
        ans = df['lg|b|'][i]
        for p in Beta[1:]:
            ans -= math.log(p, 10) * df[f'lg{p}'][i]
        diff.append(ans)
    df['lg|b| - lgp'] = diff
    potential_Beta_numbers = df['b'][
        df['lg|b| - lgp'] < np.mean(df['lg|b| - lgp'])].values.tolist()
    vectors, Beta_numbers = [], []
    for num in potential_Beta_numbers:
        factorization = factorization_small_n(num)
        if np.in1d(factorization, Beta).all():
            Beta_numbers.append(num)
            vectors.append([factorization.count(j) for j in Beta])
    df['v'] = [(vectors[Beta_numbers.index(num)]
                if num in Beta_numbers else 'Not Beta') for num in df['b']]
    for comb in combination(vectors):
        if sum_vectors(comb) == len(Beta) * [0]:
            X, Y = 1, 1
            for v in comb:
                X *= int(
                    df['x + m'][df['b'] == Beta_numbers[vectors.index(v)]])
                Y *= int(abs(Beta_numbers[vectors.index(v)]))
            Y = int(math.sqrt(Y))
            d = np.gcd(X + Y, n)
            if 1 < d < n:
                return d, df
            d = np.gcd(X - Y, n)
            if 1 < d < n:
                return d, df
    return 0
Example #9
def _gcd_recursive(*args):
    """
    Get the greatest common divisor of any number of ints
    """
    if len(args) == 2:
        return np.gcd(*args)
    else:
        return np.gcd(args[0], _gcd_recursive(*args[1:]))
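For reference, np.gcd is a NumPy ufunc, so the same multi-argument reduction is available without recursion:

import numpy as np

print(np.gcd.reduce([12, 18, 24]))   # 6
print(_gcd_recursive(12, 18, 24))    # 6, same result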
Example #10
def getqpm(rate, bdepth, maxp=100):
    """Return integer (q,p,m) such that q/p ~ rate, m/p ~ bdepth."""
    denom = np.arange(maxp, dtype=np.int64) + 1
    rerr, berr = denom * rate, denom * bdepth
    err = (rerr - np.floor(rerr) + berr - np.floor(berr)) / denom
    p = denom[np.argmin(err)]
    q = np.int64(np.floor(rate * p))
    m = np.int64(np.floor(bdepth * p))
    gcd = np.gcd(np.gcd(q, p), m)
    return q // gcd, p // gcd, m // gcd
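A quick check of the intent (the expected output below is worked out by hand, not taken from the original project): rate = 0.75 and bdepth = 0.5 are both exact at p = 4, so q/p = 3/4 and m/p = 2/4.

q, p, m = getqpm(0.75, 0.5)
print(q, p, m)   # 3 4 2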
Example #11
def find_co_prime_stochastic(n):
    """
    Randomly pick a candidate and test whether it is coprime with n.
    :param n:  The number that should be factored into primes.
    :return: A number coprime with n, or minus the common factor found otherwise.
    """
    a = int(np.random.randint(2, high=n))
    if np.gcd(n, a) != 1:
        return -np.gcd(n, a)
    return a
Example #12
    def run(self, fnprovider):
        fns = {}

        size = 1500

        fn = fnprovider.get_filename('.png', 'pairwise')
        fns['Are pairwise divisible'] = fn
        Image.fromarray(
            np.fromfunction(
                lambda x, y: 255 * np.uint8(np.gcd(x + 1, y + 1) != 1),
                shape=(size, size),
                dtype=np.uint32), 'L').save(fn)

        fn = fnprovider.get_filename('.png', 'gcd')
        fns['GCD / MAX'] = fn
        Image.fromarray(
            np.fromfunction(lambda x, y: np.uint8(
                np.gcd(x + 1, y + 1) * 255 / np.maximum(x + 1, y + 1)),
                            shape=(size, size),
                            dtype=np.uint32), 'L').save(fn)

        fn = fnprovider.get_filename('.png', 'gcdmod')
        fns['GCD mod time'] = fn
        stepsmod: np.ndarray = np.fromfunction(
            lambda x, y: np.vectorize(gcd_mod)(x + 1, y + 1),
            shape=(size, size),
            dtype=np.uint32)
        stepsmod_n = stepsmod * 255 / np.max(stepsmod)
        Image.fromarray(np.uint8(stepsmod_n), 'L').save(fn)

        fn = fnprovider.get_filename('.png', 'gcdsub')
        fns['GCD sub time'] = fn
        stepssub: np.ndarray = np.fromfunction(
            lambda x, y: np.vectorize(gcd_sub)(x + 1, y + 1),
            shape=(size, size),
            dtype=np.uint32)
        stepssub_n = stepssub * 255 / np.max(
            stepssub)  # Normalize the step counts
        Image.fromarray(np.uint8(stepssub_n), 'L').save(fn)

        fn = fnprovider.get_filename('.png', 'gcdboth')
        fns['both GCD (blue = SUB, red = MOD) (log scale)'] = fn
        stepssub = np.log(stepssub)
        stepsmod = np.log(stepsmod)
        mx = max(np.max(stepssub), np.max(stepsmod))
        stepssub_n2 = np.uint8(stepssub * 255 / mx)
        stepsmod_n2 = np.uint8(stepsmod * 255 / mx)
        Image.fromarray(
            np.dstack((stepssub_n2, np.zeros(
                (size, size), dtype=np.uint8), stepsmod_n2)), 'RGB').save(fn)

        return ',\n'.join('{}: {}'.format(i, j) for i, j in fns.items())
Example #13
def CFRAC_Brillhart_Morrison(n, lim, v=1, Beta=None, d=0):
    if Beta is None:  # avoid sharing a mutable default list across calls
        Beta = [-1, 2]
    TABLE = pd.DataFrame({
        'S': ['a', 'bmodn', 'b^2modn'],
        '-1': ['-', 1, 1]
    }).set_index('S')
    alpha = math.sqrt(n)
    a = int(alpha)
    u = a
    b = counting_b2(a, n)
    TABLE['0'] = [a, a, b]
    Beta, w = update_factor_base(Beta, n, b, [], TABLE, lim)
    it = 1
    while True:
        v1 = (n - u**2) / v
        alpha1 = (alpha + u) / v1
        a1 = int(alpha1)
        u1 = v1 * a1 - u
        b = (a1 * TABLE[f'{it-1}'][1] + TABLE[f'{it-2}'][1]) % n
        b2 = counting_b2(b, n)
        TABLE[f'{it}'] = [a1, b, b2]
        Beta, w = update_factor_base(Beta, n, b2, w, TABLE, lim)
        for comb in combination(w):
            if sum_vectors(comb) == (len(Beta)) * [0]:
                X, Y = 1, 1
                for vec in comb[:-1]:
                    X = float(X * TABLE[f'{w.index(vec)}'][1] % n)
                    Y = float(Y * TABLE[f'{w.index(vec)}'][2])
                X = X * TABLE[f'{it}'][1] % n
                Y = math.sqrt(Y * TABLE[f'{it}'][2])
                d1 = np.gcd(int(X + Y), n)
                if 1 < d1 < n:
                    d = d1
                    break
                else:
                    d2 = np.gcd(int(X - Y), n)
                    if 1 < d2 < n:
                        d = d2
                        break
        if d == 0:
            it += 1
            if it > 21:
                return 0
            u = u1
            v = v1
            a = a1
        else:
            print(TABLE)
            print('\nFactor base: ', Beta)
            for vec in comb[:-1]:
                print(f'\nν_{w.index(vec)} = {w[w.index(vec)]}')
            print(f'\nν_{it} = {w[it]}')
            return d
Example #14
def rational_sum(numerator, denominator, *argv):
    """Sum of rational numbers."""
    if len(argv) < 2:
        gcd = numpy.gcd(numerator, denominator)
        num_out, den_out = numerator//gcd, denominator//gcd
    else:
        num_2 = argv[0]
        den_2 = argv[1]
        num_3 = numerator*den_2 + num_2*denominator
        den_3 = denominator*den_2
        gcd = numpy.gcd(num_3, den_3)
        num_out, den_out = rational_sum(num_3//gcd, den_3//gcd, *argv[2:])
    return num_out, den_out
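With the recursive call unpacking the remaining arguments (as above), a quick sanity check:

num, den = rational_sum(1, 2, 1, 3)
print(num, den)   # 5 6, i.e. 1/2 + 1/3 = 5/6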
Example #15
def addell(p1, p2, a, b, n):
    '''This function adds points on the elliptic curve
    y^2 = x^3 + ax + b mod n.
    The points are represented as
    p1[0] = x1    p1[1] = y1
    p2[0] = x2    p2[1] = y2'''
    if p1[0] == inf and p1[1] == inf:
        return p2

    if p2[0] == inf and p2[1] == inf:
        return p1

    x1 = p1[0]
    x2 = p2[0]
    y1 = p1[1]
    y2 = p2[1]
    z1 = 1  # this will store the gcd in case the addition produces a factor of n

    if (x1 == x2) and ((y1 == y2 == 0) or (y1 != y2)):  # infinity cases
        return (inf, inf)

    if p1 == p2 and np.gcd(y1, n) != 1 and np.gcd(y1, n) != n:
        z1 = np.gcd(y1, n)
        #print('Elliptic Curve addition produced a factor of n, factor = ', z1)
        return (z1, )

    if p1 == p2:
        temp = np.mod(2 * y1, n)
        if temp == 0:
            return (inf, inf)
        den = invmodn(2 * y1, n)
        num = np.mod(x1 * x1, n)
        num = np.mod(np.mod(3 * num, n) + a, n)
    else:  # case p1 ~= p2
        if np.gcd(x2 - x1, n) != 1:
            #print('Elliptic Curve addition produced a factor of n, factor = ', np.gcd(x2 - x1, n))
            return (np.gcd(x2 - x1, n), )
        temp = np.mod(x2 - x1, n)
        if np.mod(n, temp) == 0:  # Infinity case
            return (inf, inf)
        den = invmodn(temp, n)
        num = np.mod(y2 - y1, n)

    m = np.mod(num * den, n)
    temp = np.mod(m * m, n)
    x3 = np.mod(temp - x1 - x2, n)
    temp = x1 - x3
    y3 = np.mod(m * temp, n)
    y3 = np.mod(y3 - y1, n)
    return (x3, y3)
Example #16
 def compute_shors(self):
     """
     Run the measured states through the classical part of Shor's algorithm.
     @return: The two candidate factors of N.
     """
     thing = Shors.compute_r(self)
     r = Shors.cf(self, thing)
     while r % 2 != 0:
         r = Shors.cf(self, Shors.compute_r(self))
     a1 = int((self.a**(r // 2)) - 1)
     a2 = int((self.a**(r // 2)) + 1)
     return np.gcd(a1, self.N), np.gcd(a2, self.N)
Example #17
def dS(p, q, i):
    # checking input values are positive integers
    if ((not isinstance(p, int)) or (p <= 0)):
        raise Exception('p must be a positive integer. p was {}.'.format(p))

    # if ((not isinstance(q, int)) or (q <= 0)):
    #     raise Exception('q must be a positive integer. q was {}.'.format(q))

    if ((not isinstance(i, int)) or (i < 0)):
        raise Exception(
            'i must be a non-negative integer. i was {}.'.format(i))

    # checking p and q are coprime
    if (np.gcd(p, q) != 1):
        raise Exception('p and q must be coprime.')

    # we terminate if p = 1
    if (p == 1):
        return Fraction(0, 1)

    q = q % p
    i = i % p
    r = Fraction(((((2 * i) + 1 - p - q)**2) - p * q), (4 * p * q))
    # our recursive step is safe since we have checked for bad input and reduced q and i mod p.
    return r - dS(q, p, i)
Example #18
def preprocess(stream):
    # time alignment
    start_time = max([trace.stats.starttime for trace in stream])
    end_time = min([trace.stats.endtime for trace in stream])
    if start_time>end_time: print('bad data!'); return []
    st = stream.slice(start_time, end_time)
    # resample data
    org_rate = int(st[0].stats.sampling_rate)
    rate = np.gcd(org_rate, samp_rate)
    if rate==1: print('warning: bad sampling rate!'); return []
    decim_factor = int(org_rate / rate)
    resamp_factor = int(samp_rate / rate)
    st = st.decimate(decim_factor)
    if resamp_factor!=1: st = st.interpolate(samp_rate)
    # filter
    st = st.detrend('demean').detrend('linear').taper(max_percentage=0.05, max_length=10.)
    freq_min, freq_max = freq_band
    if freq_min and freq_max:
        return st.filter('bandpass', freqmin=freq_min, freqmax=freq_max)
    elif not freq_max and freq_min:
        return st.filter('highpass', freq=freq_min)
    elif not freq_min and freq_max:
        return st.filter('lowpass', freq=freq_max)
    else:
        print('filter type not supported!'); return []
Example #19
def zero_determinant_binary_reduction(form):
    if form.det() != 0:
        raise Exception("The determinant of the form", form, "isn't zero.")

    a = form[0, 0]
    b = form[0, 1]
    c = form[1, 1]

    if a == 0:
        raise Exception(
            "Something's wrong. This method shouldn't have been called, right?"
        )

    m = np.gcd(a, c)

    # Why math and not numpy? Because numpy yields a stupid error. It's easier just to use math in this case.
    g = int(math.sqrt(a // m))
    h = int(math.sqrt(c // m))

    if g * h != b // m:
        h *= -1

    _, goth_g, goth_h = xgcd(g, h)

    return Matrix([[h, h + goth_g], [-g, -g + goth_h]])
Example #20
def are_pairwise_coprime(l):
    return functools.reduce(
        lambda x, y: x and y,
        list(
            map(lambda t: np.gcd(t[0], t[1]) == 1,
                [[l[i], l[j]] for i in range(len(l))
                 for j in range(len(l)) if i != j])))
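The same check can be written a little more directly with itertools; a sketch (are_pairwise_coprime_v2 is a hypothetical name, not part of the original code):

from itertools import combinations
import numpy as np

def are_pairwise_coprime_v2(l):
    # test each unordered pair exactly once instead of both (i, j) and (j, i)
    return all(np.gcd(a, b) == 1 for a, b in combinations(l, 2))

print(are_pairwise_coprime_v2([3, 5, 7, 8]))   # True
print(are_pairwise_coprime_v2([6, 10, 15]))    # False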
Example #21
	def resample(self, data, src_rate=30, dst_rate=50, axis=0):
		"""
		Resample data from src_rate to dst_rate using polyphase filtering
		"""
		denom = np.gcd(dst_rate, src_rate)
		new_data = signal.resample_poly(data, dst_rate // denom, src_rate // denom, axis)
		return new_data
Example #22
File: utils.py    Project: sgraetzer/pystoi
def _resample_window_oct(p, q):
    """Port of Octave code to Python"""

    gcd = np.gcd(p, q)
    if gcd > 1:
        p /= gcd
        q /= gcd

    # Properties of the antialiasing filter
    log10_rejection = -3.0
    stopband_cutoff_f = 1. / (2 * max(p, q))
    roll_off_width = stopband_cutoff_f / 10

    # Determine filter length
    rejection_dB = -20 * log10_rejection
    L = np.ceil((rejection_dB - 8) / (28.714 * roll_off_width))

    # Ideal sinc filter
    t = np.arange(-L, L + 1)
    ideal_filter = 2 * p * stopband_cutoff_f \
        * np.sinc(2 * stopband_cutoff_f * t)

    # Determine parameter of Kaiser window
    if (rejection_dB >= 21) and (rejection_dB <= 50):
        beta = 0.5842 * (rejection_dB - 21)**0.4 \
            + 0.07886 * (rejection_dB - 21)
    elif rejection_dB > 50:
        beta = 0.1102 * (rejection_dB - 8.7)
    else:
        beta = 0.0

    # Apodize ideal filter response
    h = np.kaiser(2 * L + 1, beta) * ideal_filter

    return h
Example #23
File: script.py    Project: szsctt/AOC_2020
def main():

    # read input
    with open("input", 'r') as inhandle:
        arrival = int(next(inhandle).strip())
        busses = next(inhandle).strip().split(",")

    #least_min_waited, earliest_bus_number = find_earliest_bus(arrival, busses)
    #print(f"min waited: {least_min_waited}, bus num: {earliest_bus_number}, product: {least_min_waited*earliest_bus_number}")

    a = []
    n = []
    for i, bus in enumerate(busses):
        if bus == 'x':
            continue
        print(f"bus {bus} at time t + {i}")
        a.append(i)
        n.append(int(bus))

    # check pairwise coprime
    for i in n:
        for j in n:
            if i == j:
                continue
            if np.gcd(i, j) != 1:
                print(f"{i}, {j} are not coprime")

    i = satisfy_constraints(busses)
    print(int(i))
    print()
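satisfy_constraints is not shown in this excerpt; once the moduli are known to be pairwise coprime, the implied system of congruences can be solved with the Chinese Remainder Theorem. A minimal sketch (crt is a hypothetical helper; pow(m, -1, n) needs Python 3.8+):

from functools import reduce

def crt(residues, moduli):
    # fold congruences x ≡ r (mod m) together, assuming pairwise-coprime moduli
    def combine(acc, rm):
        r1, m1 = acc
        r2, m2 = rm
        k = ((r2 - r1) * pow(m1, -1, m2)) % m2  # lift r1 so it also satisfies x ≡ r2 (mod m2)
        return (r1 + m1 * k, m1 * m2)
    return reduce(combine, zip(residues, moduli), (0, 1))

print(crt([0, 3], [7, 5]))   # (28, 35): 28 ≡ 0 (mod 7) and 28 ≡ 3 (mod 5)
# For the bus puzzle above, the residues would be [-offset % bus for offset, bus in zip(a, n)].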
Example #24
def calc1(data):
    a = np.array([list(line) for line in data.split()])
    coords = np.argwhere(a.T == '#')
    # print('coords', coords)

    counts = np.zeros(coords.shape[0], int)
    for i in range(len(coords)):
        cx, cy = coords[i]
        rest = np.delete(coords, i, axis=0)
        rest -= coords[i]
        # print('considering:', cx, cy)
        # print('rebased', rest)
        rest = np.array(sorted(rest, key=lambda p: np.max(np.abs(p))))
        # print('sorted', rest)
        masked = np.zeros(len(rest), bool)
        for j in range(len(rest)-1):
            if masked[j]:
                continue
            # print('masking:', rest[j])
            x, y = rest[j] // np.gcd(*rest[j])
            # eg (4, 2) masks (2, 1), (4, 2), (6, 3)
            # multiples
            ds = rest[j+1:,0] * y == rest[j+1:,1] * x
            # print(ds)
            # same quarter
            q = ((rest[j+1:] * rest[j]) >= 0).all(axis=1)
            # print(q)
            masked[j+1:] = masked[j+1:] | (ds & q)
            # print('result:', masked)

        # print('masked:', masked)
        sees = (~masked).sum()
        counts[i] = sees

    return coords, counts
Example #25
def med(x):
    '''
    Calculate the middle divisor. This program is used to determine the optimal grid line spacing for regional maps.

    Usage: 
    y = med(x)
    
    Inputs:
    x -> [int] The longitude or latitude span of a map. It cannot be a prime number.

    Outputs:
    y -> [int] the optimal grid line spacing

    Examples:
    >>> print(med(20))
    4
    >>> print(med(25))
    5
    '''
    y = np.unique(np.gcd(np.arange(x),x))
    n = len(y)
    if n%2 == 1:
        return y[n//2]
    else:
        return y[n//2-1]
Example #26
def zcsequence(u, seq_length, q=0):
    """
    Generate a Zadoff-Chu (ZC) sequence.
    Parameters
    ----------
    u : int
        Root index of the ZC sequence: u>0.
    seq_length : int
        Length of the sequence to be generated. Usually a prime number:
        u<seq_length, greatest-common-divisor(u,seq_length)=1.
    q : int
        Cyclic shift of the sequence (default 0).
    Returns
    -------
    zcseq : 1D ndarray of complex floats
        ZC sequence generated.
    """

    for el in [u, seq_length, q]:
        if not float(el).is_integer():
            raise ValueError('{} is not an integer'.format(el))
    if u <= 0:
        raise ValueError('u is not strictly positive')
    if u >= seq_length:
        raise ValueError('u is not strictly smaller than seq_length')
    if np.gcd(u, seq_length) != 1:
        raise ValueError(
            'the greatest common divisor of u and seq_length is not 1')

    cf = seq_length % 2
    n = np.arange(seq_length)
    zcseq = np.exp(-1j * np.pi * u * n * (n + cf + 2. * q) / seq_length)

    return zcseq
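A quick property check with illustrative parameters (any root u coprime with the length works):

z = zcsequence(u=25, seq_length=63)
print(np.allclose(np.abs(z), 1.0))   # True: ZC sequences have constant amplitude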
Example #27
def main():
    continueflag = ''
    while continueflag != 'quit':
        #get and validate operation from user
        operation = input('encipher (e) or decipher (d): ')
        while operation != 'e' and operation != 'd':
            operation = input('encipher (e) or decipher (d): ')
        message = input('message: ')
        #get the a and b keys
        flag = False
        while not flag:
            try:
                a = int(input('a: '))
                b = int(input('b: '))
                #make sure a and m (the length
                #of the alphabet) are coprime
                if np.gcd(a, len(ALPHABET)) == 1:
                    flag = True
                else:
                    print(f'length of alphabet, m, is {len(ALPHABET)}')
                    print('a and m must be coprime')
            #make sure a and b are integers
            except ValueError:
                pass
        if operation == 'e':
            ciphertext = encipher(message, a, b)
            print(f'ciphertext: {ciphertext}')
        else:
            plaintext = decipher(message, a, b)
            print(f'plaintext: {plaintext}')
        #see if the user wants to quit
        continueflag = input("'quit' to quit: ")
    print('exiting')
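encipher, decipher and ALPHABET live elsewhere in the original project; below is a minimal sketch of the usual affine mapping, which also shows why gcd(a, m) == 1 is required (a must be invertible mod m). The helper names match the calls above only by assumption, and pow(a, -1, m) needs Python 3.8+.

import numpy as np

ALPHABET = 'abcdefghijklmnopqrstuvwxyz'

def encipher(message, a, b):
    m = len(ALPHABET)
    assert np.gcd(a, m) == 1, 'a and m must be coprime'
    return ''.join(ALPHABET[(a * ALPHABET.index(c) + b) % m] if c in ALPHABET else c
                   for c in message.lower())

def decipher(message, a, b):
    m = len(ALPHABET)
    a_inv = pow(a, -1, m)  # modular inverse of a, which exists only when gcd(a, m) == 1
    return ''.join(ALPHABET[(a_inv * (ALPHABET.index(c) - b)) % m] if c in ALPHABET else c
                   for c in message.lower())

print(encipher('abc', 5, 8))   # ins
print(decipher('ins', 5, 8))   # abc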
Example #28
def phi(n):
    num = 0
    for i in range(1, n):
        if np.gcd(i, n) == 1:
            num += 1

    return num
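A quick check of Euler's totient behaviour:

print(phi(12))   # 4: only 1, 5, 7 and 11 are coprime with 12
print(phi(13))   # 12: for a prime p, phi(p) = p - 1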
Example #29
def _get_n_batches_and_batch_sizes(
        n1: int, n2: int, batch_size: int,
        device_count: int) -> Tuple[int, int, int, int]:
    # TODO(romann): if dropout batching works for different batch sizes, relax.
    max_serial_batch_size = onp.gcd(n1, n2) // device_count

    n2_batch_size = min(batch_size, max_serial_batch_size)
    if n2_batch_size != batch_size:
        warnings.warn(
            'Batch size is reduced from requested %d to effective %d to '
            'fit the dataset.' % (batch_size, n2_batch_size))

    n1_batch_size = n2_batch_size * device_count
    n1_batches, ragged = divmod(n1, n1_batch_size)
    if ragged:
        # TODO(schsam): Relax this constraint.
        msg = (
            'Number of rows of kernel must divide batch size. Found n1 = {} '
            'and batch size = {}.').format(n1, n1_batch_size)
        if device_count > 1:
            msg += (
                ' Note that device parallelism was detected and so the batch '
                'size was expanded by a factor of {}.'.format(device_count))
        raise ValueError(msg)

    n2_batches, ragged = divmod(n2, n2_batch_size)
    if ragged:
        # TODO(schsam): Relax this constraint.
        raise ValueError(('Number of columns of kernel must divide batch '
                          'size. Found n2 = {} '
                          'and batch size = {}').format(n2, n2_batch_size))
    return n1_batches, n1_batch_size, n2_batches, n2_batch_size
Example #30
def get_visible(coord, mapp):

    direction_bins = {}
    for tgt in mapp:
        if np.array_equal(tgt, coord):
            continue
        diff = tgt - coord
        x = diff[0]
        y = diff[1]
        n = np.gcd(x, y)

        distance = np.linalg.norm(diff)
        direction = tuple(diff / n)

        # print("{:10s} {:10d} {:10g} {:20s}".format(str(tgt),n,distance,str(direction)))

        if not direction in direction_bins:
            direction_bins[direction] = []

        direction_bins[direction].append((distance, tgt))

    angle_bins = {my_angle(d): sorted(v) for d, v in direction_bins.items()}

    # for angle in angle_bins:
    # print("{:15s} {}".format(str(angle),angle_bins[angle]))

    return angle_bins
Example #31
def resample(y, orig_sr, target_sr, res_type='kaiser_best', fix=True, scale=False, **kwargs):
    """Resample a time series from orig_sr to target_sr

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or shape=(2, n)]
        audio time series.  Can be mono or stereo.

    orig_sr : number > 0 [scalar]
        original sampling rate of `y`

    target_sr : number > 0 [scalar]
        target sampling rate

    res_type : str
        resample type (see note)

        .. note::
            By default, this uses `resampy`'s high-quality mode ('kaiser_best').

            To use a faster method, set `res_type='kaiser_fast'`.

            To use `scipy.signal.resample`, set `res_type='fft'` or `res_type='scipy'`.

            To use `scipy.signal.resample_poly`, set `res_type='polyphase'`.

        .. note::
            When using `res_type='polyphase'`, only integer sampling rates are
            supported.

    fix : bool
        adjust the length of the resampled signal to be of size exactly
        `ceil(target_sr * len(y) / orig_sr)`

    scale : bool
        Scale the resampled signal so that `y` and `y_hat` have approximately
        equal total energy.

    kwargs : additional keyword arguments
        If `fix==True`, additional keyword arguments to pass to
        `librosa.util.fix_length`.

    Returns
    -------
    y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
        `y` resampled from `orig_sr` to `target_sr`

    Raises
    ------
    ParameterError
        If `res_type='polyphase'` and `orig_sr` or `target_sr` are not both
        integer-valued.

    See Also
    --------
    librosa.util.fix_length
    scipy.signal.resample
    resampy.resample

    Notes
    -----
    This function caches at level 20.

    Examples
    --------
    Downsample from 22 KHz to 8 KHz

    >>> y, sr = librosa.load(librosa.util.example_audio_file(), sr=22050)
    >>> y_8k = librosa.resample(y, sr, 8000)
    >>> y.shape, y_8k.shape
    ((1355168,), (491671,))
    """

    # First, validate the audio buffer
    util.valid_audio(y, mono=False)

    if orig_sr == target_sr:
        return y

    ratio = float(target_sr) / orig_sr

    n_samples = int(np.ceil(y.shape[-1] * ratio))

    if res_type in ('scipy', 'fft'):
        y_hat = scipy.signal.resample(y, n_samples, axis=-1)
    elif res_type == 'polyphase':
        if int(orig_sr) != orig_sr or int(target_sr) != target_sr:
            raise ParameterError('polyphase resampling is only supported for integer-valued sampling rates.')

        # For polyphase resampling, we need up- and down-sampling ratios
        # We can get those from the greatest common divisor of the rates
        # as long as the rates are integer-valued
        orig_sr = int(orig_sr)
        target_sr = int(target_sr)
        gcd = np.gcd(orig_sr, target_sr)
        y_hat = scipy.signal.resample_poly(y, target_sr // gcd, orig_sr // gcd, axis=-1)
    else:
        y_hat = resampy.resample(y, orig_sr, target_sr, filter=res_type, axis=-1)

    if fix:
        y_hat = util.fix_length(y_hat, n_samples, **kwargs)

    if scale:
        y_hat /= np.sqrt(ratio)

    return np.ascontiguousarray(y_hat, dtype=y.dtype)
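For the 'polyphase' branch, the up/down factors come straight from the gcd of the two rates; a quick illustration with the sampling rates from the docstring example:

import numpy as np

orig_sr, target_sr = 22050, 8000
g = np.gcd(orig_sr, target_sr)        # 50
print(target_sr // g, orig_sr // g)   # 160 441, the up/down factors passed to resample_poly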