Example No. 1
def findD(p1, p2, e):
    """Compute the private exponent d for primes p1, p2 and public exponent e."""

    f = p1 * p2                             # modulus
    n = int(lcm(phi(p1), phi(p2)))          # lcm of the two totients
    d = modinv(e, n, 0)                     # modular inverse of e mod n
    generateKey(f, e, d)
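findD relies on helpers (phi, lcm, modinv, generateKey) defined elsewhere in its project. As a hedged illustration, a modular inverse can be computed with the extended Euclidean algorithm; modinv_sketch below is a hypothetical stand-in, not the project's modinv (whose third argument is ignored here):

def modinv_sketch(e, n):
    # extended Euclidean algorithm: find x with e*x + n*y == gcd(e, n)
    old_r, r = e, n
    old_x, x = 1, 0
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
    if old_r != 1:
        raise ValueError("e and n are not coprime; no inverse exists")
    return old_x % n   # e.g. modinv_sketch(3, 20) == 7, since 3 * 7 = 21 ≡ 1 (mod 20)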
Example No. 2
def suchn(k, x):
    """Count the n in [1, x] with phi(n) == phi(n + k); x is the upper bound."""
    counter = 0
    n = 1  # start at 1, since phi(0) is undefined
    matches = []
    while n <= x:
        if phi(n) == phi(n + k):
            counter = counter + 1
            matches.append(n)
        n = n + 1

    # uncomment 'matches' below to also get the list of n that satisfy phi(n) == phi(n + k), for each k
    return counter  # , matches
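A hedged usage sketch, assuming a totient helper is available (sympy's totient is one option; the bound 100 is arbitrary):

from sympy import totient as phi

print(suchn(1, 100))   # how many n <= 100 satisfy phi(n) == phi(n + 1)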
Example No. 3
from math import gcd  # needed for the coprimality check below


def main():

    keyParam = [
        int(input('INPUT: FACTOR1 ')),
        int(input('INPUT: FACTOR2 ')),
        int(input('INPUT e '))
    ]

    # e is valid only if it is coprime to lcm(phi(p), phi(q))
    if gcd(keyParam[2], int(lcm(phi(keyParam[0]), phi(keyParam[1])))) == 1:
        findD(keyParam[0], keyParam[1], keyParam[2])
    else:
        print("e must be coprime to",
              int(lcm(phi(keyParam[0]), phi(keyParam[1]))))
        main()
Example No. 4
def rates_ss(W):  # set inputs here

    '''
    Compute steady-state mean-field rates by Euler integration.
    :param W: weight matrix
    :return: steady-state rates, using the transfer function and cellular/synaptic parameters defined in params.py and phi.py
    '''

    par = params_Gaussian.params_Gaussian()
    b = par.b
    gain = par.gain
    tau = par.tau
    N = par.N

    dt = .02 * tau
    Tmax = int(50 * tau / dt)
    a = 1. / tau
    a2 = a ** 2

    r = np.zeros(N)
    s_dummy = np.zeros(N)
    s = np.zeros(N)

    r_vec = np.zeros((N, Tmax))  # full rate trajectory (computed but not returned; only the final rates are)
    for i in range(Tmax):
        s_dummy += dt * (-2 * a * s_dummy - a2 * s) + r * a2 * dt # r * a2 for unit norm alpha function
        s += dt * s_dummy

        g = W.dot(s) + b
        r = phi(g, gain)
        r_vec[:, i] = r

    return r
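A minimal usage sketch (an assumption, not part of the original project): it presumes numpy is imported as np, params_Gaussian.py exposes N as used above, and uses an arbitrary random weight matrix.

import numpy as np

par = params_Gaussian.params_Gaussian()
W_rand = 0.1 * np.random.randn(par.N, par.N) / np.sqrt(par.N)  # arbitrary weak random coupling
r_ss = rates_ss(W_rand)
print(r_ss.shape)  # one steady-state rate per neuron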
Example No. 5
def generate_key(upper_bound):
    # draw two distinct primes from the precomputed prime table
    arr = []
    while len(arr) != 2:
        ele = random.randint(2, upper_bound)
        try:
            if get_primes_array.object[str(ele)] and ele not in arr:
                arr.append(ele)
        except KeyError:
            pass
    p = arr[0]
    q = arr[1]
    # p, q = 3, 11  # reinitialize p and q here for sample cases
    print("p and q are : ", p, q)

    n = p * q
    phi_n = phi.phi(n)

    # smallest e >= 2 that is coprime to phi(n)
    i = 2
    while i < phi_n:
        if gcd(i, phi_n) == 1:
            break
        i = i + 1
    e = i
    d = eulers_multiplicative_inverse.mul_inverse(e, phi_n)
    public_key = [e, n]
    private_key = [d, p, q]
    return public_key, private_key
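A short usage sketch showing how the returned key pair would typically be used; the message value 42 is a hypothetical plaintext smaller than n:

public_key, private_key = generate_key(1000)
e, n = public_key
d = private_key[0]

m = 42                      # hypothetical plaintext, must be < n
c = pow(m, e, n)            # encrypt with the public key
assert pow(c, d, n) == m    # decrypting with the private key recovers m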
Example No. 6
def is_prim_root(x: int, modulus: int) -> bool:
    """x (coprime to modulus) is a primitive root iff x**(phi(m)/p) != 1 (mod m) for every prime p dividing phi(m)."""
    phi_modulus = phi.phi(modulus)
    phi_factor_dict = factor.factor(phi_modulus)

    for single_factor in phi_factor_dict.keys():
        if modular_exp.modular_exp(x, phi_modulus // single_factor, modulus) == 1:
            return False

    return True
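A quick sanity check, assuming the phi, factor, and modular_exp modules behave as their names suggest: phi(7) = 6 has prime factors 2 and 3; 3 is a primitive root modulo 7, while 2 is not (2**3 = 8 ≡ 1 mod 7):

print(is_prim_root(3, 7))   # expected: True
print(is_prim_root(2, 7))   # expected: False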
Example No. 7
def __init__(self):
    import bar
    # PHI
    self.phi = phi.phi()
    self.Npiso = "piso"
    # Menu bar
    # self.menu = info.info_area()
    # self.menu.init()
    # Info bar
    self.info = bar.bar()
    self.tim = bar.tim()
    self.objetos_pantalla = []
    self.FT = True
Example No. 8
def interpolation(func, func_gradient, xk, pk, initial_alpha=2, c1=0.0001):
    """
    Interpolation line search for an acceptable step length alpha.
    :param func: objective function
    :param func_gradient: gradient of the objective
    :param xk: current iterate
    :param pk: search direction
    :param initial_alpha: starting trial step length
    :param c1: sufficient-decrease constant, 0 < c1 < 1
    :return: a step length alpha satisfying the sufficient-decrease condition
    """
    import numpy as np
    from phi import phi, phi_derivative_alpha_zero

    if c1 >= 1 or c1 <= 0:
        raise ValueError('c1 must be in range (0, 1)')

    alpha_0 = initial_alpha

    # quadratic approximation
    alpha_1_numerator = (phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)*(alpha_0**2))
    alpha_1_denominator = 2*(phi(func=func, xk=xk, pk=pk, alpha=alpha_0)-phi(func=func, xk=xk, pk=pk, alpha=0)-phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)*(alpha_0))

    if alpha_1_denominator != 0:
        alpha_1 = - alpha_1_numerator/alpha_1_denominator
    else:
        alpha_1 = - alpha_1_numerator / (alpha_1_denominator+0.000001)

    if wolf_condition_1(func=func, func_gradient=func_gradient, xk=xk, pk=pk, alpha=alpha_1, c1=c1):
        return alpha_1

    # cubic approximation
    while True:
        coefficient = 1.0/((alpha_0**2)*(alpha_1**2)*(alpha_1-alpha_0))
        coefficient = float(coefficient)
        matrix_1 = np.array([[alpha_0**2, -(alpha_1**2)], [-(alpha_0**3), alpha_1**3]])
        matrix_2 = np.array([[phi(func=func, xk=xk, pk=pk, alpha=alpha_1)-phi(func=func, xk=xk, pk=pk, alpha=0)-phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)*(alpha_1)],
                             [phi(func=func, xk=xk, pk=pk, alpha=alpha_0)-phi(func=func, xk=xk, pk=pk, alpha=0)-phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)*(alpha_0)]])
        product = coefficient * np.dot(matrix_1, matrix_2)
        a = product[0, 0]
        b = product[1, 0]

        #temp1 = (-b+np.sqrt((b**2)-3*a*phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)))
        #temp2 = -3*a*phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)
        #temp3 = (b**2)-3*a*phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)
        #temp6 = a

        alpha_2 = (-b+np.sqrt((b**2)-3*a*phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)))/(3*a)

        if wolf_condition_1(func=func, func_gradient=func_gradient, xk=xk, pk=pk, alpha=alpha_2, c1=c1):
            return alpha_2

        alpha_0 = alpha_1
        alpha_1 = alpha_2
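wolf_condition_1 is defined elsewhere in this project; a plausible sketch, assuming it implements the sufficient-decrease (Armijo) condition phi(alpha) <= phi(0) + c1 * alpha * phi'(0) with the same phi helpers used above:

def wolf_condition_1(func, func_gradient, xk, pk, alpha, c1):
    from phi import phi, phi_derivative_alpha_zero
    # sufficient decrease: phi(alpha) <= phi(0) + c1 * alpha * phi'(0)
    decrease = c1 * alpha * phi_derivative_alpha_zero(func_derivative=func_gradient, xk=xk, pk=pk)
    return phi(func=func, xk=xk, pk=pk, alpha=alpha) <= phi(func=func, xk=xk, pk=pk, alpha=0) + decrease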
Example No. 9
File: 70.py Project: mdonahoe/euler
import phi


def permuto(a, b):
    a = [i for i in str(a)]
    b = [i for i in str(b)]
    a.sort()
    b.sort()
    return a == b


# Project Euler 70: find n < 10**7 for which phi(n) is a permutation of n and n/phi(n) is minimal
j = 0
m = 100000000000
for i in range(2, 10**7):
    p = phi.phi(i)
    if permuto(i, p):
        n = i / float(p)
        if n < m:
            j = i
            m = n
            print(j)
Example No. 10
def sim_poisson(W, tstop, trans, dt):

    '''
    Simulate a Poisson spiking network with alpha-function synapses.
    :param W: weight matrix
    :param tstop: total simulation time (including initial transient)
    :param trans: initial transient (don't record spikes until trans milliseconds)
    :param dt: Euler step
    :return: spike times with neuron labels (spktimes) and each neuron's input over time (g_vec)
    '''
    
    # unpackage parameters
    par = params.params_Gaussian()
    N = par.N
    tau = par.tau
    b = par.b
    gain = par.gain

    # sim variables
    Nt = int(tstop/dt)
    t = 0
    numspikes = 0
    
    maxspikes = int(500 * N * tstop / 1000)  # cap at 500 Hz per neuron
    spktimes = np.zeros((maxspikes, 2)) # store spike times and neuron labels
    g_vec = np.zeros((Nt, N))

    # alpha function synaptic variables
    s = np.zeros((N,))
    s0 = np.zeros((N,))
    s_dummy = np.zeros((N,))
    
    a = 1. / tau
    a2 = a**2
    
    for i in range(0,Nt,1):
        
        t += dt
        
        # update each neuron's output and plasticity traces
        s_dummy += dt*(-2*a*s_dummy - a2*s)    
        s += dt*s_dummy
        
        
        
        # compute each neuron's input
        g = np.dot(W, s) + b
        g_vec[i] = g

        # decide if each neuron spikes, update synaptic output of spiking neurons
        # each neurons's rate is phi(g)
        r = phi(g, gain)

        try:
            spiket = np.random.poisson(r*dt, size=(N,))
        except ValueError:  # rates became negative or non-finite
            break

        s_dummy += spiket*a2  # a2 gives a unit-norm alpha function (Wij then carries units of time);
        # use spiket*a instead for a non-unit-norm alpha function and dimensionless Wij
    
        ### store spike times and counts
        if t > trans:
            for j in range(N):
                if spiket[j] >= 1 and numspikes < maxspikes:
                    spktimes[numspikes, 0] = t
                    spktimes[numspikes, 1] = j
                    numspikes += 1
    
    # truncate spike time array
    spktimes = spktimes[0:numspikes, :]

    return spktimes, g_vec
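A hedged post-processing sketch (not from the original project), assuming numpy is imported as np and reusing N and a weight matrix W from the surrounding examples, to turn the returned spike times into per-neuron firing rates (times are in ms):

spktimes, g_vec = sim_poisson(W, tstop=2000., trans=200., dt=0.1)

rates_hz = np.zeros(N)
for neuron in range(N):
    # spikes recorded for this neuron, divided by the recording window in seconds
    rates_hz[neuron] = np.sum(spktimes[:, 1] == neuron) / ((2000. - 200.) / 1000.)
print('mean rate:', rates_hz.mean(), 'Hz')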
Example No. 11
def plotphi(files, components, stn, fmts, colors):
    for arq_jformat in files:
        for component in components:
            stn.read(arq_jformat)
            T, ph, ph_err = phi(stn.Z[component])
            plot(T, ph, ph_err, fmts, colors)
Example No. 12
import matplotlib.pyplot as plt
import numpy as np
from phi import phi
from list_linspace import list_linspace
from math import pi

# user-defined domain: the closed interval [start, stop]
start = -2
stop = 2

x = list_linspace(start, stop, 10000)
y = []

for i in x:
    y.append(phi(i))

plt.plot(x, y)
plt.grid(color='r', linestyle='--', linewidth=1, alpha=0.3)
plt.xlabel('x')
plt.ylabel('y')
plt.title('y = f(x)')
plt.show()
Example No. 13
def mul_inverse(a, n):
    # Euler's theorem: a**(phi(n) - 1) ≡ a**(-1) (mod n), provided gcd(a, n) == 1
    return pow(a, phi.phi(n) - 1, n)
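A quick sanity check, assuming phi.phi is the standard Euler totient: phi(7) = 6, so the inverse of 3 modulo 7 is 3**5 mod 7 = 5.

assert mul_inverse(3, 7) == 5   # 3 * 5 = 15 ≡ 1 (mod 7)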
Example No. 14
from station import Station
from rhoa import rho_a
from phi import phi
import sys

if __name__ == "__main__":
    arq_jformat = sys.argv[1]
    component = sys.argv[2]

    stn = Station()
    stn.read(arq_jformat)

    T, rho, rho_err = rho_a(stn.Z[component])

    T, ph, ph_err = phi(stn.Z[component])  # 'ph' avoids shadowing the imported phi()

    print("> %s %s" % (stn.stationName, component))
    print("\tT \tRho \tRho_err \tPhi \tPhi_err")
    for j in range(0, len(T)):
        print("%10.4e %10.4e %10.4e %10.4e %10.4e" %
              (T[j], rho[j], rho_err[j], ph[j], ph_err[j]))
Example No. 15
def dtosmoter(

    ## main arguments / inputs
    data,  ## training set (pandas dataframe)
    y,  ## response variable y by name (string)
    drop_na_col=False,  ## auto drop columns with nan's (bool)
    drop_na_row=False,  ## auto drop rows with nan's (bool)

    ## phi relevance function arguments / inputs
    rel_thres=0.5,  ## relevance threshold considered rare (pos real)
    rel_method="auto",  ## relevance method ("auto" or "manual")
    rel_xtrm_type="both",  ## distribution focus ("high", "low", "both")
    rel_coef=1.5,  ## coefficient for box plot (pos real)
    rel_ctrl_pts_rg=None,  ## input for "manual" rel method  (2d array)
    oversampler=None,
):
    """
	the main function, designed to help solve the problem of imbalanced data
	for regression, much the same as SMOTE for classification; SMOGN applies
	the combintation of under-sampling the majority class (in the case of
	regression, values commonly found near the mean of a normal distribution
	in the response variable y) and over-sampling the minority class (rare
	values in a normal distribution of y, typically found at the tails)

	procedure begins with a series of pre-processing steps, and to ensure no
	missing values (nan's), sorts the values in the response variable y by
	ascending order, and fits a function 'phi' to y, corresponding phi values
	(between 0 and 1) are generated for each value in y, the phi values are
	then used to determine if an observation is either normal or rare by the
	threshold specified in the argument 'rel_thres'

	normal observations are placed into a majority class subset (normal bin)
	and are under-sampled, while rare observations are placed in a seperate
	minority class subset (rare bin) where they're over-sampled

	under-sampling is applied by a random sampling from the normal bin based
	on a calculated percentage control by the argument 'samp_method', if the
	specified input of 'samp_method' is "balance", less under-sampling (and
	over-sampling) is conducted, and if "extreme" is specified more under-
	sampling (and over-sampling is conducted)

	over-sampling is applied one of two ways, either synthetic minority over-
	sampling technique for regression 'smoter' or 'smoter-gn' which applies a
	similar interpolation method to 'smoter', but takes an additional step to
	perturb the interpolated values with gaussian noise

	'smoter' is selected when the distance between a given observation and a
	selected nearest neighbor is within the maximum threshold (half the median
	distance of k nearest neighbors) 'smoter-gn' is selected when a given
	observation and a selected nearest neighbor exceeds that same threshold

	both 'smoter' and 'smoter-gn' are only applied to numeric / continuous
	features, synthetic values found in nominal / categorical features, are
	generated by randomly selecting observed values found within their
	respective feature

	procedure concludes by post-processing and returns a modified pandas data
	frame containing under-sampled and over-sampled (synthetic) observations,
	the distribution of the response variable y should more appropriately
	reflect the minority class areas of interest in y that are under-
	represented in the original training set

	ref:

	Branco, P., Torgo, L., Ribeiro, R. (2017).
	SMOGN: A Pre-Processing Approach for Imbalanced Regression.
	Proceedings of Machine Learning Research, 74:36-50.
	http://proceedings.mlr.press/v74/branco17a/branco17a.pdf.
	"""

    ## pre-process missing values
    if drop_na_col:
        data = data.dropna(axis=1)  ## drop columns with nan's

    if drop_na_row:
        data = data.dropna(axis=0)  ## drop rows with nan's

    ## quality check for missing values in dataframe
    if data.isnull().values.any():
        raise ValueError("cannot proceed: data cannot contain NaN values")

    ## quality check for y
    if not isinstance(y, str):
        raise ValueError("cannot proceed: y must be a string")

    if y not in data.columns.values:
        raise ValueError("cannot proceed: y must be a header name (string) \
               found in the dataframe")

    ## quality check for relevance threshold parameter
    if rel_thres is None:
        raise ValueError("cannot proceed: relevance threshold required")

    if rel_thres > 1 or rel_thres <= 0:
        raise ValueError("rel_thres must be a real number: 0 < rel_thres <= 1")

    ## store data dimensions
    n = len(data)
    d = len(data.columns)

    ## store original data types
    feat_dtypes_orig = [None] * d

    for j in range(d):
        feat_dtypes_orig[j] = data.iloc[:, j].dtype

    ## determine column position for response variable y
    y_col = data.columns.get_loc(y)

    ## move response variable y to last column
    if y_col < d - 1:
        cols = list(range(d))
        cols[y_col], cols[d - 1] = cols[d - 1], cols[y_col]
        data = data[data.columns[cols]]

    ## store original feature headers and
    ## encode feature headers to index position
    feat_names = list(data.columns)
    data.columns = range(d)

    ## sort response variable y by ascending order
    y = pd.DataFrame(data[d - 1])
    y_sort = y.sort_values(by=d - 1)
    y_sort = y_sort[d - 1]

    ## -------------------------------- phi --------------------------------- ##
    ## calculate parameters for phi relevance function
    ## (see 'phi_ctrl_pts()' function for details)
    phi_params = phi_ctrl_pts(
        y=y_sort,  ## y (ascending)
        method=rel_method,  ## defaults "auto"
        xtrm_type=rel_xtrm_type,  ## defaults "both"
        coef=rel_coef,  ## defaults 1.5
        ctrl_pts=rel_ctrl_pts_rg  ## user spec
    )

    ## calculate the phi relevance function
    ## (see 'phi()' function for details)
    y_phi = phi(
        y=y_sort,  ## y (ascending)
        ctrl_pts=phi_params  ## from 'phi_ctrl_pts()'
    )

    ## phi relevance quality check
    if all(i == 0 for i in y_phi):
        raise ValueError("redefine phi relevance function: all points are 0")

    if all(i == 1 for i in y_phi):
        raise ValueError("redefine phi relevance function: all points are 1")
    ## ---------------------------------------------------------------------- ##

    ## determine bin (rare or normal) by bump classification
    bumps = [0]

    for i in range(0, len(y_sort) - 1):
        if ((y_phi[i] >= rel_thres and y_phi[i + 1] < rel_thres)
                or (y_phi[i] < rel_thres and y_phi[i + 1] >= rel_thres)):
            bumps.append(i + 1)

    bumps.append(n)

    ## number of bump classes
    n_bumps = len(bumps) - 1

    ## determine indices for each bump classification
    b_index = {}

    for i in range(n_bumps):
        b_index.update({i: y_sort[bumps[i]:bumps[i + 1]]})

    ## conduct over / under sampling and store modified training set

    imblearn_Y = np.zeros(n)
    for i in range(n_bumps):
        imblearn_Y[b_index[i].index] = i

    data_new = pd.DataFrame(oversampler.fit_resample(data, imblearn_Y)[0])

    ## restore response variable y to original position
    if y_col < d - 1:
        cols = list(range(d))
        cols[y_col], cols[d - 1] = cols[d - 1], cols[y_col]
        data_new = data_new[data_new.columns[cols]]

    ## return modified training set
    return data_new
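A hedged usage sketch; imbalanced-learn's SMOTE is one plausible choice for the oversampler argument, and the file 'train.csv' with numeric features and response column 'target' is hypothetical:

import pandas as pd
from imblearn.over_sampling import SMOTE

df = pd.read_csv('train.csv')      # hypothetical training set with numeric features
df_dto = dtosmoter(
    data=df,
    y='target',                    # hypothetical response column name
    rel_thres=0.8,
    oversampler=SMOTE(),
)
print(len(df), '->', len(df_dto))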
Example No. 16
def phi(ref_protein, mut_protein):
    """Convenience wrapper: makes phi.phi() easier to use."""
    from phi import phi
    return phi(ref_protein, mut_protein)