Example #1
def dtw(A, B, cb, m, r, bsf):
    size = 2 * r + 1
    cost = numpypy.empty(size)
    for i in range(size):
        cost[i] = 1e20

    cost_prev = numpypy.empty(size)
    for i in range(size):
        cost_prev[i] = 1e20
    k = 0
    for i in range(m):
        k = maxima(0, r - i)
        min_cost = 1e20
        for j in range(maxima(0, i - r), minima(m - 1, i + r) + 1):
            if (i == 0) and (j == 0):
                cost[k] = dist(A[0], B[0])
                min_cost = cost[k]
                k += 1
                continue
            if (j - 1 < 0) or (k - 1 < 0):
                y = 1e20
            else:
                y = cost[k - 1]
            if (i - 1 < 0) or (k + 1 > 2 * r):
                x = 1e20
            else:
                x = cost_prev[k + 1]
            if (i - 1 < 0) or (j - 1 < 0):
                z = 1e20
            else:
                z = cost_prev[k]

            # Classic DTW cell update.
            cost[k] = minima(minima(x, y), z) + dist(A[i], B[j])
            # Track the minimum cost in the row for early abandoning
            # (a column-based bound could be used instead).
            if cost[k] < min_cost:
                min_cost = cost[k]
            k += 1
        # Early abandoning: even the best remaining alignment cannot beat bsf.
        if (i + r < m - 1) and ((min_cost + cb[i + r + 1]) >= bsf):
            return min_cost + cb[i + r + 1]

        # Swap the current and previous cost rows.
        cost, cost_prev = cost_prev, cost

    # After the final swap the last computed row lives in cost_prev;
    # its last in-band cell holds the DTW distance.
    k -= 1
    final_dtw = cost_prev[k]
    return final_dtw
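Example #1 leans on three helpers that none of these snippets define: maxima, minima, and dist. A minimal sketch of what they are assumed to do, following the UCR Suite conventions this port mirrors (pointwise squared distance):

# Hypothetical stand-ins for the helpers Example #1 assumes; not shown in the original.
def maxima(a, b):
    return a if a > b else b

def minima(a, b):
    return a if a < b else b

def dist(x, y):
    # Squared distance between two scalars, as in the UCR Suite.
    return (x - y) * (x - y)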
Example #2
def partial_slda_recalculate_eta_sigma(eta, y, phi):
    """
        Same as slda_recalculate_eta_sigma, but also
          supports partial updates if len(eta) < phi.shape[1] .
          Will only update based on first Ks topics of phi
    """
    D = len(phi)
    ensure(D >= 1)

    N,K = phi[0].shape
    Ks = len(eta)

    print 'e_a...'
    E_A = np.empty((D, Ks))
    for d in xrange(D):
        E_A[d,:] = calculate_EZ(phi[d][:,:Ks])
  
    E_ATA_inverse = calculate_E_ATA_inverse([p[:,:Ks] for p in phi])

    print 'new eta...'
    new_eta = np.dot(np.dot(E_ATA_inverse, E_A.T), y)
    eta[:] = new_eta
    
    print 'new sigma squared...'
    new_sigma_squared = (1.0 / D) * (np.dot(y, y) - np.dot(np.dot(y, E_A), eta))
    return new_sigma_squared
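For reference, the update these lines implement is the standard sLDA M-step restricted to the first Ks topics: eta = (E[A^T A])^-1 E[A]^T y, followed by sigma^2 = (1/D) (y^T y - y^T E[A] eta), which is exactly the pair of np.dot expressions above.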
Example #3
def best_model_sel(prob_treshold, survey_type="random", pro_path="/home/jemejia/CosmicVarianceLAES/"):

    ID_file = pro_path + "data/mock_survey/" + "ID_" + survey_type + "_surveys.dat"
    p_values_file = pro_path + "data/mock_survey/" + "p_values_FOF_ID_" + survey_type + "_surveys.dat"

    ks_data = np.loadtxt(p_values_file)

    m_min_arr = ks_data[:, 0]
    m_max_arr = ks_data[:, 1]
    f_occ_arr = ks_data[:, 2]
    ID_survey_arr = ks_data[:, 3]
    model_prob_arr = ks_data[:, 4]
    print np.size(m_min_arr)
    # choosing the models with KS-test probabilities greater than prob_treshold
    index = np.where(model_prob_arr >= prob_treshold)
    print np.size(index)
    best_models = np.empty([np.size(index), np.size(ks_data[0, :])])
    del(ks_data)

    best_models[:, 0] = m_min_arr[index]
    best_models[:, 1] = m_max_arr[index]
    best_models[:, 2] = f_occ_arr[index]
    best_models[:, 3] = ID_survey_arr[index]
    best_models[:, 4] = model_prob_arr[index]
    print "the number of selected models is", np.size(best_models[:, 0])
    return best_models
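The returned array preserves the input column layout: m_min, m_max, f_occ, survey ID, and KS probability, one row per model above the threshold. A hypothetical call (the threshold value is illustrative, and the .dat files must exist under pro_path):

best = best_model_sel(0.95, survey_type="random")
print best[:, 0:3]  # m_min, m_max, f_occ of each surviving model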
Example #4
    def test_empty(self):
        """
        Test that empty() works.
        """

        from numpypy import empty
        a = empty(2)
        a[1] = 1.0
        assert a[1] == 1.0
Example #5
def lower_upper_lemire(t, m, r):
    du = deque(2 * r + 2)
    dl = deque(2 * r + 2)
    u = numpypy.empty(m)
    l = numpypy.empty(m)
    du.push_back(0)
    dl.push_back(0)

    for i in range(1, m):  # t[i] is compared with t[i - 1], so start at 1
        if i > r:
            u[i - r - 1] = t[du.front()]
            l[i - r - 1] = t[dl.front()]
        if t[i] > t[i - 1]:
            du.pop_back()
            while not du.empty() and t[i] > t[du.back()]:
                du.pop_back()
        else:
            dl.pop_back()
            while not dl.empty() and t[i] < t[dl.back()]:
                dl.pop_back()
        du.push_back(i)
        dl.push_back(i)

        if i == 2 * r + 1 + du.front():
            du.pop_front()
        elif i == 2 * r + 1 + dl.front():
            dl.pop_front()

    for i in range(m, m + r + 1):
        u[i - r - 1] = t[du.front()]
        l[i - r - 1] = t[dl.front()]
        if i - du.front() >= 2 * r + 1:
            du.pop_front()
        if i - dl.front() >= 2 * r + 1:
            dl.pop_front()

    return l, u
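This is Lemire's streaming min/max envelope: after the pass, u[i] and l[i] hold the maximum and minimum of t over the window of half-width r around position i. Note that deque here is not collections.deque; it is the fixed-capacity ring buffer whose constructor appears in Example #13.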
Example #6
def lb_keogh_cumulative(order, t, uo, lo, j, m, mean, std, best_so_far):
    # LB_Keogh lower bound of the z-normalized candidate against the query
    # envelope (uo, lo); cb keeps per-position bounds for later early abandoning.
    lb = 0
    cb = numpypy.empty(m)
    for i in range(m):
        if lb < best_so_far:
            x = (t[(order[i] + j)] - mean) / std
            d = 0
            if x > uo[i]:
                d = dist(x, uo[i])
            elif x < lo[i]:
                d = dist(x, lo[i])
            lb += d
            cb[order[i]] = d
        else:
            break
    return lb, cb
Example #7
 def test_empty(self):
     from numpypy import empty
     import gc
     for i in range(1000):
         a = empty(3)
         assert len(a) == 3
         if not (a[0] == a[1] == a[2] == 0):
             break     # done
         a[0] = 1.23
         a[1] = 4.56
         a[2] = 7.89
         del a
         gc.collect()
     else:
         raise AssertionError(
             "empty() returned a zeroed out array every time")
Example #8
def calculate_EZZT(big_phi):
    """
        Accepts a big phi matrix (like (N x K)
        Calculates E[ZdZdT].
        Returns the final matrix (K x K).

        (Also, E[ZdZdT] = (1/N2)(ΣNΣm!=nφd,nφd,mT  +  ΣNdiag{φd,n})
    """
    (N, K) = big_phi.shape
    inner_sum = np.empty((K, K))

    for i in xrange(K):
        for j in xrange(K):
            inner_sum[i,j] = np.sum(np.multiply.outer(big_phi[:,i], big_phi[:,j])) - np.sum(np.dot(big_phi[:,i], big_phi[:,j]))
    inner_sum += np.diag(np.sum(big_phi, axis=0))
    inner_sum /= (N * N)
    return inner_sum
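The double loop above is O(K^2) at the Python level. A sketch of an equivalent vectorized form (an alternative formulation, not the original author's code), using the identity sum_n sum_m phi_n phi_m^T = s s^T with s = sum_n phi_n:

import numpy as np

def calculate_EZZT_vectorized(big_phi):
    # Same quantity as calculate_EZZT above, without the K x K Python loop.
    N, K = big_phi.shape
    s = big_phi.sum(axis=0)              # s[i] = sum_n phi[n, i]
    ezzt = np.outer(s, s)                # sum over all (n, m) pairs
    ezzt -= np.dot(big_phi.T, big_phi)   # remove the m == n terms
    ezzt += np.diag(s)                   # + sum_n diag{phi_n}
    return ezzt / (N * N)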
Example #9
 def test_sort_objects(self):
     # test object array sorts.
     from numpypy import empty
     try:
         a = empty((101,), dtype=object)
     except:
         skip('object type not supported yet')
     a[:] = list(range(101))
     b = a[::-1]
     for kind in ['q', 'h', 'm']:
         msg = "object sort, kind=%s" % kind
         c = a.copy()
         c.sort(kind=kind)
         assert (c == a).all(), msg
         c = b.copy()
         c.sort(kind=kind)
         assert (c == a).all(), msg
Example #10
def lb_keogh_data_cumulative(order, tz, qo, l, u, m, mean, std, best_so_far):
    # LB_Keogh in the other direction: the query against the envelope of the
    # z-normalized data; cb again keeps the per-position bounds.
    lb = 0
    cb = numpypy.empty(m)
    for i in range(m):
        if lb < best_so_far:
            uu = (u[order[i]] - mean) / std
            ll = (l[order[i]] - mean) / std
            d = 0
            if qo[i] > uu:
                d = dist(qo[i], uu)
            elif qo[i] < ll:
                d = dist(qo[i], ll)

            lb += d
            cb[order[i]] = d
        else:
            break
    return lb, cb
Example #11
def dist(w1, w2):
    def dp(i, j):
        if memo[i, j] != -1:
            return memo[i, j]
        if i == 0:
            result = j
        elif j == 0:
            result = i
        elif w1[i - 1] == w2[j - 1]:
            result = min(dp(i - 1, j - 1), dp(i - 1, j) + 1, dp(i, j - 1) + 1)
        else:
            result = min(dp(i - 1, j - 1) + 1, dp(i - 1, j) + 1, dp(i, j - 1) + 1)
        memo[i, j] = result  # cache the subproblem so the memo table is actually used
        return result

    len1 = len(w1)
    len2 = len(w2)
    memo = np.empty((len1 + 1, len2 + 1), dtype=int)
    memo.fill(-1)
    return dp(len1, len2)
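A quick sanity check of the memoized edit distance on the classic Levenshtein example:

print dist("kitten", "sitting")  # -> 3 (substitute k->s, e->i, insert g)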
Example #12
def solve(par):
    N, M, likes = par
    dp = np.empty((N, 1 << M), dtype=np.int)
    dp.fill(-1)

    def _solve(step, status):
        if step == N:
            return 1

        if dp[step, status] != -1:
            return dp[step, status]

        sln = 0
        for like in likes[step]:
            num = like - 1
            if status & (1 << num):
                continue
            sln += _solve(step + 1, status | (1 << num))
        dp[step, status] = sln
        return sln

    return _solve(0, 0)
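Read as a bitmask DP: _solve(step, status) counts the ways to give each remaining person a distinct item they like, with status marking items already taken. A small hypothetical check:

print solve((2, 3, [[1, 2], [2, 3]]))  # person 0 likes {1,2}, person 1 likes {2,3}: 3 assignments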
Example #13
 def __init__(self, capacity):
     self.dq = numpypy.empty((capacity,), dtype=numpypy.int32)  # ring-buffer storage
     self.size = 0
     self.capacity = capacity
     self.f = 0                    # front index
     self.r = self.capacity - 1    # rear index
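Only the constructor is shown, but Examples #5 and #14 also call push_back, pop_back, pop_front, front, back, and empty on this class. A sketch of those methods under the same ring-buffer layout, modeled on the deque in the C version of the UCR Suite (an assumption; the original methods are not included in these examples):

 # Hypothetical completion of the ring-buffer deque above.
 def push_back(self, v):
     self.dq[self.r] = v
     self.r = (self.r - 1) % self.capacity
     self.size += 1

 def pop_back(self):
     self.r = (self.r + 1) % self.capacity
     self.size -= 1

 def pop_front(self):
     self.f = (self.f - 1) % self.capacity
     self.size -= 1

 def front(self):
     return self.dq[(self.f - 1) % self.capacity]

 def back(self):
     return self.dq[(self.r + 1) % self.capacity]

 def empty(self):
     return self.size == 0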
Example #14
def main():
    start_time = time.time()
    # num_args = len(sys.argv)    #to get the number of command line arguments
    # if (num_args < 4):
    #     sys.exit()   #exit if the number of arguments are not more than equal to 3

    # qp = open(sys.argv[2], "r")
    # fp = open(sys.argv[1], "r")
    qp = open("Query.txt", "r")
    fp = open("Data.txt", "r")
    if (qp):
        for line in qp:
            q = line.split()
    else:
        print ("query file doesnt exist")
    bsf = 1e20  # best so far; start at infinity
    print (bsf)
    # r = int(sys.argv[2])
    r = int(128 * 0.05)  # warping window: 5% of 128
    m = len(q)
    print m
    ex = ex1 = 0
    q = map(float, q)
    loc = 0
    ex = sum(q)
    ex1 = sum(map(sqr, q))
    mean = ex / m
    std = ex1 / m
    std = math.sqrt(std - mean * mean)
    q = [(X - mean) / std for X in q]
    q = numpypy.array(q)
    # Create the envelope of the query: lower envelope l and upper envelope u; r is the warping window.
    l, u = lower_upper_lemire(q, m, r)

    Q_tmp = [(q[i], i) for i in range(m)]
    Q_tmp = sorted(Q_tmp, key=lambda pos: pos[0])
    order = numpypy.empty(m, dtype=numpypy.int32)  # holds the sorted order of query indexes

    qo = numpypy.empty(m)
    uo = numpypy.empty(m)
    lo = numpypy.empty(m)

    for i in range(m):
        o = Q_tmp[i][1]
        order[i] = o
        qo[i] = q[o]
        uo[i] = u[o]
        lo[i] = l[o]

    cb = numpypy.empty(m)
    cb1 = numpypy.empty(m)
    cb2 = numpypy.empty(m)

    for i in range(m):
        cb[i] = 0
        cb1[i] = 0
        cb2[i] = 0

    d = 0  # DTW distance of the current candidate
    i = 0
    j = 0
    ex = ex2 = 0

 
    dataArr = numpypy.array(map(float, fp.readline().split()))
    dataSize = len(dataArr)

    l_buff, u_buff = lower_upper_lemire(dataArr, len(dataArr), r)
    i = 0
    t = numpypy.empty(2 * m)
    tz = numpypy.empty(m)
    kim = 0
    keogh1 = 0
    keogh2 = 0

    print(str(time.time() - start_time) + " seconds. Now heading to DTW computation")

    for i in range(dataSize):
        ex += dataArr[i]
        ex2 += dataArr[i] * dataArr[i]
        t[i % m] = dataArr[i]
        t[(i % m) + m] = dataArr[i]
        if i >= m - 1:
            # The current starting location of T; z-norm(T[i]) is computed on the fly.
            mean = ex / m
            std = ex2 / m
            std = math.sqrt(std - mean*mean)
            j = (i + 1) % m
            I = i - (m - 1)
            lb_kim = lb_kim_hierarchy(t, q, j, m, mean, std, bsf)
            if i % 10000 == 0:
                print("pruned so far - kim: " + str(kim) + " keogh1: " + str(keogh1) + " keogh2: " + str(keogh2) + " bsf: " + str(bsf))
            if lb_kim < bsf:
                lb_k, cb1 = lb_keogh_cumulative(order, t, uo, lo, j, m, mean, std, bsf)

                if (lb_k < bsf):
                    for k in range(m):
                        tz[k] = (t[(k + j)] - mean) / std
                    lb_k2, cb2 = lb_keogh_data_cumulative(order, tz, qo, l_buff[I:], u_buff[I:], m, mean, std, bsf)

                    if lb_k2 < bsf:
                        # Keep the tighter of the two LB_Keogh bounds, accumulated
                        # back-to-front, as the cumulative bound for DTW early abandoning.
                        if (lb_k > lb_k2):
                            cb[m - 1] = cb1[m - 1]
                            for k in reversed(range(0, m - 1)):
                                cb[k] = cb[k+1]+cb1[k]
                        else:
                            cb[m - 1] = cb2[m - 1]
                            for k in reversed(range(0, m - 1)):
                                cb[k] = cb[k+1]+cb2[k]
                        d = dtw(tz, q, cb, m, r, bsf)
                        if d < bsf:
                            bsf = d
                            loc = i - m + 1
                    else:
                        keogh2 +=1
                else:
                    keogh1 +=1
            else:
                kim +=1
            # Slide the window: drop the oldest point from the running sums.
            ex -= t[j]
            ex2 -= t[j] * t[j]
    print "location: ", loc
    print "dist : ", math.sqrt(bsf)
    print "data scanned: ", i
Example #15
 def empty_like(x):
   return np.empty(x.shape, dtype=x.dtype)
Example #16
# The example begins mid-function; a plausible reconstruction of the missing
# header follows (a memoized tree DP over (node, color); an assumption, not
# code shown in the original):
def dp(node, color):
    if memo[node, color] != INF:
        return memo[node, color]
    visited[node] = 1
    if color:
        memo[node, color] = 1
        for e in range(1, N + 1):
            if not visited[e] and graph[node, e]:
                memo[node, color] += min(dp(e, 0), dp(e, 1))
    else:
        memo[node, color] = 0
        for e in range(1, N + 1):
            if not visited[e] and graph[node, e]:
                memo[node, color] += dp(e, 1)
    visited[node] = 0
    return memo[node, color]


INF = 1 << 30
sys.stdin = open('input.txt')
while True:
    N = int(input())
    if not N:
        break
    graph = np.zeros((N + 1, N + 1), dtype=np.int16)
    for i in range(1, N + 1):
        line = map(int, raw_input().split())
        for j in range(line[0]):
            graph[i, line[j + 1]] = 1

    memo = np.empty((N + 1, 2), dtype=int)
    memo.fill(INF)
    visited = np.zeros((N + 1), dtype=int)
    visited[1] = 1
    print min(dp(1, 0), dp(1, 1))
Пример #17
0
from math import sin, cos, pi
import numpy as np

N1 = 200  # x
N2 = 200  # y

h1 = 10.0 / N1
h2 = 5.0 / N2


# Time
T = 4.0  # Time lapse
J = 500  # Time discretization

tau = T / J  # Time step


# Grid
U = np.empty((N1, N2, J))

########## Initial and border conditions ##########


# u(x=0) = u(x=10) = 0
for t in xrange(0, J):
    for i2 in xrange(0, N2):
        U[0][i2][t] = 0
        U[N1 - 1][i2][t] = 0  # zero both x-boundaries, as the comment above states

# u(t=0) = sin(pi*x)*cos(2pi*y)
for i1 in xrange(0, N1):
    for i2 in xrange(0, N2):
        U[i1][i2][0] = sin(pi * h1 * i1) * cos(2 * pi * h2 * i2)

Example #18
def best_model_correlation(best_model_array, theta_min,theta_max,theta_bins, survey_type="random",field="full", distance=6558.3, obs_surveys=12,x_width=46.0,y_width=35.0, z_depth=41.0 ,box_length=250,random_cat_number=16, pro_path="/home/jemejia/CosmicVariance/"):

    
    print "computing correlation functions of the selected models"
    dmh_path=pro_path+"data/dark_matter/FOF/"
    laes_path=pro_path+"data/laes/FOF/"
    n_i = int(box_length / x_width)
    n_j = int(box_length / y_width)
    n_k = int(box_length / z_depth)
    n_models = n_i * n_j * n_k
    
    ID_file = pro_path + "data/mock_survey/" + "ID_" + survey_type + "_surveys.dat"

    ID_data = np.loadtxt(ID_file, dtype='int')


    survey_ID = ID_data[:, 0]
    field_ID = ID_data[:, 1]
    i_field = ID_data[:, 2]
    j_field = ID_data[:, 3]
    k_field = ID_data[:, 4]

    moc_surveys = survey_ID[-1]

    ID_arr = best_model_array[:, 3]
    index_eq_ID = np.where(ID_arr == 1)

    cat_number = index_eq_ID[0] - 1
 
    i_fields_to_measure=[]
    j_fields_to_measure=[]
    k_fields_to_measure=[]
    m_min_to_measure=[]
    m_max_to_measure=[]
    f_occ_to_measure=[]
    # choosing the subcatalogs of the best fields
    best_correlation = np.empty([len(ID_arr), theta_bins])
    std_correlation = np.empty([len(ID_arr), theta_bins])
    
    # for w in range(len(ID_arr)):
    for w in range(7):
        index = np.where(survey_ID == int(ID_arr[w]))
        
        S_ID = survey_ID[index]
        ID_ini = S_ID[0]
        ID_end = int(ID_ini + obs_surveys)
        m_min = best_model_array[w, 0]
        m_max = best_model_array[w, 1]
        f_occ = best_model_array[w, 2]
        print "model:", w, "parameters:", m_min, m_max, f_occ
        i_s = i_field[ID_ini:ID_end]
        j_s = j_field[ID_ini:ID_end]
        k_s = k_field[ID_ini:ID_end]
        
        
        
        corr=np.zeros( (len(i_s),theta_bins) )
        corr_peebles=np.zeros( (len(i_s),theta_bins) )
        corr_standard=np.zeros( (len(i_s),theta_bins) )
        corr_laes=np.zeros(theta_bins)
        
        if field == "large":
            i_range = 7
            print "large field"
        else:
            i_range = np.size(i_s)
            print "full field"
        print "number of sub-catalogs=", i_range
        for i in range( i_range ):
            
            dmh_filename=dmh_path+"halos_bolshoi_"+str(i_s[i])+"-"+str(j_s[i])+"-"+str(k_s[i])+".csv"
            halos_prop=np.loadtxt(dmh_filename,delimiter=",",skiprows=12)
            
            halo_mass=halos_prop[:,4]
            
            x_halos=halos_prop[:,0]
            y_halos=halos_prop[:,1]
            z_halos=halos_prop[:,2]
            numbers=np.arange(len(halo_mass))
            halo_index=np.where( (halo_mass< m_max) & (halo_mass> m_min) )
            halo_mass_sel=halo_mass[halo_index]
            halo_index=numbers[halo_index]
                    
            np.random.shuffle(halo_index)
            
            
            
            
            n_halos=np.size(halo_mass_sel)
            del halo_mass_sel
            n_laes=int(f_occ*n_halos)
            
            lae_index=halo_index[0:n_laes]
            x_laes=x_halos[lae_index]
            y_laes=y_halos[lae_index]
            
            del x_halos
            del y_halos
            del z_halos
            del halo_mass
            
            #random cat histogram generation
            #P.xlabel(r'$\theta$', fontsize=16)
            #P.ylabel(r"$\xi(\theta)$",fontsize=16)
            if(w==0):
                if(i==0):
                    print w,i
                    print "computing RR (it takes much time but it is only computed once)"
                    x_random= x_width*np.random.random_sample(n_laes*random_cat_number)
                    y_random=y_width*np.random.random_sample(n_laes*random_cat_number)
                    RR,bins=RR_histogram(x_laes,y_laes,x_random,y_random,distance,theta_min,theta_max,theta_bins,cat_number=random_cat_number)
                    print RR
                
            print "subcat number ",i,"i j k=",i_s[i],j_s[i],k_s[i]

            #random-survey histogram generation
            Xmin=x_width*i_s[i]
            Xmax=Xmin + x_width
            Ymin=y_width*j_s[i]
            Ymax=Ymin + y_width
                
                
            x_random= Xmin +  ( Xmax - Xmin )*np.random.random_sample(n_laes)
            y_random=Ymin +   ( Ymax - Ymin )*np.random.random_sample(n_laes)
            
            DR,bins=DR_histogram(x_laes,y_laes,x_random,y_random,distance,theta_min,theta_max,theta_bins,cat_number=1)
            
            #survey histogram generation
            DD,bins=DD_histogram(x_laes,y_laes,distance,theta_min,theta_max,theta_bins)
            
            corr[i,:]=landy_correlation(DD,RR,DR)
            print "CORR_landy=",corr[i,:]
            
        corr_laes=np.mean(corr,axis=0)
        std_corr=np.std(corr,axis=0)
        print "corr_landy=",corr_laes, "std_landy=",std_corr
        
        best_correlation[w,:]=corr_laes
        std_correlation[w,:]=std_corr
        dtheta=(theta_max - theta_min)/theta_bins
        
        correlation_data=np.empty(( np.size(corr_laes) , 3 ) )
        model='{0}_{1}_{2}'.format(m_min, m_max, f_occ)
        model_name = 'model_{0}_{1}_{2}'.format(m_min, m_max, f_occ)
        filename=pro_path + "data/mock_survey/" + "correlation_best_models/" + survey_type + "_correlation_" + model_name + ".dat"
        
        angles = np.linspace( theta_min + dtheta/2.0 , theta_max - dtheta/2.0, theta_bins )
        correlation_data[:,0]=angles
        correlation_data[:,1]=best_correlation[w,:]
        correlation_data[:,2]=std_correlation[w,:]
        
        np.savetxt(filename,correlation_data)
        
        #P.errorbar(correlation_data[:,0]+2.0*w, correlation_data[:,1], correlation_data[:,2],label=model,elinewidth=2.0)
        

    file_plot=pro_path + "data/mock_survey/" + "correlation_best_models/" + survey_type + "_" + field  +"_"+ "correlation_plots" + ".png"
    #P.legend(shadow=False)
    obs_correlation_file=pro_path + "data/obs/hayashino_whole_SSA22_field.txt"
    obs_correlation=np.loadtxt(obs_correlation_file,skiprows=4)
    #P.ylim(ymax=0.6)
    #P.xlim(xmax=1040)
    
    #P.errorbar(obs_correlation[0:theta_bins,0]-3.0, obs_correlation[0:theta_bins,1], obs_correlation[0:theta_bins,2],label="Hayashino et al 2004",elinewidth=3.0,fmt="o-")
    #P.legend(shadow=False)
    #P.title(survey_type)
    #P.savefig(file_plot)
    #P.figure()
    return best_correlation,std_correlation,angles
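landy_correlation(DD, RR, DR) is not among these examples; judging by its name and the DD/DR/RR pair-count histograms fed into it, it presumably implements the Landy & Szalay (1993) estimator, xi(theta) = (DD - 2*DR + RR) / RR, evaluated per angular bin.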
Example #19
def compute_next_layer(j):
    # Need to compute layer j, with known layer j - 1.
    temp = np.empty((N1, N2))  # j + 1/2 intermediate (half-step) layer

    compute_temp(j, temp)
    compute_next(j, temp)
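The j + 1/2 intermediate layer is the signature of a fractional-step (ADI-style) scheme: compute_temp presumably advances one spatial direction to the half layer and compute_next finishes the step along the other direction, although neither helper appears in these examples.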
Example #20
import sys
import numpypy as np

sys.stdin = open("input.txt")
itertest = 0
while True:
    try:
        N = int(input())
    except:
        break
    graph = np.empty((N, N))
    for i in range(N):
        line = map(float, raw_input().split())
        line.insert(i, 1.0)
        for j in range(N):
            graph[i, j] = line[j]

    def DLS(curr, path, depth):
        # Depth-limited search for a cycle whose exchange-rate product exceeds 1.01.
        if depth == 0:
            value = graph[path[-1], path[0]] * curr  # close the cycle back to the start
            if value >= 1.01:
                path.append(path[0])
                return value
            return 0
        for i in range(N):
            if i not in path:
                curr2 = curr * graph[path[-1], i]
                path.append(i)
                value = DLS(curr2, path, depth - 1)
                if value > 0:
                    return value
                path.pop()  # backtrack so the next candidate sees a clean path
        return 0
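Each row of graph holds the exchange rates out of one currency, with line.insert(i, 1.0) pinning the diagonal graph[i, i] to 1.0; DLS then searches for a cycle whose rate product reaches 1.01, i.e. a 1% arbitrage opportunity.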
Example #21
            do_var = ptr(do_var)
        getattr(ff, name_fit)(P.size, ptr(P), x.size, ptr(x), ptr(y),
                              ptr(ydata), ptr(a), do_var)
        return P

    return fun, fun_diff, fun_rms, fun_fit


e2, e2_diff, e2_rms, e2_fit = _fun_factory('_e2')
IV3, IV3_diff, IV3_rms, IV3_fit = _fun_factory('_IV3')
IVdbl, IVdbl_diff, IVdbl_rms, IVdbl_fit = _fun_factory('_IVdbl')

IV4, IV4_diff, IV4_rms, IV4_fit = _fun_a_factory('_IV4')
IV5, IV5_diff, IV5_rms, IV5_fit = _fun_a_factory('_IV5')
IV6, IV6_diff, IV6_rms, IV6_fit = _fun_a_factory('_IV6')
IVdbl2, IVdbl2_diff, IVdbl2_rms, IVdbl2_fit = _fun_a_factory('_IVdbl2')

if __name__ == "__main__":
    try:
        import numpypy as np
    except ImportError:
        import numpy as np

    P = np.array([1., 1.])
    x = np.arange(4.)
    y = np.empty(x.shape, x.dtype)

    print e2(P, x, y)

    print P[0] * np.exp(-P[1] * x)