	def __calc_usage(self):
		"""Recompute per-machine usage and the aggregate usage statistics."""
		self.mac_usage.clear()

		usage_vec = []
		usage_cpu = []
		usage_mem = []

		for mac, machine in self.machines.items():
			if machine.n_tasks > 0:
				# Map each busy machine to its machine object and task objects.
				lst_tasks = [self.tasks[task] for task in machine.tasks]
				self.mac_usage[mac] = (machine, lst_tasks)

				# Cap raw usage at 100% before normalizing by capacity.
				cpu = min(1., machine.CPU_usage)
				mem = min(1., machine.mem_usage)

				usage_vec.append((cpu * mem) / (machine.capacity_CPU * machine.capacity_memory))
				usage_cpu.append(cpu / machine.capacity_CPU)
				usage_mem.append(mem / machine.capacity_memory)

		# Aggregate statistics over the machines that are actually in use.
		if len(usage_vec) > 0:
			self.usage_mean_per = numpy.mean(usage_vec)
			self.usage_stan_per = numpy.std(usage_vec)
			self.usage_CPU_mean = numpy.mean(usage_cpu)
			self.usage_mem_mean = numpy.mean(usage_mem)
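For intuition, here is a minimal self-contained sketch of the same normalization on a toy input (the (usage, capacity) pairs are hypothetical, not taken from the original project):

import numpy

# Hypothetical (capped usage, capacity) samples for three machines.
machines = [(0.5, 1.0), (0.9, 2.0), (1.0, 1.0)]

usage_cpu = [use / cap for use, cap in machines]
print(numpy.mean(usage_cpu))  # 0.65
print(numpy.std(usage_cpu))   # population std (ddof=0), numpy's default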
Example no. 2
	def get_stats(self, start, end):

		entries = self.read_rounds(start, end)
		
		macs  = [e.machines_used + e.machines_not_used for e in entries]
		tasks = [e.total_tasks for e in entries]
		tasks_new = [e.task_new for e in entries]
			
		print "Max. num. de maquinas: %d" % (max(macs))
		print "Min. num. de maquinas: %d" % (min(macs))
		print "Med. num. de maquinas: %f" % (numpy.mean(macs))
		print "Var. num. de maquinas: %f" % (numpy.std(macs))

		print "Max. num. de tarefas: %d" % (max(tasks))
		print "Min. num. de tarefas: %d" % (min(tasks))
		print "Med. num. de tarefas: %f" % (numpy.mean(tasks))
		print "Var. num. de tarefas: %f" % (numpy.std(tasks))

		print "Max. num. de nov. tarefas: %d" % (max(tasks_new))
		print "Min. num. de nov. tarefas: %d" % (min(tasks_new))
		print "Med. num. de nov. tarefas: %f" % (numpy.mean(tasks_new))
		print "Var. num. de nov. tarefas: %f" % (numpy.std(tasks_new))

		# Track the rounds with the smallest and largest usage spread,
		# skipping the first three warm-up rounds below.
		std_min   = float('inf')
		std_max   = 0.
		std_i     = 0
		std_max_i = 0

		for i, e in enumerate(entries[3:], start=3):

			if e.usage_stan_per < std_min:
				std_i = i
				std_min = e.usage_stan_per

			if e.usage_stan_per > std_max:
				std_max_i = i
				std_max = e.usage_stan_per

		print "Min. round std: %d" % (std_i)
		print "Min. std: %f" % (std_min)
		print "Max. round std: %d" % (std_max_i)
		print "Max. std: %f" % (std_max)
Example no. 3
 def fn(population):
     # Sigma scaling: recentre fitnesses around 1 and damp the spread
     # before handing off to fitness-proportionate selection (fps).
     fitnesses = [individual.fitness for individual in population]
     average_fitness = np.mean(fitnesses)
     standard_deviation = np.std(fitnesses)
     standard_deviation = max(0.0001, standard_deviation)  # guard against converged populations
     scaled_fitnesses = [1 + (fitness - average_fitness) /
                         (2 * standard_deviation)
                         for fitness in fitnesses]
     return fps(population, fitnesses=scaled_fitnesses)
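The expression 1 + (fitness - mean) / (2 * std) is standard sigma scaling: fitnesses are recentred around 1 with the spread damped to half a unit per standard deviation, and the max(0.0001, ...) guard avoids division by zero once the population has converged. A standalone check with hypothetical values (plain numpy, no fps needed):

import numpy as np

fitnesses = [1.0, 2.0, 4.0]
mean = np.mean(fitnesses)
std = max(0.0001, np.std(fitnesses))
scaled = [1 + (f - mean) / (2 * std) for f in fitnesses]
print(scaled)           # spread damped relative to the raw fitnesses
print(np.mean(scaled))  # the scaled fitnesses always average to exactly 1.0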
Example no. 5
def do_logging(adults, generation):
    logger.info('Generation: %s' % generation)
    # Assumes `adults` is sorted by ascending fitness, so the best is last.
    logger.info('Current generation best individual: %s' % adults[-1])

    fitnesses = [x.fitness for x in adults]
    mean = np.mean(fitnesses)
    std = np.std(fitnesses)

    # Integer division keeps the indices valid on Python 3 as well.
    q1 = fitnesses[len(fitnesses) * 1 // 4 - 1]
    q2 = fitnesses[len(fitnesses) * 2 // 4 - 1]
    q3 = fitnesses[len(fitnesses) * 3 // 4 - 1]

    logger.info('Current generation fitness mean: %s' % mean)
    logger.info('Current generation fitness std: %s' % std)
    logger.info('Current generation quantiles: %s' % [q1, q2, q3])
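The index arithmetic above assumes the fitness list is sorted and has at least four members; numpy's percentile computes the same quartiles with interpolation and no manual indexing (a sketch with hypothetical values):

import numpy as np

fitnesses = sorted([0.2, 0.5, 0.1, 0.9, 0.7, 0.4])
q1, q2, q3 = np.percentile(fitnesses, [25, 50, 75])
print([q1, q2, q3])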
Example no. 7
 def test_std(self):
     from numpypy import array, std
     a = array([[1, 2], [3, 4]])
     assert std(a) == 1.1180339887498949
     assert (std(a, axis=0) == array([1., 1.])).all()
     assert (std(a, axis=1) == array([0.5, 0.5])).all()
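The expected values can be checked by hand: the flattened array is [1, 2, 3, 4] with mean 2.5; the squared deviations 2.25, 0.25, 0.25, 2.25 sum to 5, so the population variance is 5/4 = 1.25 and std(a) = sqrt(1.25) ≈ 1.1180339887498949. Along axis=0 the columns [1, 3] and [2, 4] each have std 1, and along axis=1 the rows [1, 2] and [3, 4] each have std 0.5 (numpypy follows numpy's default ddof=0, i.e. population std).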
Example no. 9
# Requires: sys, time, math, numpypy, plus the UCR-suite helpers
# (lower_upper_lemire, lb_kim_hierarchy, lb_keogh_cumulative,
# lb_keogh_data_cumulative, dtw) defined elsewhere in this project.
def main():
    start_time = time.time()
    num_args = len(sys.argv)    # number of command-line arguments
    if num_args < 5:
        sys.exit()   # need data file, query file, query length and warping-window fraction

    qp = open(sys.argv[2], "r")   # query file
    fp = open(sys.argv[1], "r")   # data file
    bsf = 1e20        # best-so-far distance; starts effectively at infinity
    m = int(sys.argv[3])              # query length
    loc = 0                           # location of the best match
    r = int(m * float(sys.argv[4]))   # warping window, as a fraction of m
    q = numpypy.array(map(float, qp.readline().split()))

    # z-normalize the query.
    mean = numpypy.mean(q)
    std = numpypy.std(q)
    q = [(X - mean) / std for X in q]
    # Build the envelope of the query: lower envelope l and upper envelope u;
    # r is the warping window.
    l, u = lower_upper_lemire(q, m, r)

    Q_tmp = [(q[i], i) for i in range(m)]
    Q_tmp = sorted(Q_tmp, key=lambda pos: pos[0])
    order = numpypy.empty(m, dtype=numpypy.int32)      #this is the list that holds the sorted order of indexes

    qo = numpypy.empty(m)
    uo = numpypy.empty(m)
    lo = numpypy.empty(m)

    for i in range(m):
        o = Q_tmp[i][1]
        order[i] = o
        qo[i] = q[o]
        uo[i] = u[o]
        lo[i] = l[o]

    # Cumulative lower-bound arrays, initialized to zero.
    cb = numpypy.zeros(m)
    cb1 = numpypy.zeros(m)
    cb2 = numpypy.zeros(m)

    d = 0             # DTW distance
    i = 0
    j = 0
    ex = ex2 = 0      # running sum and sum of squares for online z-normalization

 
    dataArr = map(float, fp.readline().split())
    dataSize = len(dataArr)

    l_buff, u_buff = lower_upper_lemire(dataArr, len(dataArr), r)
    i = 0
    t = numpypy.empty(2 * m)   # double-size circular buffer; avoids copying on wrap-around
    tz = numpypy.empty(m)      # z-normalized candidate subsequence
    kim = 0                    # pruning counters for the three lower bounds
    keogh1 = 0
    keogh2 = 0

    print(str(time.time() - start_time) + " seconds. Now heading to DTW computation")

    for i in range(dataSize):
        ex += dataArr[i]
        ex2 += dataArr[i] * dataArr[i]
        t[i % m] = dataArr[i]
        t[(i % m) + m] = dataArr[i]
        if i >= m - 1:
            # Z-normalize the current candidate T[I] on the fly from the running sums.
            mean = ex / m
            std = ex2 / m
            std = math.sqrt(std - mean * mean)
            j = (i + 1) % m     # start of the candidate inside the circular buffer
            I = i - (m - 1)     # start of the candidate in the data stream
            lb_kim = lb_kim_hierarchy(t, q, j, m, mean, std, bsf)
            if lb_kim < bsf:
                lb_k, cb1 = lb_keogh_cumulative(order, t, uo, lo, j, m, mean, std, bsf)

                if lb_k < bsf:
                    for k in range(m):
                        tz[k] = (t[(k + j)] - mean) / std
                    lb_k2, cb2 = lb_keogh_data_cumulative(order, tz, qo, l_buff[I:], u_buff[I:], m, mean, std, bsf)

                    if lb_k2 < bsf:
                        # Build the cumulative bound from the tighter of the two Keogh bounds.
                        if lb_k > lb_k2:
                            cb[m - 1] = cb1[m - 1]
                            for k in reversed(range(0, m - 1)):
                                cb[k] = cb[k + 1] + cb1[k]
                        else:
                            cb[m - 1] = cb2[m - 1]
                            for k in reversed(range(0, m - 1)):
                                cb[k] = cb[k + 1] + cb2[k]
                        d = dtw(tz, q, cb, m, r, bsf)
                        if d < bsf:
                            bsf = d
                            loc = i - m + 1
                    else:
                        keogh2 +=1
                else:
                    keogh1 +=1
            else:
                kim +=1
            ex -= t[j]
            ex2 -= t[j]*t[j]
    print "location: ", loc
    print "dist : ", math.sqrt(bsf)
    print "data scanned: ", i
Example no. 10
 def test_std(self):
     from numpypy import array, std
     a = array([[1, 2], [3, 4]])
     assert std(a) == 1.1180339887498949
Example no. 12
def best_model_correlation(best_model_array, theta_min, theta_max, theta_bins, survey_type="random", field="full", distance=6558.3, obs_surveys=12, x_width=46.0, y_width=35.0, z_depth=41.0, box_length=250, random_cat_number=16, pro_path="/home/jemejia/CosmicVariance/"):

    print "computing correlation functions of the selected models"
    dmh_path = pro_path + "data/dark_matter/FOF/"
    laes_path = pro_path + "data/laes/FOF/"
    n_i = int(box_length / x_width)
    n_j = int(box_length / y_width)
    n_k = int(box_length / z_depth)
    n_models = n_i * n_j * n_k

    ID_file = pro_path + "data/mock_survey/" + "ID_" + survey_type + "_surveys.dat"

    ID_data = np.loadtxt(ID_file, dtype='int')

    survey_ID = ID_data[:, 0]
    field_ID = ID_data[:, 1]
    i_field = ID_data[:, 2]
    j_field = ID_data[:, 3]
    k_field = ID_data[:, 4]

    mock_surveys = survey_ID[-1]

    ID_arr = best_model_array[:, 3]
    index_eq_ID = np.where(ID_arr == 1)

    cat_number = index_eq_ID[0] - 1
 
    i_fields_to_measure = []
    j_fields_to_measure = []
    k_fields_to_measure = []
    m_min_to_measure = []
    m_max_to_measure = []
    f_occ_to_measure = []
    # choosing the subcatalogs of the best fields
    best_correlation = np.empty([len(ID_arr), theta_bins])
    std_correlation = np.empty([len(ID_arr), theta_bins])

    # for w in range(len(ID_arr)):
    for w in range(7):
        index = np.where(survey_ID == int(ID_arr[w]))

        S_ID = survey_ID[index]
        ID_ini = S_ID[0]
        ID_end = int(ID_ini + obs_surveys)
        m_min = best_model_array[w, 0]
        m_max = best_model_array[w, 1]
        f_occ = best_model_array[w, 2]
        print "model:", w, "parameters:", m_min, m_max, f_occ
        i_s = i_field[ID_ini:ID_end]
        j_s = j_field[ID_ini:ID_end]
        k_s = k_field[ID_ini:ID_end]

        corr = np.zeros((len(i_s), theta_bins))
        corr_peebles = np.zeros((len(i_s), theta_bins))
        corr_standard = np.zeros((len(i_s), theta_bins))
        corr_laes = np.zeros(theta_bins)

        if field == "large":
            i_range = 7
            print "large field"
        else:
            i_range = np.size(i_s)
            print "full field"
        print "number of sub-catalogs =", i_range
        for i in range(i_range):
            
            dmh_filename = dmh_path + "halos_bolshoi_" + str(i_s[i]) + "-" + str(j_s[i]) + "-" + str(k_s[i]) + ".csv"
            halos_prop = np.loadtxt(dmh_filename, delimiter=",", skiprows=12)

            halo_mass = halos_prop[:, 4]

            x_halos = halos_prop[:, 0]
            y_halos = halos_prop[:, 1]
            z_halos = halos_prop[:, 2]
            numbers = np.arange(len(halo_mass))
            halo_index = np.where((halo_mass < m_max) & (halo_mass > m_min))
            halo_mass_sel = halo_mass[halo_index]
            halo_index = numbers[halo_index]

            np.random.shuffle(halo_index)

            n_halos = np.size(halo_mass_sel)
            del halo_mass_sel
            n_laes = int(f_occ * n_halos)

            lae_index = halo_index[0:n_laes]
            x_laes = x_halos[lae_index]
            y_laes = y_halos[lae_index]

            del x_halos
            del y_halos
            del z_halos
            del halo_mass
            
            # random catalog histogram generation
            if w == 0 and i == 0:
                print w, i
                print "computing RR (it takes a long time but is only computed once)"
                x_random = x_width * np.random.random_sample(n_laes * random_cat_number)
                y_random = y_width * np.random.random_sample(n_laes * random_cat_number)
                RR, bins = RR_histogram(x_laes, y_laes, x_random, y_random, distance, theta_min, theta_max, theta_bins, cat_number=random_cat_number)
                print RR

            print "subcat number", i, "i j k =", i_s[i], j_s[i], k_s[i]

            # random-survey histogram generation
            Xmin = x_width * i_s[i]
            Xmax = Xmin + x_width
            Ymin = y_width * j_s[i]
            Ymax = Ymin + y_width

            x_random = Xmin + (Xmax - Xmin) * np.random.random_sample(n_laes)
            y_random = Ymin + (Ymax - Ymin) * np.random.random_sample(n_laes)

            DR, bins = DR_histogram(x_laes, y_laes, x_random, y_random, distance, theta_min, theta_max, theta_bins, cat_number=1)

            # survey histogram generation
            DD, bins = DD_histogram(x_laes, y_laes, distance, theta_min, theta_max, theta_bins)

            corr[i, :] = landy_correlation(DD, RR, DR)
            print "CORR_landy =", corr[i, :]
            
        corr_laes = np.mean(corr, axis=0)
        std_corr = np.std(corr, axis=0)
        print "corr_landy =", corr_laes, "std_landy =", std_corr

        best_correlation[w, :] = corr_laes
        std_correlation[w, :] = std_corr
        dtheta = (theta_max - theta_min) / theta_bins

        correlation_data = np.empty((np.size(corr_laes), 3))
        model = '{0}_{1}_{2}'.format(m_min, m_max, f_occ)
        model_name = 'model_{0}_{1}_{2}'.format(m_min, m_max, f_occ)
        filename = pro_path + "data/mock_survey/" + "correlation_best_models/" + survey_type + "_correlation_" + model_name + ".dat"

        # bin centers of the angular bins
        angles = np.linspace(theta_min + dtheta / 2.0, theta_max - dtheta / 2.0, theta_bins)
        correlation_data[:, 0] = angles
        correlation_data[:, 1] = best_correlation[w, :]
        correlation_data[:, 2] = std_correlation[w, :]

        np.savetxt(filename, correlation_data)
        
        # P.errorbar(correlation_data[:,0]+2.0*w, correlation_data[:,1], correlation_data[:,2], label=model, elinewidth=2.0)

    file_plot = pro_path + "data/mock_survey/" + "correlation_best_models/" + survey_type + "_" + field + "_" + "correlation_plots" + ".png"
    # P.legend(shadow=False)
    obs_correlation_file = pro_path + "data/obs/hayashino_whole_SSA22_field.txt"
    obs_correlation = np.loadtxt(obs_correlation_file, skiprows=4)
    # P.ylim(ymax=0.6)
    # P.xlim(xmax=1040)

    # P.errorbar(obs_correlation[0:theta_bins,0]-3.0, obs_correlation[0:theta_bins,1], obs_correlation[0:theta_bins,2], label="Hayashino et al 2004", elinewidth=3.0, fmt="o-")
    # P.legend(shadow=False)
    # P.title(survey_type)
    # P.savefig(file_plot)
    # P.figure()
    return best_correlation, std_correlation, angles
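landy_correlation(DD, RR, DR) is not shown in this snippet; judging by the names it implements the Landy & Szalay (1993) estimator, xi = (DD - 2*DR + RR) / RR. A minimal sketch of what such a function could look like, assuming DD, DR and RR are already normalized pair-count histograms (the project's own implementation may differ):

import numpy as np

def landy_correlation(DD, RR, DR):
    # Landy & Szalay (1993) estimator, evaluated per angular bin.
    DD, RR, DR = (np.asarray(a, dtype=float) for a in (DD, RR, DR))
    safe_RR = np.where(RR > 0, RR, np.nan)  # guard against empty bins
    return (DD - 2.0 * DR + RR) / safe_RR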
Example no. 13
	#				node_Phi[i] = rd.gauss(phi, sigma)

			# Homophiliation: each node's Phi is repeatedly averaged with its neighbors'.
			for t in range(NO_Phi_realization):
				for i in G.nodes():
					neighbor_Phi[i] = [node_Phi[i]]
					for j in G[i].keys():
						neighbor_Phi[i].append(node_Phi[j])
				for i in G.nodes():
					node_Phi[i] = np.mean(neighbor_Phi[i])
#					node_Phi[i] = sum(neighbor_Phi[i])/(len(G[i])+1)

			new_mean[phi_itr][z_itr].append(np.mean(node_Phi))
			new_std[phi_itr][z_itr].append(np.std(node_Phi))

			node_Phi_data.append(node_Phi)

			# Reset all node states, then mark int(rho*n) distinct random nodes with state 1.
			for i in G.nodes():
				G.node[i] = 0

			for i in range(int(rho * n)):
				r = rd.randint(0, n - 1)
				while G.node[r] == 1:
					r = rd.randint(0, n - 1)
				G.node[r] = 1
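The rejection loop that seeds the initial states can also be written with sampling without replacement, which picks the int(rho*n) distinct nodes directly. A sketch under the fragment's own assumptions (G, rho, n in scope; old networkx G.node dict API):

import random as rd

for i in G.nodes():
    G.node[i] = 0
for r in rd.sample(list(G.nodes()), int(rho * n)):
    G.node[r] = 1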
Example no. 14
 def std(cls, input_list, **kwargs):
     """
     Calculate the standard deviation, rounded to the class default precision.
     """
     return round(numpypy.std(input_list, **kwargs), cls.default_round)
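A minimal usage sketch, assuming the method sits on a small statistics helper class (the Stats name and default_round = 4 are hypothetical, and plain numpy stands in for numpypy):

import numpy

class Stats(object):
    default_round = 4  # hypothetical rounding precision

    @classmethod
    def std(cls, input_list, **kwargs):
        """Calculate the standard deviation."""
        return round(numpy.std(input_list, **kwargs), cls.default_round)

print(Stats.std([1.0, 2.0, 3.0, 4.0]))  # sqrt(1.25) rounded -> 1.118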