Example No. 1
import math
from statistics import StatisticsError


def fmean(data):
    """ Convert data to floats and compute the arithmetic mean.

    This runs faster than the mean() function and it always returns a float.
    The result is highly accurate but not as perfect as mean().
    If the input dataset is empty, it raises a StatisticsError.

    >>> fmean([3.5, 4.0, 5.25])
    4.25

    """
    try:
        n = len(data)
    except TypeError:
        # Handle iterators that do not define __len__().
        n = 0
        def count(x):
            nonlocal n
            n += 1
            return x
        total = math.fsum(map(count, data))
    else:
        total = math.fsum(data)
    try:
        return total / n
    except ZeroDivisionError:
        raise StatisticsError('fmean requires at least one data point') from None
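A quick usage sketch (assuming the imports above): the fallback path is taken by one-shot iterators that lack __len__().

# Usage sketch: a list takes the len() fast path, a generator the count() path.
print(fmean([3.5, 4.0, 5.25]))              # 4.25
print(fmean(x / 10 for x in range(1, 4)))   # ~0.2, elements counted while summing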
Example No. 2
def calcEBP(bandstruct, name, N_VB, N_CB, PRINT_DEBUG):
	bs = bandstruct
	for i in range(bs.nb_bands):
		if min(bs.bands[1][i]) > bs.efermi:
			cbbottom = i
			vbtop = i-1
			break

	vb_en = 0.0 
	cb_en = 0.0 
	for i in range(N_VB):
		vb_en += math.fsum(bs.bands[1][vbtop-i])/N_VB
	for i in range(N_CB):
		cb_en += math.fsum(bs.bands[1][cbbottom+i])/N_CB 

	ebp = (vb_en + cb_en)/(2*len(bs.kpoints))
	if not bs.is_metal():
		ebp -= (bs.get_vbm())['energy']
	else:
		print('Warning, material is a metal! No VBM offset applied, no file written')
	if PRINT_DEBUG:
		print('The branch point energy of ' + name + ' is ' + str(ebp) + ' eV')
	ebp *= EBP_CORRECTION_SLOPE
	ebp += EBP_CORRECTION_Y_INT
	return ebp
Example No. 3
 def x_t_fsum(self, t):
     alpha_k = self._r_tau(t - self._s)
     terms = np.asarray([self._Y[a] * alpha_k[i] * self._A[i, a] for (a,i) in product(range(self._num_ev), range(self._num_gp))])
     re = fsum(np.real(terms))
     im = fsum(np.imag(terms))
     
     return re + 1j*im
Example No. 4
    def step_generation(self, senders, receivers):
        # x_i(t+1) = (a + u(e^i, x(t)))*x_i(t) / (a + u(x(t), x(t)))
        # a is background (lifetime) birthrate -- set to 0

        s_payoffs = self._data['s_payoffs']
        r_payoffs = self._data['r_payoffs']
        s_fitness = [0.] * len(senders)
        r_fitness = [0.] * len(receivers)

        for (s, sp), (r, (rp, rt)) in itertools.product(enumerate(senders), enumerate(receivers)):
            state_acts = self._interactions[(s, r)]
            s_fitness[s] += math.fsum(s_payoffs[state][act] * rp for state, act in state_acts) / 4.
            r_fitness[r] += math.fsum(r_payoffs[rt][state][act] * sp for state, act in state_acts) / 4.

        avg_s = math.fsum(s_fitness[s] * sp for s, sp in enumerate(senders))
        avg_r = math.fsum(r_fitness[r] * rp for r, (rp, rt) in enumerate(receivers))

        new_senders = [s_fitness[s] * sp / avg_s for s, sp in enumerate(senders)]
        new_receivers = [(r_fitness[r] * rp / avg_r, rt) for r, (rp, rt) in enumerate(receivers)]

        for s, sp in enumerate(new_senders):
            if sp < effective_zero:
                new_senders[s] = 0.
        for r, (rp, rt) in enumerate(new_receivers):
            if rp < effective_zero:
                new_receivers[r] = (0., rt)

        return (tuple(new_senders), tuple(new_receivers))
Example No. 5
def get_device_local_storage_price(device):
    price = math.fsum(s.get_price() for s in device.storage_set.all())
    if not price and device.model and device.model.type in (
            DeviceType.rack_server.id, DeviceType.blade_server.id,
            DeviceType.virtual_server.id):
        try:
            os = OperatingSystem.objects.get(device=device)
            group = ComponentModelGroup.objects.get(name='OS Detected Storage')
        except (OperatingSystem.DoesNotExist,
                ComponentModelGroup.DoesNotExist):
            pass
        else:
            if not group.per_size:
                return group.price or 0
            else:
                storage = os.storage or 0
                remote_storage_size = math.fsum(
                    m.get_size() for m in device.disksharemount_set.all()
                )
                storage -= remote_storage_size
                if storage > 0:
                    return (storage /
                            (group.size_modifier or 1)) * (group.price or 0)
        if device.model.type != DeviceType.virtual_server.id:
            try:
                group = ComponentModelGroup.objects.get(name='Default Disk')
            except ComponentModelGroup.DoesNotExist:
                pass
            else:
                return group.price
    return price
Example No. 6
    def isi_differ(self, mod_t, exp_t, args):
        """
        Calculates the normalized average absolute ISI difference in the two traces.
        The first half of the spikes or the first four spikes (whichever is less) are excluded from the calculation.
        
        :param mod_t: the trace obtained from the model as ``list``
        :param exp_t: the input trace as ``list``
        :param args: optional arguments as ``dictionary``
        
        .. note::
            If neither trace contains spikes, the function returns zero.
            If one trace has no spikes but the other does, the function returns one.
            
        :return: the normalized average absolute ISI difference
        
        """
        add_data = args.get("add_data", None)
        window = int(self.option.spike_window)
        spikes = [[], []]
        if self.model.spike_times is None:
            spikes[0] = self.detectSpike(mod_t)
        elif len(self.model.spike_times) != 0:
            # print("using spike times")
            for sp_t in self.model.spike_times:
                start_pos=sp_t-window
                if start_pos<0:
                    start_pos=0
                start_val=mod_t[start_pos]
                peak_pos=sp_t
                peak_val=mod_t[sp_t]
                end_pos=sp_t+window
                if end_pos>=len(mod_t):
                    end_pos=len(mod_t)-1
                end_val=mod_t[end_pos]
                
                spikes[0].append(spike_frame(start_pos,start_val,peak_pos,peak_val,end_pos,end_val))
        if add_data is not None:
            spikes[1] = add_data
        else:
            spikes[1] = self.detectSpike(exp_t)
        tmp = []
        #tmp.append(abs(len(spikes[0])-len(spikes[1]))/max( float(len(spikes[0])),float(len(spikes[1])-1) ))
        if (len(spikes[0]) < 2) and (len(spikes[1]) < 2):
            return 0
        if (len(spikes[0]) < 2) != (len(spikes[1]) < 2):
            return 1
        for s in range(min(len(spikes[0]), len(spikes[1])) - 1):
            tmp.append(abs((spikes[0][s + 1].peak - spikes[0][s].peak)
                        - (spikes[1][s + 1].peak - spikes[1][s].peak)))
        if len(spikes[0]) > len(spikes[1]):
            tmp.append((spikes[0][-1].peak - spikes[0][len(spikes[1])-1].peak))
        elif len(spikes[0]) < len(spikes[1]):
            tmp.append((spikes[1][-1].peak - spikes[1][len(spikes[0])-1].peak))

        if self.option.output_level == "1":
            print "isi difference:"
            print "mod: ", len(spikes[0])
            print "exp: ", len(spikes[1])
            print fsum(tmp), " / ", len(exp_t), " = ", fsum(tmp) / len(exp_t)
        return fsum(tmp) / len(exp_t)
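The core of the metric can be sketched standalone (hypothetical peak positions, not the class above): compare consecutive inter-spike intervals and normalize by the trace length.

from math import fsum

mod_peaks = [10, 30, 55, 85]   # hypothetical spike peak indices (model)
exp_peaks = [12, 30, 50, 95]   # hypothetical spike peak indices (experiment)
isi_mod = [b - a for a, b in zip(mod_peaks, mod_peaks[1:])]  # [20, 25, 30]
isi_exp = [b - a for a, b in zip(exp_peaks, exp_peaks[1:])]  # [18, 20, 45]
diffs = [abs(m - e) for m, e in zip(isi_mod, isi_exp)]       # [2, 5, 15]
print(fsum(diffs) / 100)   # normalized by trace length (100 samples here)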
Example No. 7
 def calc_ase(self, mod_t, exp_t, args):
     """
     Calculates the normalized average squared difference of the traces.
     
     :param mod_t: the trace obtained from the model as ``list``
     :param exp_t: the input trace as ``list``
     :param args: optional arguments as ``dictionary``
     
     :return: the normalized average squared difference, where the normalization is done by
         the squared range of the input trace
         
     """
     if (args["cov_m"]!=None):
             return self.calc_ase_cov(mod_t,exp_t,args)
     temp = []
     for n in range(min([len(exp_t), len(mod_t)])):
         try:
             temp.append(pow(exp_t[n] - mod_t[n], 2))
         except OverflowError:
             return 1
         #except TypeError:
         #    return 1
     try:
         if self.option.output_level == "1":
             print("ase")
             print(fsum(temp) / len(temp) / (pow(max(exp_t) - min(exp_t), 2)))
     except OverflowError:
         return 1
     return fsum(temp) / len(temp) / (pow(max(exp_t) - min(exp_t), 2))
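For equal-length traces the same quantity reduces to a few lines of NumPy; a standalone sketch (hypothetical data, not part of the class):

import numpy as np

mod = np.array([0.0, 1.0, 2.1, 2.9])   # hypothetical model trace
exp = np.array([0.0, 1.1, 2.0, 3.0])   # hypothetical input trace
n = min(len(mod), len(exp))
ase = np.mean((exp[:n] - mod[:n]) ** 2) / np.ptp(exp[:n]) ** 2
print(ase)   # normalized average squared error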
Example No. 8
   def updateSuggestionsForUser(self, user):
      userLikedItems = self.itemsLikedByUser(user)
      userDislikedItems = self.itemsDislikedByUser(user)

      userUnratedItems = frozenset(Item.objects.all()) - (frozenset(userLikedItems) | frozenset(userDislikedItems))
      similarUsers = self.usersSimilarToUser(user)

      for item in userUnratedItems:
         indicesOfLikes = [similarUser[1] for similarUser in similarUsers if item in self.itemsLikedByUser(similarUser[0])]
         indicesOfDislikes = [similarUser[1] for similarUser in similarUsers if item in self.itemsDislikedByUser(similarUser[0])]

         if indicesOfLikes or indicesOfDislikes:
            weight = (fsum(indicesOfLikes) - fsum(indicesOfDislikes)) / (len(indicesOfLikes) + len(indicesOfDislikes))

            try:
               suggestion = Suggestion.objects.get(user=user, item=item)

               if suggestion.weight != weight:
                  suggestion.weight = weight
                  suggestion.save(update_fields=("weight",))
            except ObjectDoesNotExist:
               Suggestion.objects.create(user=user, item=item, weight=weight)
         else:
            try:
               Suggestion.objects.get(user=user, item=item).delete()
            except ObjectDoesNotExist:
               pass
Example No. 9
    def calc_grad_dif(self, mod_t, exp_t, args):
        """
        Calculates the normalized average squared differences of derivatives of the given traces.
        The gradient is calculated as follows:
        ::

            grad_a=((mod_t[i+1]-mod_t[i-1])/(2*dt))

        where dt is the step between two points in the trace
        
        :param mod_t: the trace obtained from the model as ``list``
        :param exp_t: the input trace as ``list``
        :param args: optional arguments as ``dictionary``
        
        :return: the normalized average squared differences of derivatives where the normalization is done
            by the squared range of the input trace    
        """
        dt = self.reader.data.step
        grad_a = 0
        grad_b = 0
        tmp = []
        for i in range(1, min(len(mod_t), len(exp_t)) - 1):
            grad_a = ((mod_t[i + 1] - mod_t[i - 1]) / (2 * dt))
            grad_b = ((exp_t[i + 1] - exp_t[i - 1]) / (2 * dt))
            tmp.append((grad_a - grad_b) ** 2)
        try:
            if self.option.output_level == "1":
                print("grad dif")
                print(fsum(tmp) / len(tmp) / (pow(max(exp_t) - min(exp_t), 2)))
        except OverflowError:
            return 1

        # normalize by the squared range of the input trace, per the docstring
        return fsum(tmp) / len(tmp) / (pow(max(exp_t) - min(exp_t), 2))
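The loop's central differences match numpy.gradient on interior points, so the metric can be sketched standalone (hypothetical traces):

import numpy as np

dt = 0.1
t = np.linspace(0, 5, 51)              # spacing matches dt
mod = np.sin(t)                        # hypothetical model trace
exp = np.sin(t + 0.05)                 # hypothetical input trace
g_mod = np.gradient(mod, dt)[1:-1]     # interior points: central differences
g_exp = np.gradient(exp, dt)[1:-1]
print(np.mean((g_mod - g_exp) ** 2) / np.ptp(exp) ** 2)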
Example No. 10
 def compute(self,values):
     if not len(values):
         return None
     valsum = math.fsum([x**2 for x in values])
     if not valsum:
         return None
     # (sum x)^2 / (n * sum x^2) -- the form of Jain's fairness index
     return math.fsum(values)**2/(len(values)*valsum)
Example No. 11
 def yules_k(self):
     freqs = self.self_wordfreq()
     freq_set = list(set(freqs))
     # Yule's characteristic K = 10^4 * (M2 - M1) / M1^2, where M1 is the
     # total token count and M2 sums f^2 over the frequency classes
     M1 = math.fsum([freqs.count(f)*f for f in freq_set])
     M2 = math.fsum([(f**2)*freqs.count(f) for f in freq_set])
     K = (10000)*(M2 - M1)/(M1**2)
     return K
Example No. 12
def h2jpopx(t,v,maxj,maxv):
    j = np.arange(maxj + 1, dtype = float)  #Array of j values
    g_j = 2.0*j + 1.0
    E_j = h2ejx(v,maxj)                  #cm^-1

    nj = g_j*np.exp(-(E_j*h*c)/(kb*t)) #Herzberg pg. 124

    #Need to properly normalize now (follow Herzberg pgs. 123-125)
    #For most astrophysically relevant temperatures, a max j of 100
    #should be adequate for getting the Q sum to converge

    highj = 25   #TOO MUCH HIGHER AND THE NUMBERS GET SO SMALL THEY ROLLOVER
    E_i = h2ejx(v,highj)
    i = np.arange(highj + 1, dtype = float)

    Qr = (2.0*i + 1.0)*np.exp(-(E_i*h*c)/(kb*t))
    njn = nj/math.fsum(Qr)

    #Get vibrational population (only relevant for large temperatures)
    E_v = np.zeros(maxv+1)

    for m in range(0,maxv+1):
        E_v[m] = h2ejx(m,0)      #cm^-1
    Qv = np.exp(-(E_v*h*c)/(kb*t))
    nvn = Qv/math.fsum(Qv)
    njn = njn*nvn[v]
    return njn
Example No. 13
    def weightedOverhead(self, numpy=False):
        ns = 'weighted.numpy' if numpy else 'weighted'
        factors = {
            'strConcat': 0.8171743283710745,
            'objectInit': 162.78282024428898,
            'getListItem': 12.018827808252004,
            'functionCall': 48.613475069418904,
            'getDictItem': 0.2148913920265531,
            'methodCall': 75.36797944163118,
            'numpy': 1.0,
            }
        runtimes = {
            'strConcat': self.totalStrConcatRuntime(),
            'objectInit': self.totalObjectCreations() * PYTHON_OBJECT_CREATION_COST,
            'functionCall': self.totalPythonCalls() * PYTHON_CALL_COST,
            'getListItem': self.totalGetitemRuntime(),
            }
        if numpy:
            runtimes['numpy'] = self.totalNumpyRuntime()

        total_runtime = fsum(runtimes.values())
        c_runtimes = {key: (runtime / factors[key]) for key, runtime in runtimes.items()}
        total_c_runtime = fsum(c_runtimes.values())

        self.store(ns, 'total_tt', self.total_tt)
        self.store(ns, 'total_runtime', total_runtime)
        self.store(ns, 'total_c_runtime', total_c_runtime)
        self.store(ns, 'runtimes', runtimes)
        self.store(ns, 'factors', factors)
        self.store(ns, 'c_runtimes', c_runtimes)

        return 1 - (total_c_runtime / total_runtime), (total_runtime / self.total_tt)
Example No. 14
    def P_Nbpi(self, obs, Nbpi):
        top=PoissonApprox2(obs.Nmi,obs.Ci*Nbpi)
        #print(top, obs.Nmi, obs.Ci, Nbpi)

        if obs.Nmi>5:
            bottom=1.0/(obs.Ci)
        else:
            poissonrange=(obs.Npi*3+obs.Nmi*3+30)
            #memoize the bottom part
            if obs.Nmi in self.memoizedict2:
                if obs.Ci*Nbpi in self.memoizedict2[obs.Nmi]:
                    bottom=self.memoizedict2[obs.Nmi][obs.Ci*Nbpi]
                else:
                    self.memoizedict2[obs.Nmi][obs.Ci*Nbpi]=math.fsum(\
                        [PoissonApprox2(obs.Nmi,obs.Ci*Nbpi2)\
                        for Nbpi2 in range(poissonrange)])
                    bottom=self.memoizedict2[obs.Nmi][obs.Ci*Nbpi]
            else:
                self.memoizedict2[obs.Nmi]={}
                self.memoizedict2[obs.Nmi][obs.Ci*Nbpi]=math.fsum(\
                    [PoissonApprox2(obs.Nmi,obs.Ci*Nbpi2)\
                    for Nbpi2 in range(poissonrange)])
                bottom=self.memoizedict2[obs.Nmi][obs.Ci*Nbpi]
        if bottom==0.0:
            print("%s/%s" % (top, bottom))
            print("Nbpi: %s" % Nbpi)
            print("Nmi: %s" % obs.Nmi)
            print("Ci: %s" % obs.Ci)
        prob=top/bottom

        return prob
Example No. 15
def simpsons(a,b,n):
    Xk = (b - a) / n
    x2 = arange(a+2*Xk, b-Xk, 2*Xk) # 3rd, 5th, 7th... (n-2)th x values
    x4 = arange(a+Xk, b, 2*Xk) # 2nd, 4th, 6th... (n-1)th x values
    sum_of_2s = 2*fsum(f(x2))
    sum_of_4s = 4*fsum(f(x4))
    return Xk/3*(f(a) + sum_of_2s + sum_of_4s + f(b))
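A usage sketch under the snippet's implied setup: f and arange are module-level names in the original file, so we supply vectorized stand-ins here.

from math import fsum, pi
from numpy import arange, sin as f   # f must accept arrays for f(x2), f(x4)

print(simpsons(0, pi, 100))   # ~2.0, the exact value of the integral of sin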
Example No. 16
def initialize_h_k_aux_variables(settings, instance):
    """Initialize auxiliary variables for the h_k model.
    
    Initialize the rbfval and mu_k_inv variables of a problem
    instance, using the values for x and u_pi already given. This
    helps the local search by starting at a feasible point.

    Parameters
    ----------
    settings : rbfopt_settings.RbfSettings
        Global and algorithmic settings.

    instance : pyomo.ConcreteModel
        A concrete instance of mathematical optimization model.
    """
    assert(isinstance(settings, RbfSettings))

    instance.rbfval = math.fsum(instance.lambda_h[i] * instance.u_pi[i].value
                                for i in instance.Q)

    instance.mu_k_inv = ((-1)**ru.get_degree_polynomial(settings) *
                         math.fsum(instance.Ainv[i,j] * instance.u_pi[i].value
                                   * instance.u_pi[j].value
                                   for i in instance.Q for j in instance.Q) + 
                         instance.phi_0)
Example No. 17
def get_init_nation_data(nation_set, all_games):
    nation_data = {}
    for nation in nation_set:
        nation_data[nation] = {"games":[],"GF":[],"GA":[],"weight":[], \
            "Off":0, "Def":0, "error":1}
    total_goals = []
    total_weight = []
    for game in all_games:
        team1 = game["team1"]
        team2 = game["team2"]
        nation_data[team1]["games"].append(game)
        nation_data[team1]["GF"].append(game["goals1"]*game["weight"])
        nation_data[team1]["GA"].append(game["goals2"]*game["weight"])
        nation_data[team1]["weight"].append(game["weight"])
        nation_data[team2]["games"].append(game)
        nation_data[team2]["GF"].append(game["goals2"]*game["weight"])
        nation_data[team2]["GA"].append(game["goals1"]*game["weight"])
        nation_data[team2]["weight"].append(game["weight"])
        total_goals.append((game["goals1"]+game["goals2"]) * game["weight"])
        total_weight.append(game["weight"])
    
    for nation in nation_set:
        # initial estimates of Offense/Defense are GF/GA divided by weight
        nation_data[nation]["GF"] = math.fsum(nation_data[nation]["GF"])
        nation_data[nation]["GA"] = math.fsum(nation_data[nation]["GA"])
        nation_data[nation]["Offense"] = nation_data[nation]["GF"]
        nation_data[nation]["weight"]=math.fsum(nation_data[nation]["weight"])
        nation_data[nation]["Defense"] = nation_data[nation]["GA"]
        nation_data[nation]["Offense"] /= nation_data[nation]["weight"]
        nation_data[nation]["Defense"] /= nation_data[nation]["weight"]
        
    total_goals = math.fsum(total_goals)
    total_weight = math.fsum(total_weight)
    return nation_data, total_goals/total_weight/2 # PER TEAM
Example No. 18
	def VerifyHMM(self):
		if fsum(self.StartDistribution.values()) != 1.0:
			print('The Start Distribution sums to', fsum(self.StartDistribution.values()))
			return False
		if len(self.States) != len(self.TransitionDistribution):
			print('Missing States Data')
			return False
		self.SM = [0.0] * len(self.States)	# Start Matrix
		for iNode, NodeSource in enumerate(self.States):
			if NodeSource in self.StartDistribution:
				self.SM[iNode] = self.StartDistribution[NodeSource]
		self.TPM = [[0.0] * len(self.States) for s in self.States]	# Transition Probability Matrix
		for iNode, NodeSource in enumerate(self.States):
			for jNode, NodeDestiny in enumerate(self.States):
				if NodeDestiny in self.TransitionDistribution[NodeSource]:
					self.TPM[iNode][jNode] = self.TransitionDistribution[NodeSource][NodeDestiny]
		for row in self.TPM:
			if fsum(row) != 1:
				print(row, 'does not sum to 1')
				return False
		#print(self.TPM)
		self.EPM = [[0.0] * len(self.States) for o in self.Observations]	# Emission Probability Matrix
		for iNode, NodeDestiny in enumerate(self.Observations):
			for jNode, NodeSource in enumerate(self.States):
				if NodeDestiny in self.EmissionDistribution[NodeSource]:
					self.EPM[iNode][jNode] = self.EmissionDistribution[NodeSource][NodeDestiny]
		for icol in range(len(self.States)):
			col = [row[icol] for row in self.EPM]
			if fsum(col) != 1:
				print(col, 'does not sum to 1')
				return False
		#print(self.EPM)
		return True
Example No. 19
def sums_squared_residuals(dof, model, model_instance, problem_instance):
    """
    compute the ssr for each observable (trajectory) independently
    returns list
    """
    if dof is not None:
        assert(len(dof) == len(problem_instance["parameter_indices"]))
    # TODO: preconditions
    # TODO: more pythonic
    
    if dof is not None:
        for ii in range(len(dof)):
            index = problem_instance["parameter_indices"][ii]
            model_instance["parameters"][index] = dof[ii]
            problem_instance["parameters"][ii] = dof[ii]

    if model is not None:
        model_instance["model"] = model
        cd.print_legacy_code_message()

    sum_res = []
    # TODO: tidy-up
    if len(problem_instance["output_indices"]) == 1:
        sum_res.append(math.fsum(res**2 for res in residuals_st(model, model_instance, problem_instance)))
        return sum_res
    for ii in range(len(problem_instance["outputs"])):
        sum_res.append(math.fsum(res**2 for res in residuals_st(model, model_instance, problem_instance)[ii]))
    return sum_res
Example No. 20
def kl_measures(sender_pop, receiver_pop, n=2):
    msgs = list(itertools.product(range(n), range(n)))
    states = list(itertools.product(range(n), range(n)))
    state_probs = [1. / float(len(states))] * len(states)

    all_cprobs_msg_given_state = collections.defaultdict(list)
    all_cprobs_state_given_msg = collections.defaultdict(list)
    information_contents = collections.defaultdict(list)
    for i, msg in enumerate(msgs):
        cprobs_msg_given_state = []
        for j, state in enumerate(states):
            pr = 0.
            for (sender, sender_prob) in sender_pop:
                if simulation.sender_matrix(sender)[j][i] == 1:
                    pr += sender_prob
            cprobs_msg_given_state.append(pr)
        all_cprobs_msg_given_state[i] = cprobs_msg_given_state

        for j, state in enumerate(states):
            if math.fsum(cprobs_msg_given_state) > 0.:
                prob_state_given_msg = ((state_probs[j] * cprobs_msg_given_state[j]) /
                                        math.fsum(state_probs[k] * cprobs_msg_given_state[k]
                                                    for k in range(len(states))))
            else:
                prob_state_given_msg = float('inf')

            all_cprobs_state_given_msg[j].append(prob_state_given_msg)

            if prob_state_given_msg > 0. and not math.isinf(prob_state_given_msg):
                information_contents[i].append(math.log(prob_state_given_msg / state_probs[j]))
            else:
                information_contents[i].append(- float('inf'))

    return (information_contents, all_cprobs_state_given_msg, all_cprobs_msg_given_state)
Example No. 21
def parametros_gamma(v):
	# Method-of-moments fit for a gamma distribution (Thom's approximation):
	# A = ln(mean) - mean(ln x); alpha = (1 + sqrt(1 + 4A/3)) / (4A)
	X = math.fsum(v) / float(len(v))
	A = math.log(X) - math.fsum(map(math.log, v)) / len(v)
	alpha = 1.0 / (4*A) * (1.0 + math.sqrt(1.0 + 4.0/3.0 * A))
	lamb = alpha / X
	beta = 1.0 / lamb
	return alpha, beta
Example No. 22
File: Geom.py  Project: oserve/PyNMR
def calcDistance(*coords):
    """    Calculate distance according to :
    ((sum of all distances^-6)/number of distances)^-1/6
    or (sum of all distances^-6)^-1/6

    calcDistance.method should be set before use
    """
    result = None

    try:
        distance_list = (sqrt(fsum(sub(*coord) ** 2 for coord in zip(*atoms))) for atoms in product(*coords))
        sum6 = fsum(pow(distance, -6) for distance in distance_list)
        if calcDistance.method == 'ave6':
            number_of_distances = reduce(mul, (len(coord) for coord in coords))
        elif calcDistance.method == 'sum6':
            number_of_distances = 1
        result = pow(sum6/number_of_distances, -1./6)
    except(ValueError, TypeError):
        errors.add_error_message("Problem using coordinates : " +
                                 str(coords) + "\n" +
                                 " and distances list : " +
                                 str([distance for distance in distance_list]) + "\n")
    except AttributeError:
        sys.stderr.write("Please set calcDistance.method before using calcDistance()\n")

    return result
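A hypothetical usage sketch (sqrt, fsum, sub, product and errors come from the original module; the coordinates are invented):

calcDistance.method = 'sum6'    # must be set before the first call
atoms_a = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))   # one selection, two atoms
atoms_b = ((0.0, 3.0, 0.0),)                   # another selection, one atom
print(calcDistance(atoms_a, atoms_b))   # r^-6-summed effective distance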
Example No. 23
def genStats(times):
    N = len(times)
    avgTime = math.fsum(times)/ N
    timesMinusMean = [x - avgTime for x in times]
    timesMMSquared = [math.pow(x,2) for x in timesMinusMean]
    var = math.fsum(timesMMSquared) / N
    return avgTime, var
Example No. 24
def collapse_duplicates(raw_data):
	# Create dictionary of lists of duplicates
	dup_data = raw_data.get_array()
	set_sp = {}
	set_ab = {}
	set_co = {}
	set_sz = {}
	set_plasmids = {}
	for sp,ab,co in dup_data:
		if 'taxid' in sp: # retain useful information
			name = sp.rpartition('|')[0] # last segment is usually the original chromosome etc name
		else:
			name = sp.partition('_gi|')[0].partition('|')[0].partition('_gca')[0] #the prepended strain name
		set_sp.setdefault(name,[]).append(sp)
		set_ab.setdefault(name,[]).append(ab)
		set_co.setdefault(name,[]).append(co)
		set_sz.setdefault(name,[]).append(co)

	assert(set_ab.keys() == set_co.keys() == set_sp.keys())

	# New, clean dataset for data without duplicates
	undupe = Dataset()

	# Note: we include plasmids in the count total solely because i100 was simulated to include 1x plasmid coverage.
	for k,v in set_sp.items():
		if len(v) == 1: # just add record directly if it has no duplicates
			undupe.add_record(k,set_ab[k][0],set_co[k][0],set_sz[k][0])
		else: # sum counts and average abundances
			undupe.add_record(k,math.fsum(set_ab[k])/len(v),math.fsum(set_co[k]),math.fsum(set_sz[k]))

	print "Number of entries after combining duplicates: {0}".format(len(undupe.species))

	return undupe
Example No. 25
def stats(results, optimal=None):
    total, notfound, worst = first_stats(results)
    avg = total / len(results)
    varianza = fsum([(x - avg) ** 2 for x in results]) / len(results)
    scarto = format(sqrt(varianza), '.2f')
    valori = set(results)
    frequenze = dict(zip(valori, [results.count(v) for v in valori]))
    sorted_frequenze = sorted(frequenze, key=frequenze.get, reverse=True)
    sorted_frequenze = sorted_frequenze[:10]
    if optimal:
        opt_sum, opt_nf, opt_worst = first_stats(optimal)
        opt_avg = opt_sum / len(optimal)
        opt_scarto = format(sqrt(fsum([(x - opt_avg) ** 2 for x in optimal]) / len(optimal)), '.2f')
        ratio_avg = avg / opt_avg
        ratio_worst = worst / opt_worst
        ratio_scarto = format(float(scarto) / float(opt_scarto), '.2f')

    print("-------------------------------------------------")
    print("Statistics:\t\t\t\tOffline\tOnline\tRatio")
    print("Number of tests run:\t\t " + str(len(results)) + "\t" + str(len(optimal)))
#    print("Fuel exhausted:\t\t\t " + str(notfound))
    print("Worst case:\t\t\t\t " + str(worst) + "\t" + str(opt_worst) + "\t" + str(ratio_worst))
    print("Arithmetic mean of the results:\t\t " + str(avg) + "\t" + str(opt_avg) + "\t" + str(ratio_avg))
    print("Standard deviation:\t\t " + str(scarto) + "\t" + str(opt_scarto) + "\t" + str(ratio_scarto))
    print("The ten most frequent results:")
    print("Cost:\tCount:\tBelow the mean?")
    for el in sorted_frequenze:
        sotto = "mean"
        if el < avg:
            sotto = "yes"
        elif el > avg:
            sotto = "no"
        print(str(el) + "\t" + str(frequenze[el]) + "\t\t" + sotto)
Example No. 26
def inTheGrandSchemeOfThings(nrNodesMin, nrNodesMax, algorithm):
    listOfAverage = [[0.0], []]
    listOfMaximum = [[0.0], []]
    listOfMedian = [[0.0], []]

    for nrNodes in range(nrNodesMin, nrNodesMax, 2):
        listOfNodes = setupDesign(nrNodes, algorithm)
        maxLevel = len(listOfNodes) - 1
        while len(listOfAverage) <= maxLevel and len(listOfMaximum) <= maxLevel and len(listOfMedian) <= maxLevel:
            listOfAverage.append([])
            listOfMaximum.append([])
            listOfMedian.append([])
        for level in range(1, maxLevel + 1):
            average = Measurements.averageDistance(listOfNodes, level)
            listOfAverage[level].append(average / level)
            maximum = Measurements.maximumDistance(listOfNodes, level)
            listOfMaximum[level].append(maximum / level)
            median = Measurements.distanceMedian(listOfNodes, level)
            listOfMedian[level].append(median / level)
    for i in range(1, len(listOfAverage)):
        listOfAverage[i] = math.fsum(listOfAverage[i]) / len(listOfAverage[i])
        listOfMaximum[i] = math.fsum(listOfMaximum[i]) / len(listOfMaximum[i])
        listOfMedian[i] = math.fsum(listOfMedian[i]) / len(listOfMedian[i])
    del listOfAverage[0]
    del listOfMaximum[0]
    del listOfMedian[0]
    plt.plot(range(1, 1 + len(listOfAverage)), listOfAverage, label = 'Average')
    plt.plot(range(1, 1 + len(listOfMaximum)), listOfMaximum, label = 'Maximum')
    plt.plot(range(1, 1 + len(listOfMedian)), listOfMedian, label = 'Median')
    plt.legend(loc=3)
    plt.axis([0, maxLevel, 0, 1.1])
    plt.show()
Example No. 27
def render_summary(stats):
    """
    Render summary of an event stream run.
    :param stats: Dictionary('clock':list()<float>, 'rss':list()<int>)
    :return: Void.
    """
    print('\nSummary profile from stream execution:')
    print('Samples: %i' % len(stats['clock']))
    if -1 in stats['clock']:
        print('(ERRORS DETECTED: Removing timing samples from aborted invocations.)')
        stats['clock'] = [x for x in stats['clock'] if x > 0]
        print('New sample size: %i' % len(stats['clock']))
    median = sorted(stats['clock'])[math.trunc(len(stats['clock']) / 2)]
    print(stats['clock'])
    mean =  math.fsum(stats['clock'])/len(stats['clock'])
    print('Clock time:\n'
          '\tMin: %ims, Max: %ims, Median: %ims, Median Billing Bucket: %ims, Rounded Standard Deviation: %sms' % (
              min(stats['clock']),
              max(stats['clock']),
              median,
              billing_bucket(median),
              math.trunc(math.ceil(math.sqrt(math.fsum((x-mean)**2 for x in stats['clock'])/(len(stats['clock'])-1))))
          )) if len(stats['clock']) > 0 else print("No valid timing samples!")
    print('Peak resident set size (memory):\n'
          '\tMin: %s, Max: %s' % (
              size(min(stats['rss'])),
              size(max(stats['rss']))
          ))
Example No. 28
def _generalised_sum(data, func):
    """_generalised_sum(data, func) -> len(data), sum(func(items of data))

    Return a two-tuple of the length of data and the sum of func() of the
    items of data. If func is None, use just the sum of items of data.
    """
    # Try fast path.
    try:
        count = len(data)
    except TypeError:
        # Slow path for iterables without len.
        # We want to support BIG data streams, so avoid converting to a
        # list. Since we need both a count and a sum, we iterate over the
        # items and emulate math.fsum ourselves.
        ap = add_partial
        partials = []
        count = 0
        if func is None:
            # Note: we could check for func is None inside the loop. That
            # is much slower. We could also say func = lambda x: x, which
            # isn't as bad but still somewhat expensive.
            for count, x in enumerate(data, 1):
                ap(x, partials)
        else:
            for count, x in enumerate(data, 1):
                ap(func(x), partials)
        total = math.fsum(partials)
    else: # Fast path continues.
        if func is None:
            # See comment above.
            total = math.fsum(data)
        else:
            total = math.fsum(func(x) for x in data)
    return count, total
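A usage sketch: with a sized sequence the fast path runs and add_partial is never touched.

print(_generalised_sum([1.0, 2.0, 3.0], None))             # (3, 6.0)
print(_generalised_sum([1.0, 2.0, 3.0], lambda x: x * x))  # (3, 14.0)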
Example No. 29
def compute_volume(mesh):
    if "tetra" in mesh.cells:
        vol = math.fsum(
            get_simplex_volumes(*prune_nodes(mesh.points, mesh.cells["tetra"]))
        )
    elif "triangle" in mesh.cells or "quad" in mesh.cells:
        vol = 0.0
        if "triangle" in mesh.cells:
            # triangles
            vol += math.fsum(
                get_triangle_volumes(*prune_nodes(mesh.points, mesh.cells["triangle"]))
            )
        if "quad" in mesh.cells:
            # quad: treat as two triangles
            quads = mesh.cells["quad"].T
            split_cells = numpy.column_stack(
                [[quads[0], quads[1], quads[2]], [quads[0], quads[2], quads[3]]]
            ).T
            vol += math.fsum(
                get_triangle_volumes(*prune_nodes(mesh.points, split_cells))
            )
    else:
        assert "line" in mesh.cells
        segs = numpy.diff(mesh.points[mesh.cells["line"]], axis=1).squeeze()
        vol = numpy.sum(numpy.sqrt(numpy.einsum("...j, ...j", segs, segs)))

    return vol
Example No. 30
	def test_Poisson_sum_times(self):
		print("Testing Poisson sum times")
		for k_max, lmbda in [(400000, 100000), (400000, 200000),
				(600000, 200000), (800000, 200000)]:
			print("k_max=%s, lmbda=%s" % (k_max, lmbda))
			gt1 = time.perf_counter()
			s = math.fsum([bayes.PoissonApprox2(x, lmbda) for x in range(k_max)])
			gt2 = time.perf_counter()
			print("Time: %s seconds" % (gt2 - gt1,))

Example No. 31
def walk_maze(m, n, cell, indx):
    # fill cell
    cell[n][m] = indx
    # down
    if n < N - 1 and cell[n + 1][m] == NOT_CLUSTERED:
        walk_maze(m, n + 1, cell, indx)
    # right
    if m < M - 1 and cell[n][m + 1] == NOT_CLUSTERED:
        walk_maze(m + 1, n, cell, indx)
    # left
    if m and cell[n][m - 1] == NOT_CLUSTERED:
        walk_maze(m - 1, n, cell, indx)
    # up
    if n and cell[n - 1][m] == NOT_CLUSTERED:
        walk_maze(m, n - 1, cell, indx)


if __name__ == '__main__':
    cell = newgrid(n=N, p=0.5)
    print('Found %i clusters in this %i by %i grid\n' %
          (clustercount(cell), N, N))
    pgrid(cell)
    print('')

    for n in n_range:
        N = M = n
        sim = fsum(cluster_density(n, p) for i in range(t)) / t
        print('t=%3i p=%4.2f n=%5i sim=%7.5f' % (t, p, n, sim))
Example No. 32
def softmax(x):
    y = [math.exp(k) for k in x]
    sum_y = math.fsum(y)
    z = [k/sum_y for k in y]

    return z
Example No. 33
def GMM(data,K):
    
    n_feat = data.shape[0] 
    n_obs = data.shape[1] 
    
    
    def gaussian(x,mean,cov):
        det_cov = np.linalg.det(cov)
        cov_inv = np.zeros_like(cov)
        for i in range(n_obs):
            cov_inv[i,i] = 1/cov[i,i]
        diff = np.matrix(x-mean)
        
        N = (2.0 * np.pi) ** (-len(data[1]) / 2.0) * (1.0 / (np.linalg.det(cov) ** 0.5)) *\
            np.exp(-0.5 * np.sum(np.multiply(diff*cov_inv,diff),axis=1))
        return N
    
    
    def initialize():
        mean = np.array([data[np.random.choice(n_feat,1)]],np.float64)
        cov = [np.random.randint(1,255)*np.eye(n_obs)]
        cov = np.matrix(np.multiply(cov,np.random.rand(n_obs,n_obs)))
        return {'mean': mean, 'cov': cov}
   
   
   
    bound = 0.0001
    max_itr = 500
    
    parameters = [initialize() for cluster in range (K)]
    cluster_prob = np.ndarray([n_feat,K],np.float64)
    
    #EM - step E
    itr = 0
    mix_c = [1./K]*K
    log_likelihoods = []
    while (itr < max_itr):
        print(itr)
        itr+=1
        
        
        for cluster in range (K):
            cluster_prob[:,cluster:cluster+1] = gaussian(data,parameters[cluster]['mean'],parameters[cluster]['cov'])*mix_c[cluster]
            
        
        cluster_sum = np.sum(cluster_prob,axis=1)
        log_likelihood = np.sum(np.log(cluster_sum))
        
        log_likelihoods.append(log_likelihood)
        
        cluster_prob = np.divide(cluster_prob,np.tile(cluster_sum,(K,1)).transpose())
        
        Nk = np.sum(cluster_prob,axis = 0) #2
        
        
        #EM - step M
        for cluster in range (K):
            temp_sum = math.fsum(cluster_prob[:,cluster])
            new_mean = 1./ Nk[cluster]* np.sum(cluster_prob[:,cluster]*data.T,axis=1).T
            
            parameters[cluster]['mean'] = new_mean
            diff = data - parameters[cluster]['mean']
            new_cov = np.array(1./ Nk[cluster]*np.dot(np.multiply(diff.T,cluster_prob[:,cluster]),diff)) 
            parameters[cluster]['cov'] = new_cov
            mix_c[cluster] = 1./ n_feat * Nk[cluster]
            
            
       
        if len(log_likelihoods)<2: continue
        if np.abs(log_likelihood-log_likelihoods[-2])<bound : break
    
    return mix_c,parameters
Example No. 34
def avg(l):
    return math.fsum([v for v in l]) / float(len(l))
Example No. 35
def stddev(l):
    l_avg = avg(l)
    return math.sqrt((1 / (float(len(l)) - 1)) * math.fsum(
        ((v - l_avg)**2) for v in l))
Example No. 36
# ---------------------------------------------------------------------------- #
#*                                    IMPORTS                                   #
# ---------------------------------------------------------------------------- #

import math

print(math.pi * (5**2))  # pi * 5^2

## fsum() --> adds together floating point numbers. If we use just sum(), we will get weird results because of the nature of floating point numbers.
# See: https://blog.tecladocode.com/decimal-vs-float-in-python/ for more information

numbers = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
print(math.fsum(numbers))  # we get exactly 1.0
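# For contrast, plain sum() on the same list shows the rounding error the
# comment above warns about:
print(sum(numbers))  # 0.9999999999999999 -- not exactly 1.0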

#* ------------------ Importing specific things from modules ------------------ #

## We can import specific parts of a module - let's say we just want the constant pi:
from math import pi, tau

# NOTE: now when we want to call those constants above, we do not need the math. prior to it:
print(pi, tau)

#* -------------------------- Modules and Namespaces -------------------------- #

import math
print(
    globals()
)  # NOTE: we can see that the functions/parameters live only in the math namespace, which means we need to access them via the . (dot) operator. I.e., math.pi, math.fsum(), etc.

from math import pi, tau
print(
    globals()
)  # pi and tau now appear as top-level names in the global namespace
Example No. 37
 def error(stump, weights):
     '''Calculate the weighted error of the given stump.'''
     _, _, mistakes = stump
     return math.fsum(weights[m] for m in mistakes)
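A hypothetical usage sketch (the stump layout is assumed from the unpacking above: the third slot holds misclassified sample indices):

weights = [0.1, 0.2, 0.3, 0.4]    # per-sample boosting weights
stump = (None, None, [0, 3])      # (feature, threshold, mistake indices)
print(error(stump, weights))      # 0.1 + 0.4 = 0.5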
Example No. 38
from collections import defaultdict
import math
from functools import partial

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson

def prob(start, end, observed_indels, readlen):
    if start == end:
        varcov = 0
    elif start > end:
        return 0.0
    else:
        varcov = sum(c for p, c in observed_indels.items() if p >= start and p < end) / (end - start)
    p = 1.0
    for s in range(readlen):
        count = observed_indels[s]
        c = varcov if s >= start and s < end else 0
        p *= poisson.pmf(count, c)
    return p


observed = defaultdict(int)
observed.update({25: 1, 52: 1, 46: 1, 42: 1, 37: 1})
readlen = 100

y = np.array([prob(start, end, observed, readlen) for start in range(readlen) for end in range(readlen)])
y = y.reshape((readlen, readlen))
marginal = math.fsum(y.reshape(10000))
y /= marginal

plt.imshow(y)
plt.show()
Example No. 39
 def add(self, *arg):
     self.sum += arg
     while len(self.sum) >= 5:
         print(int(fsum(self.sum[0:5])))
         self.sum = self.sum[5:]
Example No. 40
def average_time_relative(dict_list):
    """
    let us define the optimal time as the time it would have taken the navigator
    to reach it's final position if it were to move in straight line
    the relative navigation time is define as measured time/optimal time
    this function calculates the relative navigation time for each successful navigator
    and returns an average
    """

    ##(x,y,T,odor,gamma,state,success)
    def first_movement(diff_list):
        for i in range(len(diff_list)):
            if diff_list[i][5] != 'wait':
                #print 'state = '+ diff_list[i][5]
                return i

    def calc_speed(diff_list):
        i = first_movement(diff_list)
        x1 = diff_list[i][0]
        y1 = diff_list[i][1]
        x2 = diff_list[i + 1][0]
        y2 = diff_list[i + 1][1]
        dist = distance(x1, y1, x2, y2)
        dt = diff_list[i + 1][2] - diff_list[i][2]
        speed = dist / dt
        return speed

    def optimal_time(diff_list):
        x1 = diff_list[0][0]
        y1 = diff_list[0][1]
        x2 = diff_list[-1][0]
        y2 = diff_list[-1][1]
        opt_dist = distance(x1, y1, x2, y2)

        speed = calc_speed(diff_list)
        optimal_time = opt_dist / speed

        return optimal_time

    def distance(x1, y1, x2, y2):
        x = x2 - x1
        y = y2 - y1
        return (x**2 + y**2)**0.5

    def optimal_distance(diff_list):
        x1 = diff_list[0][0]
        y1 = diff_list[0][1]
        x2 = diff_list[-1][0]
        y2 = diff_list[-1][1]
        return distance(x1, y1, x2, y2)

    def traveled_distance(diff_list):
        dist_sum = 0.0
        for i in range(1, len(diff_list)):
            if diff_list[i][-2]:
                x1 = diff_list[i - 1][0]
                y1 = diff_list[i - 1][1]
                x2 = diff_list[i][0]
                y2 = diff_list[i][1]
                dist_sum += distance(x1, y1, x2, y2)
        return dist_sum

    def time_elapsed(diff_list):
        i = first_movement(diff_list)
        time_ela = diff_list[-1][2] - diff_list[i][2]
        return time_ela

    times_list = []
    for diff_dict in dict_list:
        for key in diff_dict:
            diff_list = diff_dict[key]
            if diff_list[-1][-1]:
                """
                optimal = optimal_distance(diff_list)
                traveled = traveled_distance(diff_list)
                #print (optimal,traveled)
                relative_time = traveled/optimal
                """
                elapsed = time_elapsed(diff_list)
                optimal = optimal_time(diff_list)
                relative_time = elapsed / optimal
                times_list.append(math.fabs(relative_time))

    #print times_list
    if len(times_list) != 0:
        average = math.fsum(times_list) / float(len(times_list))
    else:
        average = 0
    return average
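A worked example of the metric itself, with made-up numbers:

measured_time = 30.0    # seconds the navigator actually took
optimal_time = 12.0     # straight-line distance / measured speed
print(measured_time / optimal_time)   # 2.5, i.e. 2.5x slower than optimal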
Example No. 41
# import the math module and use help and dir to get information about its capabilities and attributes
import math
help(math)
print(dir(math)) # list the module's methods and attributes

# Using the math module
# Create a list with numbers
# Use the math module to find the square root of a number
# Use the math module to truncate the number 123.45
# Use the math module to sum the numbers in the list

list1 = [2,4,6,8,10,12]
print("The square root of 81: ", math.sqrt(81))
print("Truncate 123.45: ", math.trunc(123.45))
print("Sum of the numbers in list1: ", math.fsum(list1))

# Help with the statistics module
# import the statistics module and use help and dir to get information about its capabilities and attributes
import statistics
help(statistics)
print(dir(statistics))

# Using the statistics module
# create a list that has 5 numbers
# Use the statistics module to find the average of the numbers
# Use the statistics module to find the median of the numbers
# Use the statistics module to find the standard deviation of the numbers

ages = [22,14,50,34,78,18,24]
print("Average age: ", statistics.mean(ages))
Example No. 42
def _mean(data):
    assert len(data), '_mean() received no data to average.'
    return math.fsum(data) / float(len(data))
Example No. 43
 def next(self):
     data = self.data.get(size=self.p.period)
     dataweighted = map(operator.mul, data, self.p.weights)
     self.line[0] = self.p.coef * math.fsum(dataweighted)
Example No. 44
  def trials(o, n=500, out="out.csv", verbose=True, write=False):
    import csv
    keys = []
    _efforts = []
    _months = []
    _defects = []
    _risks = []
    _first = 0
    rows = []
    with open(out, 'w') as csv_file:
      if write:
        csv_wri = csv.writer(csv_file)
      for _i in range(0, n):
        x = o.x()
        if _i == 0:
          for _k in x:
            keys.append('$' + str(_k))
          keys.extend(["-effort", "-months",
                       "-defects", "-risks"])
          if write:
            csv_wri.writerows([keys])
        a = x["b"]
        b = o.all["b"].y(a, reset=True)
        kloc = x["kloc"]
        sum = o.sumSfs(x, reset=True)
        prod = o.prodEms(x, reset=True)
        exp = b + 0.01 * sum
        effort = o.effort_calc(x, a, b, exp, sum, prod)
        months = o.month_calc(x, effort, sum, prod)
        defects = o.defect_calc(x)
        risks = o.risk_calc(x)
        _efforts.append(effort)
        _months.append(months)
        _defects.append(defects)
        _risks.append(risks)
        vals = []
        for _v in x.values():
          vals.append(_v)
        vals.extend([effort, months, defects, risks])
        if write:
          csv_wri.writerows([vals])
        rows.append(vals)

    if verbose:
      _effSum = math.fsum(_efforts)
      _mosSum = math.fsum(_months)
      _defSum = math.fsum(_defects)
      _rskSum = math.fsum(_risks)
      _effMean = _effSum / n
      _mosMean = _mosSum / n
      _defMean = _defSum / n
      _rskMean = _rskSum / n
      _effSD = pow(math.fsum((x - _effMean) ** 2 for x in _efforts) / n, 0.5)
      _mosSD = pow(math.fsum((x - _mosMean) ** 2 for x in _months) / n, 0.5)
      _defSD = pow(math.fsum((x - _defMean) ** 2 for x in _defects) / n, 0.5)
      _rskSD = pow(math.fsum((x - _rskMean) ** 2 for x in _risks) / n, 0.5)
      _efforts.sort()
      _months.sort()
      _defects.sort()
      _risks.sort()
      print "Means:"
      print "\tEff:", _effMean, "\n\tMos:", _mosMean, "\n\tDef:", _defMean, "\n\tRsk:", _rskMean
      print ""
      print "Standard Deviations:"
      print "\tEff:", _effSD, "\n\tMos:", _mosSD, "\n\tDef:", _defSD, "\n\tRsk:", _rskSD
      print ""
      print "Quartile Bounds (25/50/75):"
      print "\tEff:", _efforts[int(.25 * n)], "\t",\
          _efforts[int(.5 * n)], "\t",\
          _efforts[int(.75 * n)],  \
          "\n\tMos:", _months[int(.25 * n)], "\t",\
          _months[int(.5 * n)], "\t",\
          _months[int(.75 * n)],  \
          "\n\tDef:", _defects[int(.25 * n)], "\t",\
          _defects[int(.5 * n)], "\t",\
          _defects[int(.75 * n)],  \
          "\n\tRsk:", _risks[int(.25 * n)], "\t",\
          _risks[int(.5 * n)], "\t",\
          _risks[int(.75 * n)]
    return keys, rows
Example No. 45
print("Redondeando para abajo 3.98:", math.floor(3.98))

#Redondear hacia arriba
print("Redondeando para arriba 3.56:", math.ceil(3.56))

#Valor Absoluto
print("Valor absoluto de |-10|:", abs(-10))

#Sumatorio
n = [1, 2, 3]

#Suma los elementos de la lista
print("Lista n \n", n)
print("Suma de la lista n:", sum(n))

print("Suma de la lista n, en flotante:", math.fsum(n))

#Truncar Parte decimal
print(100.121323548)
print("Truncar la parte decimal:", math.trunc(100.121323548))

#Generar numero flotante Aleatorio entre el [0,1)
print("\n\n")
print("Numero aleatorio: ", random.random())

#Genera un numero entre el [1,10)
print("Numero aleatorio: ", random.uniform(1, 10))

#Generar numero Entero Aleatorio
print("Numero aleatorio:", random.randrange(10))  #[0,10)
print("Numero aleatorio:", random.randrange(0, 101))  #[0,101)
Example No. 46
# problem 1099
from math import fsum
N = int(input())
box = []
boxOdd = []

for i in range(N):
    num1, num2 = map(int, input().split())
    boxOdd = []
    for j in range(abs(num2 - num1) - 1):
        if num2 > num1 and num1 != num2:
            num1 = num1 + 1
            if num1 % 2 != 0:
                boxOdd.append(num1)
        if num1 > num2 and num1 != num2:
            num2 = num2 + 1
            if num2 % 2 != 0:
                boxOdd.append(num2)
    box.insert(i, int(fsum(boxOdd)))
for p in range(len(box)):
    print(box[p])
Example No. 47
    def score(self, hypothesis, corpus, n=1):
        # containers
        count = [0, 0, 0, 0]
        clip_count = [0, 0, 0, 0]
        r = 0
        c = 0
        weights = [0.25, 0.25, 0.25, 0.25]

        # accumulate ngram statistics
        for hyps, refs in zip(hypothesis, corpus):
            # if type(hyps[0]) is list:
            #    hyps = [hyp.split() for hyp in hyps[0]]
            # else:
            #    hyps = [hyp.split() for hyp in hyps]
            # import pdb
            # pdb.set_trace()
            hyps = [hyp.split() for hyp in hyps]
            refs = [ref.split() for ref in refs]
            # import pdb
            # pdb.set_trace()
            # hyps = [hyps]
            # Shawn's evaluation
            # refs[0] = [u'GO_'] + refs[0] + [u'EOS_']
            # hyps[0] = [u'GO_'] + hyps[0] + [u'EOS_']

            for idx, hyp in enumerate(hyps):
                for i in range(4):
                    # accumulate ngram counts
                    hypcnts = Counter(ngrams(hyp, i + 1))
                    cnt = sum(hypcnts.values())
                    count[i] += cnt
                    # import pdb
                    # pdb.set_trace()
                    # compute clipped counts
                    max_counts = {}
                    for ref in refs:
                        refcnts = Counter(ngrams(ref, i + 1))
                        for ng in hypcnts:
                            max_counts[ng] = max(max_counts.get(ng, 0),
                                                 refcnts[ng])
                    clipcnt = dict((ng, min(count, max_counts[ng])) \
                                   for ng, count in hypcnts.items())
                    clip_count[i] += sum(clipcnt.values())

                # accumulate r & c
                bestmatch = [1000, 1000]
                for ref in refs:
                    if bestmatch[0] == 0: break
                    diff = abs(len(ref) - len(hyp))
                    if diff < bestmatch[0]:
                        bestmatch[0] = diff
                        bestmatch[1] = len(ref)
                r += bestmatch[1]
                c += len(hyp)
                if n == 1:
                    break
        # computing bleu score
        p0 = 1e-7
        bp = 1 if c > r else math.exp(1 - float(r) / float(c))
        p_ns = [float(clip_count[i]) / float(count[i] + p0) + p0 \
                for i in range(4)]
        s = math.fsum(w * math.log(p_n) \
                      for w, p_n in zip(weights, p_ns) if p_n)
        bleu = bp * math.exp(s)
        return bleu
Example No. 48
 def next(self):
     self.line[0] = \
         math.fsum(self.data.get(size=self.p.period)) / self.p.period
Example No. 49
def main():
    candidate_path = sys.argv[1]
    reference_path = sys.argv[2]

    # single file
    if os.path.isfile(reference_path):
        can_len, ref_len = 0, 0

        uni_c, uni_t = 0, 0
        bi_c, bi_t = 0, 0
        tri_c, tri_t = 0, 0
        four_c, four_t = 0, 0

        candidate = codecs.open(candidate_path, encoding='utf-8')
        reference = codecs.open(reference_path, encoding='utf-8')

        while 1:
            c_line = candidate.readline()
            r_line = reference.readline()
            if not c_line:
                break
            if not r_line:
                break
            c_line = c_line.strip()
            r_line = r_line.strip()
            can_len += len(c_line.split(" "))
            ref_len += len(r_line.split(" "))

            c_c, t_c = get_count(c_line, r_line, 1)
            uni_c += c_c
            uni_t += t_c

            c_c, t_c = get_count(c_line, r_line, 2)
            bi_c += c_c
            bi_t += t_c

            c_c, t_c = get_count(c_line, r_line, 3)
            tri_c += c_c
            tri_t += t_c

            c_c, t_c = get_count(c_line, r_line, 4)
            four_c += c_c
            four_t += t_c

        uni_p = modified_precision(uni_c, uni_t)
        bi_p = modified_precision(bi_c, bi_t)
        tri_p = modified_precision(tri_c, tri_t)
        four_p = modified_precision(four_c, four_t)
        bp = brevity_penalty(can_len, ref_len)
        print(uni_c, uni_t, uni_p)
        print(bi_c, bi_t, bi_p)
        print(tri_c, tri_t, tri_p)
        print(four_c, four_t, four_p)
        print(can_len, ref_len, bp)

        val = [0.25 * uni_p, 0.25 * bi_p, 0.25 * tri_p, 0.25 * four_p]

        tmpsum = math.fsum(val)
        score = bp * math.exp(tmpsum)

        print(score)
        with open('bleu_out.txt', 'w') as fout:
            fout.write(str(score))

    # directory
    else:
        uni_ref_list = construct_ref_dic_list(reference_path, 1)
        bi_ref_list = construct_ref_dic_list(reference_path, 2)
        tri_ref_list = construct_ref_dic_list(reference_path, 3)
        four_ref_list = construct_ref_dic_list(reference_path, 4)

        uni_can_list = construct_can_dic_list(candidate_path, 1)
        bi_can_list = construct_can_dic_list(candidate_path, 2)
        tri_can_list = construct_can_dic_list(candidate_path, 3)
        four_can_list = construct_can_dic_list(candidate_path, 4)

        uni_c, uni_t = get_clipped_count(uni_can_list, uni_ref_list)
        bi_c, bi_t = get_clipped_count(bi_can_list, bi_ref_list)
        tri_c, tri_t = get_clipped_count(tri_can_list, tri_ref_list)
        four_c, four_t = get_clipped_count(four_can_list, four_ref_list)

        can_len = get_candidate_length(candidate_path)
        ref_len = get_reference_length(reference_path)

        uni_p = modified_precision(uni_c, uni_t)
        bi_p = modified_precision(bi_c, bi_t)
        tri_p = modified_precision(tri_c, tri_t)
        four_p = modified_precision(four_c, four_t)
        bp = brevity_penalty(can_len, ref_len)

        val = [0.25 * uni_p, 0.25 * bi_p, 0.25 * tri_p, 0.25 * four_p]

        tmpsum = math.fsum(val)
        score = bp * math.exp(tmpsum)

        print(score)
        with open('bleu_out.txt', 'w') as fout:
            fout.write(str(score))
Example No. 50
 def total(self) -> float:
     return fsum(e.total for e in self.expenses)
Example No. 51
"""Make a program which reads a text file, which can have any number of floating point numbers, which are all in their own lines.
The program will calculate and display the sum and average (mean) of all these numbers. Make your program fool proof.
It should handle all common errors and ignore those lines of the input text file, which don't have valid numbers."""

from fileinput import input as fileInput
from math import fsum
from statistics import mean
floatNumbers = []

with fileInput(files="calcFromFile.txt") as fileContent:
    for line in fileContent:
        try:
            line = float(line)
            floatNumbers.append(line)
        except (ValueError, TypeError):
            print("Line", fileContent.lineno(),
                  ": Not a float value. Ignoring...")

print("Sum of given values: ", fsum(floatNumbers))
print("Mean of given values: ", mean(floatNumbers))
Example No. 52
#!/usr/bin/env python

import math

cards = int(input())
sum_of_cards = int(math.fsum(range(cards + 1)))

for i in range(cards - 1):
    sum_of_cards -= int(input())

print(sum_of_cards)
Example No. 53
def histogram_entropy_py(image):
    """ Calculate the entropy of an images' histogram. """
    from math import log2, fsum
    histosum = float(color_count(image))
    histonorm = (histocol / histosum for histocol in image.histogram())
    return -fsum(p * log2(p) for p in histonorm if p != 0.0)
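The same calculation without PIL, sketched over a hypothetical 4-bin histogram:

from math import log2, fsum

counts = [10, 20, 30, 40]                  # hypothetical histogram counts
total = float(sum(counts))
histonorm = (c / total for c in counts)
print(-fsum(p * log2(p) for p in histonorm if p != 0.0))   # ~1.846 bits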
Example No. 54
        chrm, coord, data = extractLine(line)

        replicates = getReplicates(data,
                                   quads=False if side == "absolute" else True)

        # this should work for both absolute and left/right positioning
        ameth, aunmeth = replicates[a][2:4] if side == "right" else replicates[
            a][0:2]
        bmeth, bunmeth = replicates[b][2:4] if side == "right" else replicates[
            b][0:2]

        # only write the percentages if at least minReads number of reads
        if (ameth + aunmeth >= minReadsInAtLeastOneSampleForPercentage
                or bmeth + bunmeth >= minReadsInAtLeastOneSampleForPercentage
            ) and (ameth + aunmeth >= minReadsInBothSamplesForPercentage
                   and bmeth + bunmeth >= minReadsInBothSamplesForPercentage):

            aPercentage = (float(ameth) / float(ameth + aunmeth)) * 100.0
            bPercentage = (float(bmeth) / float(bmeth + bunmeth)) * 100.0

            bins[int(aPercentage)].append(bPercentage)

            x.append(aPercentage)
            y.append(bPercentage)

    print "Pearson:" + str(pearsonr(x, y))

    for i in bins:
        avg = math.fsum(bins[i]) / len(bins[i])
        print i, avg, avg - float(i)
Example No. 55
 def compute(self,values):
     return math.fsum(values)
Example No. 56
def main(argv):

    # Model parameters 
    ###############################################################################
    
    WS=3.7e-4       # [1/s]  A-attached type growth rate
    WA=0.5*WS         # [1/s]  S-swimming type growth rate
    Xlength=350.0     # [mkm] total length of the layer in mkm
    D0=0.1 #float(sys.argv[2])          # [mkm2/s] diffusion constant
    
    Nlayers=100
    Nstep=10          # layers with step
    StepF=float(sys.argv[2])
    D=[D0 for i in range(Nlayers)]
    Dstep=[StepF*D0 for i in range(Nstep)]
    D[0:Nstep]=Dstep
    P=float(sys.argv[1])           # [] adsorption probability
    
    #Simulation parameters
    
    dx=float(Xlength)/Nlayers
    #X=[(i+1)*dx for i in range(Nlayers)]
    
    
    dt=0.005
    NtimeSteps=2000000 #- 3000000 8 hours
    Lambda=[D[i]*dt/(dx*dx) for i in range(Nlayers)]
    
    
    # Saving data to the file # how frequent to save
    Nfreq=NtimeSteps/1000                      
    NTsave=int(NtimeSteps/Nfreq)
    #T=[i*Nfreq for i in range(NTsave+1)]
    SProfile=np.zeros((Nlayers,NTsave))
    Stotal=np.zeros((NTsave,1))               
    Total=np.zeros((NTsave,1))      
    A=np.zeros((NTsave,3))          #  3colums array A-sep A-grown A-transition
    
    
    # Initial conditions
    # S-initial condition as constant
    #mu=Xlength/2.0
    #sigma=Xlength*0.1
    S0=10000.0
    Sprev=[S0 for i in range(Nlayers)]
    TotalStep0=np.sum(Sprev)
    Stotal[0]=TotalStep0
    #plt.plot(X,Sprev)
    #plt.show()
    
    
    # A-initial condition
    Aprev=0
    
    # Initial condition
    SProfile[0:Nlayers,0]=Sprev
    Total[0]=0
    
    
    ###############################################################################
    # Backward Euler scheme
    ###############################################################################
    
    # Coefficient matrix
    Smatrix=np.zeros((Nlayers,Nlayers))
    for i in range(1,Nlayers-1):
        Smatrix[i,i-1]=-Lambda[i]
        Smatrix[i,i]=1+Lambda[i]+Lambda[i+1]-WS*dt
        Smatrix[i,i+1]=-Lambda[i+1]
        
     # Boundary conditions
    # Adsorbing boundary 
    Smatrix[0,0]=1+Lambda[1]+Lambda[0]-Lambda[0]*(1-P)-WS*dt
    Smatrix[0,1]=-Lambda[1]
    # Reflective boundary
    Smatrix[Nlayers-1,Nlayers-2]=-Lambda[Nlayers-1]
    Smatrix[Nlayers-1,Nlayers-1]=1+Lambda[Nlayers-1]-WS*dt   
    SMatrixSparse = csr_matrix(Smatrix)
    #SMatrixSparse = SMatrixSparse.astype(np.float64)
     
    # Solve matrix equations Smatrix
    counter = 1
    for i in range(1,NtimeSteps):
        Snext=spsolve(SMatrixSparse,Sprev)
        Anext=(P*Lambda[0]*Snext[0]+Aprev)/(1-WA*dt) #
        
        # save profile every Nfreq steps
        if (i % Nfreq)==0:
            #print(i)
            SProfile[0:Nlayers,counter]=Snext
            Stotal[counter]=fsum(Snext)
            A[counter,0]=Anext
            A[counter,1]=WA*Aprev*dt
            A[counter,2]=P*Lambda[0]*Snext[0]
            Total[counter]=Anext+fsum(Snext)-TotalStep0
            counter=counter+1
        
        # update
        Aprev=Anext
        Sprev=Snext
        
    ###############################################################################
    # Save to file    
    ###############################################################################
#    plt.plot(Total)
#    plt.title("Total")
#    plt.show()
#    plt.plot(SProfile)
#    plt.title("Sprofile")
#    plt.show()
#    plt.plot(A[1:NTsave,0])
#    plt.title("Atotal")
#    
        
    # files with profile
    root_folder=''
    datastamp='ID_'+str(randint(100,999))+'_' 
    #datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    filename_Sprofile=root_folder+datastamp+'__Stype_profile.txt'
    filename_A=root_folder+datastamp+'__Atype_profile.txt'
    filename_Total=root_folder+datastamp+'__Total.txt'
    filename_STotal=root_folder+datastamp+'__STotal.txt'
    filename_Dprofile=root_folder+datastamp+'__D.txt'
    
    np.savetxt(filename_Sprofile,SProfile,delimiter='\t')
    np.savetxt(filename_A,A,delimiter='\t')
    np.savetxt(filename_Total,Total,delimiter='\t')
    np.savetxt(filename_STotal,Stotal,delimiter='\t')
    np.savetxt(filename_Dprofile,D,delimiter='\t')
    
    #file with parameters
    filename_parameters=root_folder+datastamp+'__ParametersFile.txt'
    Text=[]
    Text.append("WA\t%4.10f\t1/sec\t\n" %WA)
    Text.append("WS\t%4.10f\t1/sec\t \n" %WS)
    Text.append("Xlength\t%4.4f\tmkm\t\n" %Xlength)
    Text.append("D0\t%4.4f\tmkm^2/s\t\n" %D0)
    Text.append("StepF\t%4.4f\t[]/s\t\n" %StepF)
    Text.append("Nstep\t%4.4f\t[]/s\t\n" %Nstep)
    Text.append("P\t%4.4f\t[]\t\n" %P)
    Text.append("dt\t%4.4f\ts\t\n" %dt)
    Text.append("Nlayers\t%4.4f\t[]\t\n" %Nlayers)
    
    TimeStep=dt*Nfreq
    Text.append("Nfreq\t%4.4f\t[]\t\n" % Nfreq)
    Text.append("NtimeSteps\t%4.4f\t[]\t\n" %NtimeSteps)
    Text.append("TimeStep\t%4.4f\t[]\t\n" %TimeStep)
    
    f = open(filename_parameters, 'a')
    for line in Text:
        #print(line)
        f.write(line)
    f.close()
Example No. 57
def normalize(vector):
    norm = math.sqrt(math.fsum([v**2 for v in vector]))
    if norm == 0:
        print("Null vector! Can't normalize.")

    return [element / norm for element in vector]
Example No. 58
# coding:utf8
"""
Python 3 standard library: math
"""
import math
# Plain sum
print(sum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1]))
# Returns an accurate floating-point sum of the iterable; precision loss is
# avoided by tracking multiple intermediate partial sums
print(math.fsum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1]))
Example No. 59
 def _coincidence_index(cls, text):
     coincidence = fsum([
         (frequency / len(text)) * ((frequency - 1) / (len(text) - 1))
         for frequency in Counter(text).values()
     ])
     return coincidence
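For reference, the same index of coincidence on a literal string (standalone sketch):

from collections import Counter
from math import fsum

text = "ABABAB"
n = len(text)
print(fsum((f / n) * ((f - 1) / (n - 1)) for f in Counter(text).values()))  # 0.4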
Example No. 60
 def compute(self, values):
     self._avg_values.append(math.fsum(values)/len(values))
     while len(self._avg_values) > self._smooth_steps:
         self._avg_values.pop(0)
     return math.fsum(self._avg_values)/len(self._avg_values)