Code example #1
File: bpnn3.py  Project: 13steinj/cython
    def update(self, inputs):
#        print 'update', inputs
        if len(inputs) != self.ni-1:
            raise ValueError('wrong number of inputs')

        # input activations
        for i in range(self.ni-1):
            #self.ai[i] = 1.0/(1.0+math.exp(-inputs[i]))
            self.ai[i] = inputs[i]

        # hidden activations
        for j in range(self.nh):
            sum = 0.0
            for i in range(self.ni):
                 sum = sum + self.ai[i] * self.wi[i][j]
            self.ah[j] = 1.0/(1.0+math.exp(-sum))

        # output activations
        for k in range(self.no):
            sum = 0.0
            for j in range(self.nh):
                sum = sum + self.ah[j] * self.wo[j][k]
            self.ao[k] = 1.0/(1.0+math.exp(-sum))

        return self.ao[:]
Code example #2
File: pop_gen_func.py  Project: stschiff/hfit
def mSel(a, mu, s, k, m):
    hyperg = hyp1f1(k + mu, m + 2.0 * mu, s)
    ret = binom(m, k)
    ret /= zSel(a, mu, s)
    ret *= math.exp(gammaln(k + mu) + gammaln(m - k + mu) - gammaln(m + 2.0 * mu))
    ret *= (1.0 - math.exp(-(1.0 - a) * s) * hyperg)
    return ret
Code example #3
File: realtime_signals.py  Project: cycps/cys
 def on_mouse_wheel(self, event):
     dx = np.sign(event.delta[1]) * .05
     scale_x, scale_y = self.program['u_scale']
     scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx),
                                 scale_y * math.exp(0.0*dx))
     self.program['u_scale'] = (max(1, scale_x_new), max(1, scale_y_new))
     self.update()
Code example #4
def c_(coef, set, sol, c_o):              
    '''Calculate c at sub lattice'''
    for x in range(0,set['Nx']+1,2):
# #         sol['c'][x] = 0.5*(numpy.sin(2*m.pi/(coef['la'])*x*set['Hh']-2*m.pi/(coef['pe'])*set['dt']*set['k'])) 
# #         if sol['c'][x] < 0:
# #             sol['c'][x] *= 0
        sol['c'][x] = coef['A_c']*m.exp(-(x*set['Hh']+(1)*set['rad']-coef['vel']*set['t'])**2/coef['vari'])
        for i in range(1,100):
            sol['c'][x] += coef['A_c']*m.exp(-(x*set['Hh']+(1)*set['rad']+i*coef['perio']-coef['vel']*set['t'])**2/coef['vari'])        
 
    
    
#     '''Different Var'''
#     dd = 0
#     x = 0
#     while dd == 0 and x <= set['Nx']:
#         if sol['c'][x] == coef['A_c']:
#             dd = 1
#         elif sol['c'][x] < coef['A_c']:
#             vari = 0.1
#             sol['c'][x] = coef['A_c']*m.exp(-(x*set['Hh']+(-3)*set['rad']-coef['vel']*set['t'])**2/vari)#0.05 set['dt']*set['k']
#             for i in range(1,100):
#                 sol['c'][x] += coef['A_c']*m.exp(-(x*set['Hh']+(-3)*set['rad']+i*coef['perio']-coef['vel']*set['t'])**2/vari)
#             x += 2
    
    return sol
Code example #5
def standardMC_european_option(K, T, R, V, S0, N, option_type, path_num=10000):
    dt = T / N
    sigma = V
    drift = math.exp((R - 0.5 * sigma * sigma) * dt)
    sigma_sqrt = sigma * math.sqrt(dt)
    exp_RT = math.exp(-R * T)
    european_payoff = []
    for i in xrange(path_num):
        former = S0
        for j in xrange(int(N)):
            former = former * drift * math.exp(sigma_sqrt * numpy.random.normal(0, 1))
        european_option = former

        if option_type == 1.0:
            european_payoff_call = exp_RT * max(european_option - K, 0)
            european_payoff.append(european_payoff_call)
        elif option_type == 2.0:
            european_payoff_put = exp_RT * max(K - european_option, 0)
            european_payoff.append(european_payoff_put)

    # Standard Monte Carlo
    p_mean = numpy.mean(european_payoff)
    p_std = numpy.std(european_payoff)
    p_confmc = (p_mean - 1.96 * p_std / math.sqrt(path_num), p_mean + 1.96 * p_std / math.sqrt(path_num))
    return p_mean, p_std, p_confmc
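
Since the simulated paths follow geometric Brownian motion, the Monte Carlo estimate above can be sanity-checked against the Black-Scholes closed form. The sketch below is not part of the original snippet; the helper name bs_price and the sample parameter values are illustrative assumptions, and the option-type convention (1.0 for calls, 2.0 for puts) simply mirrors the code above.

import math

def bs_price(K, T, R, V, S0, option_type):
    # Black-Scholes price of a European option (1.0 = call, 2.0 = put).
    d1 = (math.log(S0 / K) + (R + 0.5 * V * V) * T) / (V * math.sqrt(T))
    d2 = d1 - V * math.sqrt(T)
    Phi = lambda z: 0.5 * (1.0 + math.erf(z / math.sqrt(2.0)))  # standard normal CDF
    if option_type == 1.0:
        return S0 * Phi(d1) - K * math.exp(-R * T) * Phi(d2)
    return K * math.exp(-R * T) * Phi(-d2) - S0 * Phi(-d1)

# The Monte Carlo mean should fall inside the reported confidence interval around this value.
print(bs_price(K=100.0, T=1.0, R=0.05, V=0.2, S0=100.0, option_type=1.0))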
Code example #6
File: mate.py  Project: kelvinn/GDAY
    def peaked_arrh(self, k25, Ea, Tk, deltaS, Hd):
        """ Temperature dependancy approximated by peaked Arrhenius eqn, 
        accounting for the rate of inhibition at higher temperatures. 

        Parameters:
        ----------
        k25 : float
            rate parameter value at 25 degC
        Ea : float
            activation energy for the parameter [J mol-1]
        Tk : float
            leaf temperature [deg K]
        deltaS : float
            entropy factor [J mol-1 K-1]
        Hd : float
            describes rate of decrease about the optimum temp [J mol-1]
        
        Returns:
        -------
        kt : float
            temperature dependence on parameter 
        
        References:
        -----------
        * Medlyn et al. 2002, PCE, 25, 1167-1179. 
        
        """
        arg1 = self.arrh(k25, Ea, Tk)
        arg2 = 1.0 + exp((self.mt * deltaS - Hd) / (self.mt * const.RGAS))
        arg3 = 1.0 + exp((Tk * deltaS - Hd) / (Tk * const.RGAS))

        return arg1 * arg2 / arg3
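
The method relies on self.arrh and on constants (const.RGAS, self.mt) defined elsewhere in the project. As a hedged, standalone sketch of the plain (non-peaked) Arrhenius term that arrh is expected to return, following the form given in Medlyn et al. (2002); the function name, the 25 degC reference temperature, and the constant value are assumptions here:

import math

RGAS = 8.314  # universal gas constant [J mol-1 K-1] (assumed value of const.RGAS)

def arrh(k25, Ea, Tk):
    # Scale a rate parameter from its value at 25 degC (298.15 K) to leaf temperature Tk.
    return k25 * math.exp((Ea * (Tk - 298.15)) / (298.15 * RGAS * Tk))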
Code example #7
File: nested_sampler.py  Project: bnord/LensPop
 def logadd(x,y):
     """ A helper function for log addition """
     from math import log,exp
     if x>y:
         return x+log(1.+exp(y-x))
     else:
         return y+log(1.+exp(x-y))
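
A quick usage check, not part of the original file; the function is repeated so the check runs standalone. The rearrangement returns log(exp(x) + exp(y)) while avoiding the overflow a naive computation would hit for large inputs.

from math import log, exp

def logadd(x, y):  # repeated from the snippet above
    return x + log(1. + exp(y - x)) if x > y else y + log(1. + exp(x - y))

print(logadd(log(2.0), log(3.0)), log(5.0))  # both ~1.6094
print(logadd(1000.0, 1001.0))                # ~1001.3133, although exp(1000.0) alone overflows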
Code example #8
 def increase(self):
     value=math.log(self.read())
     value+=.25
     if math.exp(value)>=self.maxvalue:
         value=math.log(self.maxvalue)
     print (value)
     self.write(int(math.exp(value)))
Code example #9
 def _guinier_porod(self, x):
     """
     Guinier-Porod Model
     """
     # parameters
     G = self.params['scale']
     s = self.params['dim']
     Rg = self.params['rg']
     m = self.params['m']
     bgd = self.params['background']
     n = 3.0 - s
     qval = x
     # take care of the singular points
     if Rg <= 0.0:
         return bgd
     if (n-3.0+m) <= 0.0:
         return bgd
     #do the calculation and return the function value
     q1 = sqrt((n-3.0+m)*n/2.0)/Rg
     if qval < q1:
         F = (G/pow(qval,(3.0-n)))*exp((-qval*qval*Rg*Rg)/n) 
     else:
         F = (G/pow(qval, m))*exp(-(n-3.0+m)/2.0)*pow(((n-3.0+m)*n/2.0),
                                     ((n-3.0+m)/2.0))/pow(Rg,(n-3.0+m))
     inten = F + bgd
 
     return inten
Code example #10
File: brine.py  Project: paulomarcondes/pygass
def dens(temp, press=70, sali=0):
    '''Compute water or brine density from temperature, pressure and salinity,
    according to Spivey et al (2004).

    temp:   temperature in degrees Celsius.
    press:  pressure in MPa.
    sali:   salinity in ppm.'''

    mols = mol(sali)

    densw0 = coef(Dw, temp)

    if sali == 0:
        # If salinity is zero, compute the density of pure water at the given pressure.
        dens = densw0 * exp(compress(temp, press) - compress(temp, 70.))
    else:
        densb0 = densw0

        J = len(Dcm)

        for j in range(J):
            densb0 += coef(Dcm[j], temp) * mols ** (j / 2. + 0.5)

        dens = densb0 * exp(compress(temp, press, sali) - compress(temp, 70., sali))

    return dens
Code example #11
File: monte_carlo.py  Project: kzhai/PyNaiveBayes
    def optimize_hyperparameters(self, samples=5, step=3.0):
        old_hyper_parameters = [math.log(self._alpha_alpha), math.log(self._alpha_beta)]
        
        for ii in xrange(samples):
            log_likelihood_old = self.compute_likelihood(self._alpha_alpha, self._alpha_beta)
            log_likelihood_new = math.log(random.random()) + log_likelihood_old
            #print("OLD: %f\tNEW: %f at (%f, %f)" % (log_likelihood_old, log_likelihood_new, self._alpha_alpha, self._alpha_beta))

            l = [x - random.random() * step for x in old_hyper_parameters]
            r = [x + step for x in old_hyper_parameters]

            for jj in xrange(self._alpha_maximum_iteration):
                new_hyper_parameters = [l[x] + random.random() * (r[x] - l[x]) for x in xrange(len(old_hyper_parameters))]
                trial_alpha, trial_beta = [math.exp(x) for x in new_hyper_parameters]
                lp_test = self.compute_likelihood(trial_alpha, trial_beta)

                if lp_test > log_likelihood_new:
                    self._alpha_alpha = math.exp(new_hyper_parameters[0])
                    self._alpha_beta = math.exp(new_hyper_parameters[1])
                    #self._alpha_sum = self._alpha_alpha * self._K
                    #self._beta_sum = self._alpha_beta * self._number_of_language_types
                    old_hyper_parameters = [math.log(self._alpha_alpha), math.log(self._alpha_beta)]
                    break
                else:
                    for dd in xrange(len(new_hyper_parameters)):
                        if new_hyper_parameters[dd] < old_hyper_parameters[dd]:
                            l[dd] = new_hyper_parameters[dd]
                        else:
                            r[dd] = new_hyper_parameters[dd]
                        assert l[dd] <= old_hyper_parameters[dd]
                        assert r[dd] >= old_hyper_parameters[dd]

            print("\nNew hyperparameters (%i): %f %f" % (jj, self._alpha_alpha, self._alpha_beta))
Code example #12
def get_gradient(theta):
    global fractional_counts, feature_index, event_grad, rc, dictionary_features
    assert len(theta) == len(feature_index)
    event_grad = {}
    for event_j in events_to_features:
        (t, dj, cj) = event_j
        f_val, f = \
            get_wa_features_fired(type=t, context=cj, decision=dj, dictionary_features=dictionary_features,
                                  ishybrid=True)[0]
        a_dp_ct = exp(get_decision_given_context(theta, decision=dj, context=cj, type=t)) * f_val
        sum_feature_j = 0.0
        norm_events = [(t, dp, cj) for dp in normalizing_decision_map[t, cj]]
        for event_i in norm_events:
            A_dct = exp(fractional_counts.get(event_i, 0.0))
            if event_i == event_j:
                (ti, di, ci) = event_i
                fj, f = get_wa_features_fired(type=ti, context=ci, decision=di, dictionary_features=dictionary_features,
                                              ishybrid=True)[0]
            else:
                fj = 0.0
            sum_feature_j += A_dct * (fj - a_dp_ct)
        event_grad[event_j] = sum_feature_j  # - abs(theta[event_j])  # this is the regularizing term


    # grad = np.zeros_like(theta)
    grad = -2 * rc * theta  # l2 regularization with lambda 0.5
    for e in event_grad:
        feats = events_to_features.get(e, [])
        for f in feats:
            grad[feature_index[f]] += event_grad[e]

    # for s in seen_index:
    # grad[s] += -theta[s]  # l2 regularization with lambda 0.5
    assert len(grad) == len(feature_index)
    return -grad
Code example #13
def get_decision_given_context(theta, type, decision, context):
    global cache_normalizing_decision, feature_index, source_to_target_firing, model1_probs, ets
    m1_event_prob = model1_probs.get((decision, context), 0.0)
    fired_features = get_wa_features_fired(type=type, decision=decision, context=context,
                                           dictionary_features=dictionary_features, ishybrid=True)

    theta_dot_features = sum([theta[feature_index[f]] * f_wt for f_wt, f in fired_features])
    numerator = m1_event_prob * exp(theta_dot_features)
    if (type, context) in cache_normalizing_decision:
        denom = cache_normalizing_decision[type, context]
    else:
        denom = ets[context]
        target_firings = source_to_target_firing.get(context, set([]))
        for tf in target_firings:
            m1_tf_event_prob = model1_probs.get((tf, context), 0.0)
            tf_fired_features = get_wa_features_fired(type=type, decision=tf, context=context,
                                                      dictionary_features=dictionary_features, ishybrid=True)
            tf_theta_dot_features = sum([theta[feature_index[f]] * f_wt for f_wt, f in tf_fired_features])
            denom += m1_tf_event_prob * exp(tf_theta_dot_features)
        cache_normalizing_decision[type, context] = denom
    try:
        log_prob = log(numerator) - log(denom)
    except ValueError:
        print numerator, denom, decision, context, m1_event_prob, theta_dot_features
        raise BaseException
    return log_prob
Code example #14
File: divTime.py  Project: stsmall/An_funestus
def estimDiv(c, psmc, r, t):
    """Estimate divergence using eq 12
    """
    N0 = 0
    if psmc:
        if not r:
            # parse psmc
            f = open(psmc, 'r')
            line = f.readline().split("-eN ")
            t = [float(i.split()[0]) for i in line[1:]]
            t.insert(0, 0.0)
            r = [float(i.split()[1]) for i in line[1:]]
            N0 = float(line[0].split()[1]) / float(line[0].split()[4])
            r.insert(0, 1.0)
        i = 0
        nc = 1.0
        while (1-nc*exp(-(t[i+1]-t[i])/r[i])) < c:
            nc *= exp(-(t[i+1]-t[i])/r[i])
            i += 1
            #print("i:{}, t[i]:{}, t[i+1]:{}, r[i]:{}, nc:{}".format(i, t[i], t[i+1], r[i], nc))
        j = i
        print("nc = {}, 1-nc = {}".format(nc, 1-nc))
        T_hat = -r[j]*log((1-c) / nc) + t[j]
    else:
        T_hat = -log(1-c)  # assumes constant popsize
    return(r, t, N0, T_hat)
Code example #15
def invgammapdf(x, alpha, beta):
	alpha = float(alpha)
	beta = float(beta)
	if not np.isscalar(x):
		return (beta**alpha / math.gamma(alpha))*np.array([(xi**(-alpha - 1))*math.exp(-beta/xi) for xi in x])
	else:
		return (beta**alpha / math.gamma(alpha))*(x**(-alpha - 1))*math.exp(-beta/x)
Code example #16
File: C2.py  Project: vsilv/smag
def monte_carlo(beta, cubic, quartic):
    beta = 2.0
    N = 2 ** 5
    dtau = beta / N
    delta = 1.0
    n_steps = int(10 ** 7)
    X = np.zeros([n_steps, N])
    x = [0.0] * N
    for step in range(n_steps):
        k = random.randint(0, N - 1)
        knext, kprev = (k + 1) % N, (k - 1) % N
        x_new = x[k] + random.uniform(-delta, delta)
        old_weight = (
            rho_free(x[knext], x[k], dtau) * rho_free(x[k], x[kprev], dtau) * math.exp(-dtau * V(x[k], cubic, quartic))
        )
        new_weight = (
            rho_free(x[knext], x_new, dtau)
            * rho_free(x_new, x[kprev], dtau)
            * math.exp(-dtau * V(x_new, cubic, quartic))
        )
        if random.uniform(0.0, 1.0) < new_weight / old_weight:
            x[k] = x_new
        X[step, :] = x
        if step % 10000 == 0:
            print("step %d / %d" % (step, n_steps))
    return X
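
This snippet calls two helpers, rho_free and V, that are defined elsewhere in the file. The sketch below is only a plausible reconstruction for this kind of path-integral Monte Carlo code: the free-particle density matrix with hbar = m = 1, and an anharmonic potential with cubic and quartic corrections. Treat both definitions as assumptions rather than the project's actual code.

import math

def rho_free(x, xp, beta):
    # Free-particle density matrix <x|exp(-beta * H_free)|xp> with hbar = m = 1.
    return math.exp(-(x - xp) ** 2 / (2.0 * beta)) / math.sqrt(2.0 * math.pi * beta)

def V(x, cubic, quartic):
    # Assumed anharmonic potential: harmonic well plus cubic and quartic terms.
    return x ** 2 / 2.0 + cubic * x ** 3 + quartic * x ** 4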
Code example #17
 def QExp(self,qid,query,lDoc):
     hEntityScore = {} #ObjId -> prf score
     for doc in lDoc:
         if not doc.DocNo in self.hDocKg:
             continue
         hDocEntity = self.hDocKg[doc.DocNo]
         for ObjId,score in hDocEntity.items():
             score += doc.score #log(a) + log(b)
             if not ObjId in hEntityScore:
                 hEntityScore[ObjId] = math.exp(score)
             else:
                 hEntityScore[ObjId] += math.exp(score)
     lEntityScore = hEntityScore.items()
     lEntityScore.sort(key=lambda item:item[1],reverse = True)
     lEntityScore = lEntityScore[:self.NumOfExpEntity]
     Z = sum([item[1] for item in lEntityScore])
     if Z == 0:
         lEntityScore = []
     else:
         lEntityScore = [[item[0],item[1] / float(Z)] for item in lEntityScore]
         
     
     
     logging.info(
                  '[%s][%s] exp entity: %s',
                  qid,
                  query,
                  json.dumps(lEntityScore)
                  )
     
     return lEntityScore
Code example #18
File: clean_mocks.py  Project: aszewciw/gal_structure
def gal_weights(Z, R):
    '''
    Returns a weight based on a particular model of the MW.
    For now we will use a two-disk model with the form below.
    This can be expanded at a later time.

    Z - Height above/below galactic plane
    R - Distance from galactic center

    '''

    # Parameters
    thick_s_height = 0.674
    thick_s_length = 2.51
    thin_s_height = 0.233
    thin_s_length = 2.34
    a = 0.1


    weight = ( ( ( math.cosh(Z / 2 / thin_s_height) ) ** (-2) )
        * math.exp(-R / thin_s_length) +
        a * ( ( math.cosh(Z / 2 / thick_s_height) ) ** (-2) )
        * math.exp(-R / thick_s_length))

    return weight
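
Each term in the weight is a squared hyperbolic secant vertical profile, cosh(Z / 2h) ** (-2), multiplied by an exponential radial fall-off, exp(-R / L). A minimal standalone illustration of the thin-disk term (the sample coordinates are arbitrary values in kpc, not taken from the project):

import math

thin_s_height, thin_s_length = 0.233, 2.34   # same thin-disk parameters as above
Z, R = 0.5, 8.0                              # arbitrary sample point
vertical = math.cosh(Z / 2 / thin_s_height) ** (-2)  # sech^2 profile in height
radial = math.exp(-R / thin_s_length)                # exponential decline with radius
print(vertical * radial)                             # thin-disk contribution to the weight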
Code example #19
File: scc_matrix.py  Project: maiconpl/DFTBpy
    def build_gamma_matrix(self):

        self.gamma = [ [0 for i in xrange(self.n_atom)] for j in xrange(self.n_atom)] # defining the dimension of the matrix.
        dist_matrix = [ [0 for i in xrange(self.n_atom)] for j in xrange(self.n_atom)] # defining the dimension of the matrix.
        fac = 0.0

        for in_atom in xrange(self.n_atom):
            for jn_atom in xrange(in_atom, self.n_atom):

                dist_matrix[in_atom][jn_atom] = self.dist_matrix[in_atom][jn_atom]

                ta = 3.2*self.hubbard_s[in_atom]
                tb = 3.2*self.hubbard_s[jn_atom]

                if dist_matrix[in_atom][jn_atom] == 0.0: # the same atom
                   self.gamma[in_atom][jn_atom] = 0.5*( (ta*tb/(ta + tb)) + ( (ta*tb)**2/(ta + tb)**3) )

                if dist_matrix[in_atom][jn_atom] != 0.0 and abs(ta - tb) < 10.0**(-4): # gamma's are very close, e. g. for the same atom type.
                   fac = ( (1.6*dist_matrix[in_atom][jn_atom]*ta*tb)/(ta + tb) )*(1.0 + (ta*tb)/(ta + tb)**2 )
                   self.gamma[in_atom][jn_atom] = 1.0/dist_matrix[in_atom][jn_atom] - (48 + 33*fac + (9.0 + fac)*fac**2)*exp(-fac)/(48*dist_matrix[in_atom][jn_atom])

                if self.xyz_atom_symbols[in_atom] != self.xyz_atom_symbols[jn_atom]: # gamma's are different

                   self.gamma[in_atom][jn_atom] = 1.0/dist_matrix[in_atom][jn_atom] - \
                                                        (math.exp(-(ta*dist_matrix[in_atom][jn_atom]))* \
                                                         ( ((tb**4*ta)/(2*(ta**2 - tb**2)**2)) - \
                                                           ((tb**6 - 3*tb**4*ta**2)/((ta**2 - tb**2)**3*dist_matrix[in_atom][jn_atom])) ))  - \
                                                        (math.exp(-(tb*dist_matrix[in_atom][jn_atom]))* \
                                                         ( ((ta**4*tb)/(2*(tb**2 - ta**2)**2)) - \
                                                           ((ta**6 - 3*ta**4*tb**2)/((tb**2 - ta**2)**3*dist_matrix[in_atom][jn_atom])) ))

                self.gamma[jn_atom][in_atom] = self.gamma[in_atom][jn_atom]
Code example #20
        def test_exceptions(self):
            try:
                x = math.exp(-1000000000)
            except:
                # mathmodule.c is failing to weed out underflows from libm, or
                # we've got an fp format with huge dynamic range
                self.fail("underflowing exp() should not have raised "
                          "an exception")
            if x != 0:
                self.fail("underflowing exp() should have returned 0")

            # If this fails, probably using a strict IEEE-754 conforming libm, and x
            # is +Inf afterwards.  But Python wants overflows detected by default.
            try:
                x = math.exp(1000000000)
            except OverflowError:
                pass
            else:
                self.fail("overflowing exp() didn't trigger OverflowError")

            # If this fails, it could be a puzzle.  One odd possibility is that
            # mathmodule.c's macros are getting confused while comparing
            # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
            # as a result (and so raising OverflowError instead).
            try:
                x = math.sqrt(-1.0)
            except ValueError:
                pass
            else:
                self.fail("sqrt(-1) didn't raise ValueError")
Code example #21
def nsCurvFunc(p, y, x):
	'''
	p is the parameters in a tuple
	y is an array of yields
	x is an array of maturities

	Given the parameters, calculate Nelson-Siegel values for each 
	node on the curve and return the residual values in an array'''

	# Unpack parameter tuple
	a1, a2, a3, B = p

	# Iterate over the array and perform NS calculation
	nsOut = []
	for i in nditer(x):
		nsOut.append(round(a1 + a2*(1-exp(-i/B))/(i/B)+a3*((1-exp(-i/B))/(i/B)-exp(-i/B)),6))

	# Convert list to array
	nsArray = array(nsOut)*100
	
	# Find the errors for each node
	err = y - nsArray
	
	# print(p)
	# print(err)
	return err
Code example #22
File: bigramSearch.py  Project: Ketcomp/nlp
    def goodTuringCalculations(bigTallyInSentence, sentenceNo, vocabulary):
        # Initialize probability to 0
        sentenceProb = 0
        # print(keepCount)

        f = open("s" + str(sentenceNo) + "GT.txt", "w")
        for key, value in bigTallyInSentence.items():
            if 0 == bigramOccurrences[key]:
                sentenceProb += math.log(keepCount[1]) - math.log(bigrams.__len__())
                f.write(str((key, str((math.log(keepCount[1]) - math.log(bigrams.__len__()))))))
            elif bigramOccurrences[key] > 5:
                sentenceProb += math.log(value + 1) - math.log(vocabulary[str(key[0])] + vocabulary.__len__())
                f.write(
                    str(
                        (
                            key,
                            str(
                                math.exp(math.log(value + 1) - math.log(vocabulary[str(key[0])] + vocabulary.__len__()))
                            ),
                        )
                    )
                )

        f.close()

        print("The probability of sentence " + str(sentenceNo) + " is: " + str(math.exp(sentenceProb)))
        return math.exp(sentenceProb)
Code example #23
def hierarchy_dist(synset_1, synset_2):
    """
    Return a measure of depth in the ontology to model the fact that
    nodes closer to the root are broader and have less semantic similarity
    than nodes further away from the root.
    """
    h_dist = sys.maxint
    if synset_1 is None or synset_2 is None:
        return h_dist
    if synset_1 == synset_2:
        # return the depth of one of synset_1 or synset_2
        h_dist = max([x[1] for x in synset_1.hypernym_distances()])
    else:
        # find the max depth of least common subsumer
        hypernyms_1 = {x[0]:x[1] for x in synset_1.hypernym_distances()}
        hypernyms_2 = {x[0]:x[1] for x in synset_2.hypernym_distances()}
        lcs_candidates = set(hypernyms_1.keys()).intersection(
            set(hypernyms_2.keys()))
        if len(lcs_candidates) > 0:
            lcs_dists = []
            for lcs_candidate in lcs_candidates:
                lcs_d1 = 0
                if hypernyms_1.has_key(lcs_candidate):
                    lcs_d1 = hypernyms_1[lcs_candidate]
                lcs_d2 = 0
                if hypernyms_2.has_key(lcs_candidate):
                    lcs_d2 = hypernyms_2[lcs_candidate]
                lcs_dists.append(max([lcs_d1, lcs_d2]))
            h_dist = max(lcs_dists)
        else:
            h_dist = 0
    return ((math.exp(beta * h_dist) - math.exp(-beta * h_dist)) /
        (math.exp(beta * h_dist) + math.exp(-beta * h_dist)))
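
The return expression is the hyperbolic tangent of beta * h_dist written out with math.exp; beta is a module-level constant in the original code. A quick standalone check of that identity, with example values only:

import math

beta, h_dist = 0.45, 3.0   # illustrative values, not taken from the snippet
by_exp = ((math.exp(beta * h_dist) - math.exp(-beta * h_dist)) /
          (math.exp(beta * h_dist) + math.exp(-beta * h_dist)))
print(by_exp, math.tanh(beta * h_dist))   # identical up to floating-point rounding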
Code example #24
File: test.py  Project: Tell1/ml-impl
def log_add(left, right):
    if (right < left):
        return left + math.log1p(math.exp(right - left))
    elif (right > left):
        return right + math.log1p(math.exp(left - right))
    else:
        return left + M_LN2
Code example #25
File: Math1.py  Project: Brattelnik/Borodulin
def B(x):
    y=None
    if math.sin(x/(x**2+2))+math.exp(math.log1p(x)+1)==0 or x==0:
        y='Neopredelen'
    else:
        y=(1/(math.sin(x/(x**2+2))+math.exp(math.log1p(x)+1)))-1
    return y
Code example #26
File: lab3.py  Project: fristedt/maskin
def trainBoost(X, labels,T=5,covdiag=True):
    N = len(X)
    C = len(set(labels))
    d = len(X[0])

    priors = np.zeros(shape=(T, C))
    mus = np.zeros(shape=(T, C, d))
    sigmas = np.zeros(shape=(T, d, d, C))
    alphas = np.zeros(T)

    W = np.ones(N) / N
    for t in range(T-1):
        mus[t], sigmas[t] = mlParams(X, labels, W)
        priors[t] = computePrior(labels, W)

        delta = computeDelta(X, priors[t], mus[t], sigmas[t], labels, covdiag)
        error = sum([W[i]*(1-delta[i]) for i in range(N)])
        if error == 0:
            error = 1e-6 # Prevent log(0)
        alphas[t] = (np.log(1-error) - np.log(error))/2
        W = [W[i] * math.exp(-alphas[t]) if delta[i] else W[i] * math.exp(alphas[t]) for i in range(N)]
        W /= sum(W)

    t += 1
    mus[t], sigmas[t] = mlParams(X, labels, W)
    priors[t] = computePrior(labels, W)

    delta = computeDelta(X, priors[t], mus[t], sigmas[t], labels, covdiag)
    error = sum([W[i]*delta[i] for i in range(N)])
    alphas[t] = (np.log(1-error) - np.log(error))/2

    return priors,mus,sigmas,alphas
Code example #27
 def f_active(self, x):
     if self.use_sigmod:
         # range [0, 1]
         return 1.0 / (math.exp(-x * self.shim) + 1.0)
     else:
         # range [-1, 1]
         return 1.0 - 2 / (math.exp(2*x * self.shim) + 1)
Code example #28
File: ising.py  Project: poneill/amic
 def update_spins(spins, i, r):
     current_energy = spins[i] * (hs[i] + J * (spins[(i - 1) % N] + spins[(i + 1) % N]))
     prop_energy = -current_energy
     p_prop = exp(-prop_energy) / (exp(-current_energy) + exp(-prop_energy))
     # print "p_prop:",p_prop
     if r < p_prop:
         spins[i] *= -1
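
Because the proposed energy is just the sign-flipped current energy, the heat-bath acceptance probability reduces to a logistic function of the current energy. A small standalone check of that identity (the energy value is an arbitrary example):

from math import exp

current_energy = 0.7
prop_energy = -current_energy
p_prop = exp(-prop_energy) / (exp(-current_energy) + exp(-prop_energy))
print(p_prop, 1.0 / (1.0 + exp(-2.0 * current_energy)))   # same number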
Code example #29
def toKepler(u, which = 'Pueyo', mass = 1, referenceTime = None):
    """
    """
    if which == 'Pueyo':
        res = np.zeros(6)
        res[1] = u[1]
        res[5] = u[5]
        
        res[0] = semimajoraxis(math.exp(u[0]), starMass = mass)
        res[2] = math.degrees(math.acos(u[2]))
        res[3] = np.mod((u[3]-u[4])*0.5,360)
        res[4] = np.mod((u[3]+u[4])*0.5,360)
        return res
    elif which == 'alternative':
        res = np.zeros(6)
        res[1] = u[1]
        res[5] = u[5]
        
        res[0] = semimajoraxis(math.exp(u[0]), starMass = mass)
        res[2] = math.degrees(math.acos(u[2]))
        res[3] = u[3]
        res[4] = u[4]
        return res        
    elif which == 'Chauvin':
        stat = StatisticsMCMC()
        res = stat.xFROMu(u,referenceTime,mass)    
        return res
    
    return None
Code example #30
File: knock73.py  Project: tmu-nlp/100knock2016
def rog_learn(c,f):
    import math
    from knock72 import mk_feature
    from nltk import stem
    from collections import defaultdict

    stemmer=stem.PorterStemmer()
    d=defaultdict(lambda:0)
    al=0.6
    count=0
    while(count<c):
        count+=1
        for line in f:#.split("\n"):
            y=line.split(" ")[0]
            x=mk_feature(line)
            score=0
            for key,value in x.items():
                score+=d[key]*value
            if y=="+1":
                y=1
            elif y=="-1":
                y=-1
            dp_dw=y*math.exp(score)/((1+math.exp(score))**2)
            for key,value in x.items():
                d[key]+=dp_dw*value*al
        al=al*0.8
    return d
Code example #31
Na_m1h1 = smodel.ChanState('Na_m1h1', mdl, Na)
Na_m2h1 = smodel.ChanState('Na_m2h1', mdl, Na)
Na_m3h1 = smodel.ChanState('Na_m3h1', mdl, Na)

# Leak channel
L = smodel.Chan('L', mdl)
Leak = smodel.ChanState('Leak', mdl, L)

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # 

# Hodgkin-Huxley gating kinetics

# Temperature dependence
thi = math.pow(3.0, ((celsius-6.3)/10.0))

_a_n = lambda mV: thi*((0.01*(10-(mV+65.))/(math.exp((10-(mV+65.))/10.)-1)))

_b_n = lambda mV: thi*((0.125*math.exp(-(mV+65.)/80.)))

_a_m = lambda mV: thi*((0.1*(25-(mV+65.))/(math.exp((25-(mV+65.))/10.)-1)))

_b_m = lambda mV: thi*((4.*math.exp(-(mV+65.)/18.)))


_a_h = lambda mV: thi*((0.07*math.exp(-(mV+65.)/20.)))

_b_h = lambda mV: thi*((1./(math.exp((30-(mV+65.))/10.)+1)))


# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # 
Code example #32
def sigmoid(x):
    return 1.0/(math.exp(-x)+1.)
Code example #33
    klMax = 0.1
    print("klMax =", klMax)
    print("eta = ", reseqp(p, V, klMax))
    # print("eta 2 = ", reseqp2(p, V, klMax))
    print("Uq = ", maxEV(p, V, klMax))

    print("\np =", p)
    p = np.array([
        0.11794872, 0.27948718, 0.31538462, 0.14102564, 0.0974359, 0.03076923,
        0.00769231, 0.01025641, 0.
    ])
    print("V =", V)
    V = np.array([0, 1, 2, 3, 4, 5, 6, 7, 10])
    klMax = 0.0168913409484
    print("klMax =", klMax)
    print("eta = ", reseqp(p, V, klMax))
    # print("eta 2 = ", reseqp2(p, V, klMax))
    print("Uq = ", maxEV(p, V, klMax))

    x = 2
    print("\nx =", x)
    d = 2.51
    print("d =", d)
    print("klucbExp(x, d) = ", klucbExp(x, d))

    ub = x / (1 + 2. / 3 * d - sqrt(4. / 9 * d * d + 2 * d))
    print("Upper bound = ", ub)
    print("Stupid upperbound = ", x * exp(d + 1))

    print("\nDone for tests of 'kullback.py' ...")
Code example #34
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, ConfigArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args, config_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    else:
        model_args, data_args, training_args, config_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, keep_in_memory=False, cache_dir=model_args.cache_dir)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
                keep_in_memory=False,
                cache_dir=model_args.cache_dir
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
                keep_in_memory=False,
                cache_dir=model_args.cache_dir
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = (
            data_args.train_file.split(".")[-1]
            if data_args.train_file is not None
            else data_args.validation_file.split(".")[-1]
        )
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files, keep_in_memory=False, cache_dir=model_args.cache_dir)
    if data_args.sanity:
        datasets["train"] = datasets["train"].shard(100, index=0, contiguous=True)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type](**vars(config_args), **config_kwargs)
        logger.warning("You are instantiating a new config instance from scratch.")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForSeq2SeqLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForSeq2SeqLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
    tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")

    datasets = datasets.shuffle()
    def tokenize_function(examples):
        with CaptureLogger(tok_logger) as cl:
            output = tokenizer(examples[text_column_name])
        # clm input could be much much longer than block_size
        if "Token indices sequence length is longer than the" in cl.out:
            tok_logger.warning(
                "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
            )
        return output

    # Ensures only the main process does dataset pre-processing; the other ones will load the `map` cache
    if not is_main_process(training_args.local_rank):
        print("waiting for main process to execute mapping")
        torch.distributed.barrier()

    logger.info("Mapping dataset to tokenized dataset.",)
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not data_args.overwrite_cache,
        keep_in_memory=False
    )

    if data_args.block_size is None:
        block_size = tokenizer.model_max_length
        if block_size > 1024:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead. You can change that default value by passing --block_size xxx."
            )
            block_size = 1024
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)

    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        total_length = (total_length // (2 * block_size)) * 2 * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, 2*block_size)]
            for k, t in concatenated_examples.items()
        }
        result["labels"] = [
            concatenated_examples['input_ids'][i : i + block_size]
            for i in range(block_size, total_length, 2*block_size)
        ]
        return result

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map

    logger.info("Chunking tokenized dataset.")
    lm_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        keep_in_memory=False
    )

    # Now the other ones can catch up.
    if training_args.local_rank != -1 and is_main_process(training_args.local_rank):
        print("loading results from main process")
        torch.distributed.barrier()

    if training_args.do_train:
        if "train" not in tokenized_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = lm_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in tokenized_datasets:
            cutoff = data_args.validation_split_percentage * len(lm_datasets["train"]) // 100
            train_dataset = lm_datasets["train"].select(range(cutoff, len(lm_datasets["train"])))
            eval_dataset = lm_datasets["train"].select(range(cutoff))
        else:
            eval_dataset = lm_datasets["validation"]
        if data_args.max_val_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))


    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        # Data collator will default to DataCollatorWithPadding, so we change it.
        data_collator=default_data_collator,
        callbacks=[LogFlosCallback, TensorBoardFloIndexedCallback]
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()

        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
        perplexity = math.exp(metrics["eval_loss"])
        metrics["perplexity"] = perplexity

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
Code example #35
def eq1(eta):
        eq1 = i_o*(exp((1-beta)*F*eta/(R*T))-exp(-beta*F*eta/(R*T)))-I_ext[g]
        return eq1
Code example #36
 def print_performances(header, loss, accu, start_time):
     print('  - {header:12} ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
           'elapse: {elapse:3.3f} min'.format(
               header=f"({header})", ppl=math.exp(min(loss, 100)),
               accu=100*accu, elapse=(time.time()-start_time)/60))
Code example #37
File: Probability.py  Project: Yedzinovich/Data-Chart
def normal_pdf(x, mu = 0, sigma = 1):
  sqrt_two_pi = math.sqrt(2 * math.pi)
  return (math.exp(-(x-mu) ** 2 / 2 / sigma ** 2) / (sqrt_two_pi * sigma))
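
A quick sanity check, not in the original file; the function is repeated so it runs standalone. At x = mu the density equals 1 / (sqrt(2 * pi) * sigma), and a crude Riemann sum over a wide interval comes out close to 1.

import math

def normal_pdf(x, mu=0, sigma=1):  # repeated from above
    sqrt_two_pi = math.sqrt(2 * math.pi)
    return math.exp(-(x - mu) ** 2 / 2 / sigma ** 2) / (sqrt_two_pi * sigma)

print(normal_pdf(0.0), 1.0 / math.sqrt(2 * math.pi))               # both ~0.39894
print(sum(normal_pdf(-5 + 0.01 * i) * 0.01 for i in range(1001)))  # ~1.0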
Code example #38
File: g_a.py  Project: Samuel1s/Algoritmo_Genetico
def objective(x, y):
    return MIN_VALUE - ( math.sin(x) * math.exp((1 - math.cos(y)) ** 2) + math.cos(y) * math.exp((1 - math.sin(x)) ** 2) + (x - y) ** 2 )
Code example #39
 def rampdown(self,epoch):
     if epoch >= (self.num_epochs - self.rampdown_length):
         ep = (epoch - (self.num_epochs - self.rampdown_length)) * 0.5
         return math.exp(-(ep * ep) / self.rampdown_length)
     else:
         return 1.0
Code example #40
File: grid.py  Project: biobakery/anadama
import re
import time
import tempfile
import operator
import itertools
import subprocess
from math import exp

from doit.exceptions import CatchedException
from doit.runner import MThreadRunner

from .. import picklerunner, performance
from ..util import dict_to_cmd_opts, partition, intatleast1


sigmoid = lambda t: 1/(1+exp(-t))
first = operator.itemgetter(0)


class GridRunner(MThreadRunner):
    def __init__(self, partition,
                 performance_url=None,
                 tmpdir="/tmp",
                 extra_grid_args="",
                 *args, **kwargs):
        super(GridRunner, self).__init__(*args, **kwargs)
        self.partition = partition
        self.tmpdir = tmpdir
        self.performance_predictor = performance.new_predictor(performance_url)
        self.extra_grid_args = extra_grid_args
        self.id_task_map = dict()
Code example #41
File: statistics.py  Project: eduamf/OpenNMT-py
 def ppl(self):
     """ compute perplexity """
     return math.exp(min(self.loss / self.n_words, 100))
Code example #42
def calculateProbability(x, mean, stdev):
    exponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))
    return (1 / (math.sqrt(2*math.pi) * stdev)) * exponent
Code example #43
def ImputeGenotype(Position,
                   ReadLST,
                   PloidyLevels,
                   ErrorRate=0.005,
                   Quals=None,
                   FixedGenos=None):
    """ Impute the genotypes at a specific position for a sample of reads (ReadLST) with ploidy levels given in PloidyLevels, assuming 
    Hardy-Weinberg equilibrium to obtain prior frequencies of each genotype from the allele frequencies obtained from the reads. These 
    priors are updated using the Bayes's formula to produce posterior probabilities for each parental and offspring genotype. To allow
    partial imputation, a set of genotypes (FixedGenos) can be specified (of length equal to the number of samples), so that a sample
    is not imputed if its corresponding FixedGeno is not None and not missing."""
    assert FixedGenos is None or len(FixedGenos) == len(
        ReadLST
    ), "Fixed genotypes must be given as a list of genotypes equal in length to the number of samples!"
    assert FixedGenos is None or all([
        _G is None or isinstance(_G, Genotype) for _G in FixedGenos
    ]), "Fixed genotypes list can only include genotype objects!"
    if FixedGenos is not None:
        if all([_G is not None and not _G.isMISSING() for _G in FixedGenos]):
            return [_G.GetGenes() for _G in FixedGenos]
    else:
        FixedGenos = [None for _Read in ReadLST]
    all_alleles = []
    for _Reads in ReadLST:
        for _r in _Reads:
            if Position in _r.GetPos():
                all_alleles.append(
                    _r.GetDict()[Position]
                )  # extract all of the alleles from the reads at the specified position
    n = len([
        _x for _x in all_alleles
        if (_x == 1 or _x == '1' or _x == 0 or _x == '0')
    ])  # The total population coverage at the specified position
    if n == 0:
        raise BlockException(
            "No genotype could be imputed as the population coverage was zero!"
        )
    if not Quals or all(
            not _qual for _qual in Quals):  # binomial model Z ~ Binomial(n, p)
        Z = len([_x for _x in all_alleles if (_x == 1 or _x == '1')])
        p_hat = float(Z) / n
        p_true_hat = (p_hat - ErrorRate) / (
            1 - 2 * ErrorRate
        )  # p_hat = Z/n = (1-ErrorRate)P_true + ErrorRate(1-p_true), var_p_hat_true = ((1./(1 - 2 * ErrorRate))**2) * p_hat * (1 - p_hat) / n
    else:
        all_quals = []
        for _Reads, _Quals in zip(ReadLST, Quals):
            if _Quals is None:
                all_quals.extend([ErrorRate] * len(_Reads))
            else:
                for _r, _qual in zip(_Reads, _Quals):
                    if Position in _r.GetPos():
                        try:
                            all_quals.append(10**(-float(_qual[Position]) /
                                                  10))
                        except ValueError:
                            all_quals.append(ErrorRate)
        Z = 0  # In case quality scores, yielding error rates, are provided, we assume Prob(Allele(i)==1), i=1,...,n  has a normal distribution
        for _n in range(0, len(all_alleles)):
            if all_alleles[_n] == 1 or all_alleles[_n] == '1':
                Z += 1 - all_quals[_n]
            elif all_alleles[_n] == 0 or all_alleles[_n] == '0':
                Z += all_quals[_n]
        p_true_hat = float(Z) / n
    total_genos = []  # The list of ML imputed genotypes
    _Genotypes_Possible = dict()
    _Genotypes_Priors = dict()
    for _ploidy in set(PloidyLevels):
        _Genotypes_Possible[_ploidy] = [
            _g for _g in itertools.product(*[[0, 1]] * _ploidy)
        ]  # all possible bi-allelic SNP genotypes
        _Genotypes_Priors[_ploidy] = [1] * len(_Genotypes_Possible[_ploidy])
    for _ploidy, _Genos in _Genotypes_Possible.iteritems():
        for _n, _G in enumerate(_Genos):
            _number_of_ones = 0
            for _allele in _G:
                if _allele == 1:
                    _number_of_ones += 1
                    _Genotypes_Priors[_ploidy][_n] *= p_true_hat
                else:
                    _Genotypes_Priors[_ploidy][_n] *= (1 - p_true_hat)
            _Genotypes_Priors[_ploidy][_n] *= misc.comb(
                _ploidy, _number_of_ones)
    _Geno_Parents = []
    _Prior_Parents = []
    for _id in range(
            0, 2
    ):  # obtain first parental posteriors P(Gm,Gf|R)=P(R|Gm,Gf)P(Gm,Gf)/...
        if FixedGenos[_id] is not None and not FixedGenos[_id].isMISSING(
        ):  # in case a fixed genotype is given for a parent, just use that!
            _Geno_Parents.append((FixedGenos[_id].GetGenes(), ))
            _Prior_Parents.append((1, ))
        else:
            _Geno_Parents.append(
                tuple(_G for _G in _Genotypes_Possible[PloidyLevels[_id]]))
            _Prior_Parents.append(
                tuple(_P for _P in _Genotypes_Priors[PloidyLevels[_id]]))
    _Geno_Parents = [
        _GmGf
        for _GmGf in itertools.product(_Geno_Parents[0], _Geno_Parents[1])
    ]
    _Prior_Parents = [
        _PmPf
        for _PmPf in itertools.product(_Prior_Parents[0], _Prior_Parents[1])
    ]
    Population_GenotypeLST = []
    for _n, _GmGf in enumerate(_Geno_Parents):
        _Child_GenotypeLST = [
        ]  # obtain the list of possible offspring genotypes for each choice of the parents
        for _megagamete in Gametogenesis(
                Haplotypes(Position, Position, loge(_Prior_Parents[_n][0]), 0,
                           None, None, *_GmGf[0])
        ):  # obtain and store all of the possible offspring haplotypes from the parents assuming no recombination
            for _microgamete in Gametogenesis(
                    Haplotypes(Position, Position, loge(_Prior_Parents[_n][1]),
                               0, None, None, *_GmGf[1])):
                _Child_GenotypeLST.append(
                    Haplotypes(Position, Position, 0, 0, None, None,
                               *(_megagamete + _microgamete)))
        num_all_zygotes = len(_Child_GenotypeLST)
        _Child_GenotypeLST = collections.Counter(_Child_GenotypeLST)
        for _Hc in _Child_GenotypeLST.keys():
            _Hc.SetRL(loge(float(_Child_GenotypeLST[_Hc]) / num_all_zygotes))
        Population_GenotypeLST.append(
            (Haplotypes(Position, Position, loge(_Prior_Parents[_n][0]), 0,
                        None, None, *_GmGf[0]),
             Haplotypes(Position, Position, loge(_Prior_Parents[_n][1]), 0,
                        None, None, *_GmGf[1])) +
            tuple(_Hc for _Hc in _Child_GenotypeLST.keys()))
    if len(
            Population_GenotypeLST
    ) > 1:  # Calculate the posterior of the parental genotype pairs only if more than one estimated genotype pair is acceptable
        Posterior_Parents = []
        for _Impute_Num in range(
                0, len(Population_GenotypeLST)
        ):  # Obtain the posterior probability of every population imputation. First get the posterior of each parent
            _LogLik_Reads_Parents = GetProbReads_Founders(
                ReadLST, (Population_GenotypeLST[_Impute_Num][0],
                          Population_GenotypeLST[_Impute_Num][1]),
                eps=ErrorRate,
                pplog=True,
                QualsLST=Quals,
                getcounts=False,
                min_read_length=1
            )  # Get the likelihood of all the reads conditional on the proposed imputation of the parents
            Posterior_Parents.append(
                Population_GenotypeLST[_Impute_Num][0].GetRL() +
                Population_GenotypeLST[_Impute_Num][1].GetRL() +
                _LogLik_Reads_Parents
            )  # P(GmGf|R) = P(R|GmGf)P(GmGf)/... = P(R|GmGf)P(Gm)P(Gf)/...
        _norm_parents = 0
        for _Impute_Num in range(0, len(Population_GenotypeLST)):
            _norm_parents += exp(Posterior_Parents[_Impute_Num])
        _log_norm_parents = loge(_norm_parents)
        Posterior_Parents = [
            _Post - _log_norm_parents for _Post in Posterior_Parents
        ]
        Population_GenotypeLST, Posterior_Parents = [
            _Hp_Post for _Hp_Post in zip(
                *sorted(zip(Population_GenotypeLST, Posterior_Parents),
                        key=lambda x: x[1],
                        reverse=True))
        ]  # Choose the most probable estimate for parental genotypes
        for _imputation_number in range(0, len(Population_GenotypeLST)):
            for _id in range(0, 2):
                Population_GenotypeLST[_imputation_number][_id].SetRL(
                    Posterior_Parents[_imputation_number])
        #Imputations = [Population_GenotypeLST[0][0], Population_GenotypeLST[0][1]] # Consider the most likely parental imputations as the new parental genotypes
        Imputations = [
            [
                Population_GenotypeLST[_imputation_number][0],
                Population_GenotypeLST[_imputation_number][1]
            ] + [None for _cid in range(2, len(ReadLST))]
            for _imputation_number in range(0, len(Population_GenotypeLST))
        ]  # Consider all parental imputations as the new parental genotypes
    else:
        Imputations = [
            [Population_GenotypeLST[0][0], Population_GenotypeLST[0][1]] +
            [None for _cid in range(2, len(ReadLST))]
        ]
    parental_imputation_number = 0
    Max_a_posteriori = 0
    while parental_imputation_number < len(Imputations):
        for _cid in range(
                2, len(ReadLST)
        ):  # assume offspring independence conditional on the parents, obtain the posterior of each offspring
            if FixedGenos[_cid] is None or FixedGenos[_cid].isMISSING(
            ):  # do not impute an offspring genotype if it is given in FixedGenos
                _LogLik_Offspring = [
                    GetProbReads(ReadLST[_cid],
                                 _Child_Geno,
                                 eps=ErrorRate,
                                 pplog=True,
                                 Quals=Quals[_cid],
                                 getcounts=False,
                                 min_read_length=1) for _Child_Geno in
                    Population_GenotypeLST[parental_imputation_number][2:]
                ]  # Calculate the probability of reads conditional on each candidate offspring haplotype
                Posterior_Offspring = [
                    _Hc.GetRL() + _LogLik for _Hc, _LogLik in zip(
                        Population_GenotypeLST[parental_imputation_number][2:],
                        _LogLik_Offspring)
                ]  # P(Gci|Ri, Gm, Gf) = P(Ri|Gci, Gm, Gf)P(Gci|Gm, Gf)/... = P(Ri|Gci)P(Gci|Gm, Gf)/...
                lognorm_Offspring = loge(sum(map(exp, Posterior_Offspring)))
                Posterior_Offspring = [
                    _x - lognorm_Offspring for _x in Posterior_Offspring
                ]
                Imputations[parental_imputation_number][
                    _cid] = Population_GenotypeLST[parental_imputation_number][
                        2 + Posterior_Offspring.index(
                            max(Posterior_Offspring)
                        )]  # Choose the child genotype with maximum posterior probability
            else:
                Imputations[parental_imputation_number][_cid] = Haplotypes(
                    Position, Position, 0, 0, None, None,
                    *FixedGenos[_cid].GetGenes())
        if sum(_H.GetRL()
               for _H in Imputations[parental_imputation_number][1:]) > sum(
                   _H.GetRL() for _H in Imputations[Max_a_posteriori][1:]):
            Max_a_posteriori = parental_imputation_number
        parental_imputation_number += 1
    return [_H.GetVS() for _H in Imputations[Max_a_posteriori]]
コード例 #44
0
ファイル: Q28_28.py プロジェクト: PedroFabriz2/Algoritmos
def F(x, y):
	return 0.5*math.exp(-1.5*x) - y
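
F above is just the right-hand side of the ODE y' = 0.5*exp(-1.5*x) - y; a minimal sketch of how such a function could be stepped with explicit Euler (the solver, step size and initial condition are illustrative assumptions, not part of Q28_28.py):

# Hedged sketch: forward-Euler integration of y' = F(x, y); the step size,
# initial condition and number of steps are illustrative assumptions.
import math

def F(x, y):
    return 0.5 * math.exp(-1.5 * x) - y

def euler(f, x0, y0, h, n_steps):
    x, y = x0, y0
    for _ in range(n_steps):
        y += h * f(x, y)   # y_{k+1} = y_k + h * f(x_k, y_k)
        x += h
    return x, y

print(euler(F, 0.0, 0.0, 0.1, 10))  # rough approximation of y at x = 1.0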
コード例 #45
0
def GetProbReads_Founders(ReadsLST,
                          H,
                          eps=0.0005,
                          pplog=False,
                          QualsLST=[None, None, None],
                          getcounts=False,
                          min_read_length=2):
    """ Probability of a set of reads gathered from all of the family members, i.e. P[Rm,Rf,Rc1,...,Rcn|Hm, Hf, eps] = 
        Mult(Mult(P[r|Hm, Hf, eps] for r in R) for R in (Rm,Rf,Rc1,...,Rcn)), assuming independence & using GetlogProb(r, Vset, eps) (Berger et al. 2014, p. 4). 
        P[r|Hm, Hf, eps] = P[r|Hm, eps] if r in Rm,  P[r|Hm, Hf, eps] = P[r|Hf, eps] if r in Rf & P[r|Hm, Hf, eps] = 1/2*P[r|Hm, eps]+1/2*P[r|Hf, eps] if r 
        in Rci, i=1,...,n. If getcounts is True, also calculate the number of reads assigned to each homologue."""
    probLST = [
    ]  # list to store the probability of R in (Rm, Rf, Rc1, ...,Rcn)
    if getcounts:
        countLST = [
        ]  # list to store the number of reads mapped to each homologue in [h1m,...hkmm, h1f,...mkff] for R in (Rm, Rf, Rc1, ...,Rcn)
    for _id, Reads in enumerate(ReadsLST):  #[0:2]):
        try:
            if (_id == 0) or (
                    _id
                    == 1):  # if the read set belongs to one of the founders
                if QualsLST[_id]:
                    if getcounts:
                        probs, counts = list(
                            zip(*[
                                GetlogProb(_Read, H[_id], eps, _Qual,
                                           getcounts, min_read_length)
                                for _Read, _Qual in zip(Reads, QualsLST[_id])
                            ]))
                    else:
                        probs = [
                            GetlogProb(_Read, H[_id], eps, _Qual, False,
                                       min_read_length)
                            for _Read, _Qual in zip(Reads, QualsLST[_id])
                        ]
                else:
                    if getcounts:
                        probs, counts = list(
                            zip(*[
                                GetlogProb(_Read, H[_id], eps, None, getcounts,
                                           min_read_length) for _Read in Reads
                            ]))
                    else:
                        probs = [
                            GetlogProb(_Read, H[_id], eps, None, False,
                                       min_read_length) for _Read in Reads
                        ]
                if getcounts:
                    counts = [
                        _count + [0 for _k in range(0, len(H[1].GetVS()))]
                        for _count in counts
                    ] if (_id == 0) else [
                        [0 for _k in range(0, len(H[0].GetVS()))] + _count
                        for _count in counts
                    ]
            else:  # if the read set belongs to an offspring
                if QualsLST[_id]:
                    if getcounts:
                        probs_m, counts_m = list(
                            zip(*[
                                GetlogProb(_Read, H[0], eps, _Qual, getcounts,
                                           min_read_length)
                                for _Read, _Qual in zip(Reads, QualsLST[_id])
                            ]))
                        probs_f, counts_f = list(
                            zip(*[
                                GetlogProb(_Read, H[1], eps, _Qual, getcounts,
                                           min_read_length)
                                for _Read, _Qual in zip(Reads, QualsLST[_id])
                            ]))
                    else:
                        probs_m = [
                            GetlogProb(_Read, H[0], eps, _Qual, False,
                                       min_read_length)
                            for _Read, _Qual in zip(Reads, QualsLST[_id])
                        ]
                        probs_f = [
                            GetlogProb(_Read, H[1], eps, _Qual, False,
                                       min_read_length)
                            for _Read, _Qual in zip(Reads, QualsLST[_id])
                        ]
                else:
                    if getcounts:
                        probs_m, counts_m = list(
                            zip(*[
                                GetlogProb(_Read, H[0], eps, None, getcounts,
                                           min_read_length) for _Read in Reads
                            ]))
                        probs_f, counts_f = list(
                            zip(*[
                                GetlogProb(_Read, H[1], eps, None, getcounts,
                                           min_read_length) for _Read in Reads
                            ]))
                    else:
                        probs_m = [
                            GetlogProb(_Read, H[0], eps, None, False,
                                       min_read_length) for _Read in Reads
                        ]
                        probs_f = [
                            GetlogProb(_Read, H[1], eps, None, False,
                                       min_read_length) for _Read in Reads
                        ]
                if getcounts:
                    counts = [[
                        1. / 2 * _count for _count in _countsm + _countsf
                    ] for _countsm, _countsf in zip(counts_m, counts_f)]
                #probs = [loge(1./2*(exp(_logprobm)+exp(_logprobf))) for _logprobm, _logprobf in zip(probs_m, probs_f)]
                probs = [
                    max(_logprobm, _logprobf)
                    for _logprobm, _logprobf in zip(probs_m, probs_f)
                ]
                #probs = [loge(exp(max(_logprobm,_logprobf))/(exp(_logprobm)+exp(_logprobf))) for _logprobm, _logprobf in zip(probs_m, probs_f)]
            if getcounts:
                countLST.append([
                    sum(_counts[_n] for _counts in counts)
                    for _n in range(0,
                                    len(H[0].GetVS()) + len(H[1].GetVS()))
                ])
            if pplog:
                probLST.append(sum(probs))
            else:
                probLST.append(exp(sum(probs)))
        except IndexError as e:  # Error that occurs at the event that the Reads set is empty
            if "index 0 is out of bounds for axis 0 with size 0" in e.args[0]:
                if getcounts:
                    countLST.append([
                        0
                        for _h in range(0,
                                        len(H[0].GetVS()) + len(H[1].GetVS()))
                    ])
                if pplog:
                    probLST.append(0)
                else:
                    probLST.append(1)
            else:
                raise
    if getcounts:
        if pplog:
            return sum(probLST), reduce(
                lambda x, y: [_x + _y for _x, _y in zip(x, y)], countLST)
        else:
            return reduce(lambda x, y: x * y, probLST), reduce(
                lambda x, y: [_x + _y for _x, _y in zip(x, y)], countLST)
    else:
        if pplog:
            return sum(probLST)
        else:
            return reduce(lambda x, y: x * y, probLST)
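
For the offspring reads, the commented-out line in GetProbReads_Founders averages the maternal and paternal likelihoods in probability space, while the active line takes their maximum; a numerically stable log-domain version of that half-and-half mixture could look like the sketch below (a sketch only, not the code the module actually uses):

# Sketch of a numerically stable log(0.5*exp(a) + 0.5*exp(b)); a and b stand
# for _logprobm and _logprobf. Only the standard library is used.
from math import exp, log

def log_mix_half(a, b):
    m = max(a, b)                      # factor out the larger term
    return m + log(0.5 * exp(a - m) + 0.5 * exp(b - m))

print(log_mix_half(-1000.0, -1001.0))  # finite (~ -1000.38); exp(-1000) alone underflows to 0.0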
コード例 #46
0
ファイル: vp.py プロジェクト: ketranm/fan_vs_rnn
def train(opt):
    print('| build data iterators')
    train = Dataset(opt.train, opt.dict, batch_size=32, task='vp')
    valid = Dataset(opt.valid, opt.dict, batch_size=32, task='vp')

    if opt.n_words < 0:
        opt.n_words = len(train.dict)
    print('| vocab size %d' % opt.n_words)

    crit = nn.BCELoss(size_average=False).to(device)
    if opt.arch == 'rnn':
        print('Build LSTM model')
        model = RNNVP(opt.word_vec_size, opt.n_words, opt.layers,
                      opt.dropout)
    else:
        print('Build FAN model')
        model = TFNVP(opt.word_vec_size, opt.n_words, opt.num_heads,
                      opt.head_size, opt.layers, opt.inner_size,
                      opt.dropout)
    print(model)
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    # tracking validation accuracy
    best_valid_acc = 0
    for eidx in range(opt.epochs):
        tot_loss = 0
        n_samples = 0
        model.train()  # make sure we are in training mode
        train.shuffle()
        ud_start = time.time()
        for i in range(len(train)):
            optimizer.zero_grad()
            x, y = prepare_batch(train[i])
            log_prob = model(x)
            loss = crit(log_prob.view(-1), y)
            n_samples += x.size(1)
            tot_loss += loss.item()
            loss.backward()
            if opt.max_grad_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               opt.max_grad_norm)
            optimizer.step()
            if i % opt.report_every == 0 and i > 0:
                ud = time.time() - ud_start
                args = [eidx, i, len(train), math.exp(tot_loss/n_samples),
                        opt.report_every/ud]
                print("| Epoch {:2d} | {:d} / {:d} | ppl {:.3f} "
                      "| speed {:.1f} b/s".format(*args))
                ud_start = time.time()

        print('| Evaluate')
        val_acc = eval(model, valid)

        if val_acc >= best_valid_acc:
            print('| Save checkpoint: %s | Valid acc: %f' %
                  (opt.save_model, val_acc))
            checkpoint = {'params': model.state_dict(),
                          'opt': opt,
                          'best_valid_acc': best_valid_acc}
            best_valid_acc = val_acc
            torch.save(checkpoint, opt.save_model)
コード例 #47
0
ファイル: sample_double_qw.py プロジェクト: zhuqy1996/aestimo
def Lss_profile_dop_diff(x,Q,Delta_Rp,Rp):   
    return Q/(2*sqrt(np.pi)*Delta_Rp*1e-7)*exp(-(x-Rp)**2/(4*Delta_Rp**2))
コード例 #48
0
def Branch_Founders(H,
                    G,
                    SReadsLST,
                    ploidy_levels,
                    rho,
                    error,
                    qscores=[None, None],
                    G_offspring=None,
                    Impute_Incompatible=True,
                    Impute_Missing=True,
                    redose=False):
    """ Branch the ordered pair of maternal and paternal haplotypes H = (Hm, Hf) to position s using the genotypes G = (Gm, Gf) at s, 
        and assign probability to each phasing extension through the semi-reads of all of the individuals in the family. For this purpose,
        an error rate, i.e. error, or the Q-scores of the reads are used to measure the likelihood of each read conditional on the haplotypes."""
    G = [_G for _G in G]  # convert tuple to list
    for _id in range(0, 2):
        if G[_id].isMISSING(
        ):  # If the genotype is missing, extension will be skipped or the genotype has to be imputed!
            if not Impute_Missing:  # without imputation, missing genotypes will simply be dropped from the final haplotypes
                garbage = sys.stderr.write(
                    'WARNING: {1:s}\'s genotype is missing at s={0:d}, position {2:d}! Phasing extension will be escaped at s={0:d} for {1:s}!\n'
                    .format(G[_id].GetS() + 1,
                            "Mother" if _id == 0 else "Father",
                            G[_id].GetPos()))
            else:
                garbage = sys.stderr.write(
                    'WARNING: {1:s}\'s genotype is missing at s={0:d}, position {2:d}! It will be imputed anew!\n'
                    .format(G[_id].GetS() + 1,
                            "Mother" if _id == 0 else "Father",
                            G[_id].GetPos()))
            G[_id] = Genotype(G[_id].GetS(), G[_id].GetPos(),
                              *['-' for _homologue in H[_id].GetVS()])
        if all(r.isNULL() for r in SReadsLST[_id]):
            sys.stderr.write(
                'WARNING: No semi-reads exist for {0:s} at SNP {1:d}, position {2:d}!\n'
                .format("Mother" if _id == 0 else "Father", G[_id].GetS() + 1,
                        G[_id].GetPos()))
    G = tuple(_G for _G in G)  # convert list back to tuple
    ProbH = H[0].GetRL(
    )  # H[0].GetRL() is the same as H[1].GetRL() as probability is assigned to a pair not to individuals.
    #for _id in range(0,2):
    #garbage = sys.stdout.write('Base Haplotype {2:s}, S= {1:d}, Pos= {3:d}:\n\t{0}\n'.format('\n\t'.join(('\t'.join(_x for _x in H[_id].GetGenotype(_pos))) for _pos in range(H[_id].GetStart(), H[_id].GetStop()+1)), G[_id].GetS(), "Mother" if _id==0 else "Father", G[_id].GetPos()))
    extend_H_branched = []
    extend_logprobs_branched = []
    uniques, priors, logrprobs, counts, Returned_Imputation = GetProbTot_Founders(
        H, G, G_offspring, SReadsLST, ploidy_levels, error, True, qscores,
        False, Impute_Incompatible, Impute_Missing, redose
    )  # An imputation is returned if all of the parental extensions are incompatible with the offspring genotypes and Impute_Incompatible=True, if genotypes are missing and Impute_Missing = True, or if redose=True
    if not uniques:
        return [[
            tuple(_H + Haplotypes(_G.GetS(), _G.GetS(), 0, 0, None, None, *
                                  ['-' for _homologue in _H.GetVS()])
                  for _H, _G in zip(H, G))
        ], Returned_Imputation
                ]  # skip extension if no extension has been possible
    logrprobs_adj = [_x + _y for (_x, _y) in zip(logrprobs, log(priors))
                     ]  # adjust P[SR(s)|Hp, H, eps] by its prior P[Hp|H, eps]
    _norm = max(logrprobs_adj)
    logrprobs_adj = [
        _x - _norm for _x in logrprobs_adj
    ]  # subtract the max log(prob) from the set of logprobs to prevent numerical underflow
    _norm = loge(sum(exp(_x) for _x in logrprobs_adj))
    if isinf(_norm):
        logHpprobs = [-loge(len(logrprobs_adj)) for _x in logrprobs_adj]
    else:
        logHpprobs = [_x - _norm for _x in logrprobs_adj
                      ]  # obtain p[Hp|SR(s), H, eps] by P[SR(s)|Hp, H, eps]
    myrho = loge(rho)  # change rho to log scale
    Candid_Offspring_Extensions = []
    for _n, Hcandid in enumerate(
            uniques
    ):  # remove duplicate extensions that occur due to presence of similar homologues in H
        #garbage = sys.stdout.write('\tCandidate Extension:\n\t    {0}\n'.format('\t'.join(str(_x) for _x in Hcandid.GetGenotype(Hcandid.GetStop()))))
        #garbage = sys.stdout.write("\t    prob={:7.19f}, logprob= {:7.19f}\n".format(2**logHpprobs[_n], logHpprobs[_n]))
        if logHpprobs[
                _n] >= myrho:  # cut the extensions with an adjusted reads-probability lower than the threshold
            extend_H_branched.append(
                tuple(_Hcandid.GetCopy() for _Hcandid in Hcandid))
            extend_logprobs_branched.append(logHpprobs[_n])
            #garbage = sys.stdout.write("\t    Candidate Accepted!\n")
        else:
            #garbage = sys.stdout.write("\t    Candidate Rejected by rho!\n")
            pass
    if not extend_H_branched:
        garbage = sys.stderr.write(
            'WARNING: No founder extension survived the threshold at SNP {0:d}, position {1:d}!\n'
            .format(G[0].GetS() + 1, G[0].GetPos()))
        _maxindex = logHpprobs.index(max(logHpprobs))
        extend_H_branched.append(uniques[_maxindex])
        for _n in range(0, len(extend_H_branched[-1])):
            extend_H_branched[-1][_n].SetRL(logHpprobs[_maxindex])
    for _H, _prob in zip(
            extend_H_branched, extend_logprobs_branched
    ):  # Update the stored RL value of H during branching
        for _n in range(0, len(_H)):
            _H[_n].SetRL(ProbH + _prob)  # Update the RL of Hp for each founder
    return [extend_H_branched, Returned_Imputation]
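
Branch_Founders normalizes the adjusted log-likelihoods by subtracting their maximum, taking a log-sum-exp, and then pruning against log(rho); the same pattern in isolation, with illustrative names, might be sketched as:

# Sketch: normalize unnormalized log-probabilities and keep the indices whose
# posterior clears a threshold rho, falling back to the single best candidate.
from math import exp, log

def prune_by_rho(log_weights, rho):
    shift = max(log_weights)                        # guard against underflow
    shifted = [w - shift for w in log_weights]
    norm = log(sum(exp(w) for w in shifted))
    log_post = [w - norm for w in shifted]          # log posteriors, sum to 1 in prob space
    kept = [i for i, lp in enumerate(log_post) if lp >= log(rho)]
    return kept or [log_post.index(max(log_post))]

print(prune_by_rho([-3.0, -1.0, -10.0], rho=0.2))   # -> [1]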
コード例 #49
0
def train(model, optimizer, params, criterion, train_data, args, epoch, model_t=None, criterion_kd=None):
    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    hidden = model.init_hidden(args.batch_size)
    if args.kd:
        hidden_t = model_t.init_hidden(args.batch_size)
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        seq_len = max(5, int(np.random.normal(bptt, 5)))
        # There's a very small chance that it could select a very long sequence length resulting in OOM
        # seq_len = min(seq_len, args.bptt + 10)

        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)

        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        hidden = repackage_hidden(hidden)
        optimizer.zero_grad()

        output, hidden, rnn_hs, dropped_rnn_hs = model(data, hidden, return_h=True)
        raw_loss = criterion(model.decoder.weight, model.decoder.bias, output, targets)
        # student_y = model.decoder(output)
        # raw_loss = criterion(student_y, targets)
        
        if args.kd:
            student_y = model.decoder(output)
            hidden_t = repackage_hidden(hidden_t)
            output_t, hidden_t, rnn_hs, _ = model_t(data, hidden_t, return_h=True)
            teacher_y = model_t.decoder(output_t)
            kdloss = criterion_kd(student_y, teacher_y)
        else:
            kdloss = -1

        loss = raw_loss
        # Activiation Regularization
        if args.alpha: loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
        # Temporal Activation Regularization (slowness)
        if args.beta: loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
        
        if args.kd:
            loss = (1.0 - args.kd_lmb) * loss + args.kd_lmb * kdloss

        loss.backward()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        if args.clip: torch.nn.utils.clip_grad_norm_(params, args.clip)
        optimizer.step()

        total_loss += raw_loss.data
        optimizer.param_groups[0]['lr'] = lr2
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss.item() / args.log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f} | bpc {:8.3f} | kdloss part {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss), cur_loss / math.log(2), kdloss))
            total_loss = 0
            start_time = time.time()
        ###
        batch += 1
        i += seq_len
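
The log line above derives perplexity and bits-per-character from the same mean cross-entropy; a tiny standalone check of that relationship (the loss value is an arbitrary example, not taken from the script):

# Sketch: perplexity and bits-per-character derived from the same mean
# cross-entropy loss (in nats); the loss value is an arbitrary example.
import math

cur_loss = 4.5
print('ppl:', math.exp(cur_loss))        # ~90.0
print('bpc:', cur_loss / math.log(2))    # ~6.49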
コード例 #50
0
ファイル: module_math.py プロジェクト: mumingv/python
#   math.tan(x)                 Tangent function
#   math.asin(x)                Arcsine function
#   math.acos(x)                Arccosine function
#   math.atan(x)                Arctangent function
# +---------------------------+--------------------------------------------+

# Examples
import math
print "pi =", math.pi  # Output: pi = 3.14159265359
print "e =", math.e  # Output: e = 2.71828182846
print math.ceil(3.14)  # Output: 4.0
print math.ceil(4)  # Output: 4.0
print math.ceil(-3.14)  # Output: -3.0
print math.floor(3.14)  # Output: 3.0
print math.floor(4)  # Output: 4.0
print math.floor(-3.14)  # Output: -4.0
print "pow:", math.pow(2, 3)  # Output: 8.0
print "sqrt:", math.sqrt(4)  # Output: 2.0
print "log:", math.log(math.e)  # Output: 1.0
print "log10:", math.log10(10)  # Output: 1.0
print "exp:", math.exp(2)  # Output: 7.38905609893
print "degrees:", math.degrees(math.pi)  # Output: 180.0
print "radians:", math.radians(360)  # Output: 6.28318530718
print "sin:", math.sin(0)  # Output: 0.0
print "cos:", math.cos(0)  # Output: 1.0
print "tan:", math.tan(0)  # Output: 0.0
print "asin:", math.asin(1)  # Output: 1.57079632679 (pi/2)
print "acos:", math.acos(1)  # Output: 0.0
print "atan:", math.atan(1)  # Output: 0.785398163397 (pi/4)

コード例 #51
0
def run_ssd(img_path,priorbox_path):
    #caffe_proto="./MobileNetSSD_deploy.prototxt"
    caffe_proto= "./MobileNetSSD_deploy_truncated.prototxt"
    caffe_weight="./MobileNetSSD_deploy10695.caffemodel"

    rknn_model="./pedestrian_ssd.rknn"

    caffe2rknn(caffe_proto,caffe_weight,rknn_model)
    
    print("run ssd")
    rknn=RKNN(verbose=True)
    ret=rknn.load_rknn(path=rknn_model)
    ret=rknn.init_runtime()
    #ret = rknn.init_runtime(target='rk1808', device_id='012345789AB')

    img=cv2.imread(img_path)
    img=cv2.resize(img,(300,300))
    print("shape:",img.shape)
    outlen=7668 #change to your model

    priorbox=[]
    with open(priorbox_path) as f:
        for line in f:
            arr=line.strip().split(",")
            priorbox=list(map(float,arr))
    priorbox=np.reshape(np.array(priorbox),(2,outlen))

    outputs = rknn.inference(inputs=[img])#,data_format="nchw",data_type="float32"

    print("pb:",priorbox.shape,priorbox)
    print("loc:",outputs[0].shape,outputs[0])
    print("conf:",outputs[1].shape,outputs[1])    

    NUM_RESULTS=outlen//4
    NUM_CLASSES=2
    box_priors= priorbox[0].reshape((NUM_RESULTS,4))
    box_var   = priorbox[1].reshape((NUM_RESULTS,4))
    loc =  outputs[0].reshape((NUM_RESULTS, 4))
    conf = outputs[1].reshape((NUM_RESULTS, NUM_CLASSES))

    #compute softmax
    conf = [[x/(x+y),y/(x+y)] for x,y in np.exp(conf)]

    # Post Process
    for i in range(0, NUM_RESULTS):

        pb = box_priors[i]
        lc = loc[i]
        var= box_var[i]

        pb_w = pb[2] - pb[0]
        pb_h = pb[3] - pb[1]
        pb_cx = (pb[0] + pb[2]) * 0.5
        pb_cy = (pb[1] + pb[3]) * 0.5

        bbox_cx = var[0] * lc[0] * pb_w + pb_cx
        bbox_cy = var[1] * lc[1] * pb_h + pb_cy
        bbox_w = math.exp(var[2] * lc[2]) * pb_w
        bbox_h = math.exp(var[3] * lc[3]) * pb_h

        xmin = bbox_cx - bbox_w * 0.5
        ymin = bbox_cy - bbox_h * 0.5
        xmax = bbox_cx + bbox_w * 0.5
        ymax = bbox_cy + bbox_h * 0.5

        xmin *= 300 #input width
        ymin *= 300 #input height
        xmax *= 300 #input width
        ymax *= 300 #input height

        score = conf[i][1]

        if score > 0.9:
            print("score:",score)
            cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)),(0, 0, 255), 3)

    plt.imshow(cv2.cvtColor(img,cv2.COLOR_RGB2BGR))
    plt.show()

    print("ssd finished")
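
The two-class softmax in run_ssd divides raw exponentials, which can overflow for large logits; a more stable variant that shifts by the row maximum first (a sketch assuming the same (N, 2) logit layout) would be:

# Sketch: numerically stable softmax over the last axis of an (N, 2) logit array.
import numpy as np

def softmax2(logits):
    z = logits - logits.max(axis=1, keepdims=True)  # shift each row so its max is 0
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

print(softmax2(np.array([[1000.0, 999.0], [0.0, 0.0]])))  # no overflow, each row sums to 1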
コード例 #52
0
ファイル: sample_double_qw.py プロジェクト: zhuqy1996/aestimo
def Lss_profile_dop(x,Q,Delta_Rp,Rp):   
    return Q/(sqrt(2*np.pi)*Delta_Rp*1e-7)*exp(-(x-Rp)**2/(2*Delta_Rp**2))
コード例 #53
0
# HW04: Problem 04  Temperature of a rod problem
import math
import numpy as np
import matplotlib.pyplot as plt
D = 10
L = 100
T1 = 16
T2 = 11
T = []
j = np.arange(0, 110, 10)
for t in range(0, 510, 10):
    for x in range(0, 110, 10):
        k = T1 + (((T2 - T1) * x) / L) + (2 / math.pi) * (sum(
            ((((T2 - T1) * math.cos(n * math.pi)) / n) * (math.sin(
                (n * math.pi * x) / L) * (math.exp(
                    ((-D * (math.pi**2) * (n**2) * t) / (L**2)))))
             for n in range(1, 50))))
        T.append(k)
T10 = T[0:11]
T20 = T[11:22]
T40 = T[33:44]
T80 = T[77:88]
T160 = T[165:176]
T250 = T[264:275]
T500 = T[539:550]
print(T10)
print(T20)
print(T40)
print(T80)
print(T160)
print(T250)
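
The series above looks like the standard separation-of-variables solution of the 1-D heat equation with fixed end temperatures, so the first and last entry of every 11-element block of T should stay at T1 = 16 and T2 = 11; a quick check that reuses the T, T1 and T2 defined above (a sketch appended to the script, not part of the original homework):

# Sketch (appended check): each 11-element block of T is one time step; its
# first and last entries should equal the boundary temperatures 16 and 11.
for step in range(0, len(T), 11):
    block = T[step:step + 11]
    assert abs(block[0] - T1) < 1e-9 and abs(block[-1] - T2) < 1e-9
print("boundary conditions hold at all", len(T) // 11, "time steps")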
コード例 #54
0
ファイル: MyClassifier.py プロジェクト: eman34/Ass2
def calc_pdf(x, mu, sig) -> float:
    result = 1.0 / (sig * math.sqrt(2 * math.pi)) * math.exp(
        (-(x - mu)**2) / (2 * sig**2))
    return result
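
A quick sanity check for a Gaussian density like calc_pdf is its value at the mean, which should equal 1 / (sig * sqrt(2 * pi)); the check below is a sketch that reuses calc_pdf with illustrative parameters:

# Sketch: at x = mu the normal pdf reduces to 1 / (sig * sqrt(2 * pi)).
import math

mu, sig = 0.0, 2.0                                  # illustrative values
expected = 1.0 / (sig * math.sqrt(2 * math.pi))     # ~0.19947
assert abs(calc_pdf(mu, mu, sig) - expected) < 1e-12
print(expected)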
コード例 #55
0
def holder_table(di):
    x0 = di['x0']
    x1 = di['x1']
    return -abs(sin(x0)*cos(x1)*exp(abs(1-sqrt(x0*x0+x1*x1)/pi)))
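
holder_table implements the Hölder table benchmark function, whose commonly reported global minima lie near (±8.05502, ±9.66459) with a value of about -19.2085; a small usage sketch (assuming sin, cos, exp, sqrt and pi come from the math module, as the function requires):

# Sketch: evaluate the function above near a commonly reported optimum.
from math import sin, cos, exp, sqrt, pi

print(holder_table({'x0': 8.05502, 'x1': 9.66459}))  # about -19.2085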
コード例 #56
0
ファイル: main.py プロジェクト: webatom/data_mining_2017
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogist, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100

correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

init = tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)

for i in range(701):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000.0
    learning_rate = min_learning_rate + (max_learning_rate-min_learning_rate)*math.exp(-i/decay_speed)

    a = sess.run(accuracy, {X: batch_xs, Y_: batch_ys})
    print(str(i)+": a :" + str(a) + " lr :" + str(learning_rate))
    sess.run(train_step, {X: batch_xs, Y_: batch_ys, lr: learning_rate})

    if i % 50 == 0:
        print(sess.run(accuracy, feed_dict={X: mnist.test.images, Y_: mnist.test.labels}))


saver = tf.train.Saver()
saver.save(sess, './model2.cpkt')
print("done")
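
The learning-rate schedule above decays exponentially from max_learning_rate toward min_learning_rate; the sketch below just evaluates the same formula at a few iteration counts to show the curve (the values are illustrative and independent of the training loop):

# Sketch: lr(i) = min_lr + (max_lr - min_lr) * exp(-i / decay_speed),
# evaluated at a few iteration counts; same constants as the loop above.
import math

max_lr, min_lr, decay_speed = 0.003, 0.0001, 2000.0
for step in (0, 100, 350, 700):
    lr = min_lr + (max_lr - min_lr) * math.exp(-step / decay_speed)
    print(step, round(lr, 6))   # decays from 0.003 toward 0.0001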
コード例 #57
0
            else:
                del graph.nodes[nodeId]

        # generate a weight for each edge using the runtime of src and target
        for edgeId,edge in list(graph.edges.items()):
            # if src or target were removed delete edge instead
            if edge.src in graph.nodes and edge.target in graph.nodes:
                # get runtime of source and target
                sourceTime = float(graph.nodes[edge.src].values[fullSearchTimeStr])
                targetTime = float(graph.nodes[edge.target].values[fullSearchTimeStr])

                # weight = exp(-relativeDifference)
                if sourceTime == targetTime:
                    edge.values[fullSearchWeightStr] = 1
                else:
                    edge.values[fullSearchWeightStr] = math.exp(-abs(sourceTime - targetTime) / max(sourceTime, targetTime))
            else:
                del graph.edges[edgeId]

allNodeHeaders = [idStr, labelStr] + labelHeaders + nodeHeaders
allEdgeHeaders = [sourceStr, targetStr] + edgeHeaders

# write nodes into file
for i,graph in enumerate(graphs):
    with open(os.path.join(outputDir, nodesFile.format(i)), 'w', newline='') as f:
        nodesWriter = csv.writer(f, delimiter=',', quotechar='"')
        nodesWriter.writerow(allNodeHeaders)
        for node in graph.nodes.values():
            nodesWriter.writerow(node.getRow(nodeHeaders, labelHeaders))
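
The edge weight computed above is exp of the negative relative runtime difference, so identical runtimes give weight 1 and very different runtimes approach 0; a small worked sketch of the same rule (the function name is illustrative):

# Sketch of the weighting rule above: weight = exp(-|t_src - t_tgt| / max(t_src, t_tgt)).
import math

def edge_weight(source_time, target_time):
    if source_time == target_time:
        return 1
    return math.exp(-abs(source_time - target_time) / max(source_time, target_time))

print(edge_weight(2.0, 3.0))   # exp(-1/3) ~ 0.7165
print(edge_weight(1.0, 10.0))  # exp(-0.9)  ~ 0.4066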

コード例 #58
0
ファイル: BeggsandBrill.py プロジェクト: edwindt/peg_python
def Pgrad(P, T, oil_rate, wtr_rate, Gor, gas_grav, oil_grav, wtr_grav, d, angle):
    """Function to Calculate the Flowing Pressure Gradient by the Method of Beggs and Brill"""
    #P          pressure, psia
    #T          temperature, °F
    #oil_rate   oil flowrate, stb/d
    #wtr_rate   water flowrate, stb/d
    #Gor        producing gas-oil ratio, scf/stb
    #gas_grav   gas specific gravity
    #oil_grav   API oil gravity
    #wtr_grav   water specific gravity
    #d          pipe I.D., in.
    #angle      angle of pipe inclination in degrees
    #               90° = vertical
    #               0°  = horizontal
    
    #Set constants
    pi = math.pi   #4 * math.atan(1)                                               #Define pi
    Psep = 114.7                                                        #Separator pressure, psia
    Tsep = 50                                                           #Separator temperature, °F
   
    #Convert pipe angle from degrees to radians
    angle = angle * pi / 180
    
    #Calculate fluid properties
    Z = FluidProps.zfact((T + 460) / FluidProps.Tc(gas_grav), P / FluidProps.Pc(gas_grav))               #Gas compressibility factor
    Wor = wtr_rate / oil_rate                                           #Water-oil ratio, stb/stb
    TDS = FluidProps.salinity(wtr_grav)                                            #Water salinity, wt% total dissolved solids
    Pb = FluidProps.Pbub(T, Tsep, Psep, gas_grav, oil_grav, Gor)                   #Bubble point pressure, psia
    Rso = FluidProps.sol_gor(T, P, Tsep, Psep, Pb, gas_grav, oil_grav)             #Solution gas-oil ratio, scf/stb
    Rsw = FluidProps.sol_gwr(P, T, TDS)                                            #Solution gas_water ratio, scf/stb
    Bo = FluidProps.oil_fvf(T, P, Tsep, Psep, Pb, Rso, gas_grav, oil_grav)         #Oil formation volume factor, rb/stb
    Bw = FluidProps.wtr_fvf(P, T, TDS)                                             #Water formation volume factor, rb/stb
    Bg = FluidProps.gas_fvf(P, T, gas_grav)                                        #Gas formation volume factor, ft³/scf
    muo = FluidProps.oil_visc(T, P, Tsep, Psep, Pb, Rso, gas_grav, oil_grav)       #Oil viscosity, cp
    muw = FluidProps.wtr_visc(P, T, TDS)                                           #Water viscosity, cp
    mug = FluidProps.gvisc(P, T + 460, Z, gas_grav)                                #Gas viscosity, cp
    rhoo = FluidProps.oil_dens(T, P, Tsep, Psep, Pb, Bo, Rso, gas_grav, oil_grav)  #Oil density, lb/ft³
    rhow = 62.368 * wtr_grav / Bw                                                  #Water density, lb/ft³
    rhog = 2.699 * gas_grav * P / (T + 460) / Z                                    #Gas density, lb/ft³
    sigo = FluidProps.oil_tens(P, T, oil_grav)                                     #Gas-oil interfacial tension, dynes/cm
    sigw = FluidProps.wtr_tens(P, T)                                               #Gas-water interfacial tension, dynes/cm

    #Volume fraction weighted liquid properties
    rhol = (Bw * Wor * rhow + Bo * rhoo) / (Bw * Wor + Bo)              #Liquid density
    mul = (Bw * Wor * rhow) / (Bw * Wor * rhow + Bo * rhoo) * muw + (Bo * rhoo) / (Bw * Wor * rhow + Bo * rhoo) * muo             #Liquid viscosity
    sigl = (Bw * Wor * rhow) / (Bw * Wor * rhow + Bo * rhoo) * sigw + (Bo * rhoo) / (Bw * Wor * rhow + Bo * rhoo) * sigo           #Gas-liquid interfacial tension
    
    #Calculate downhole fluid flowrates in ft³/s
    qo = Bo * oil_rate / 15387                                          #Oil flowrate
    qw = Bw * Wor * oil_rate / 15387                                    #Water flowrate
    ql = qo + qw                                                        #Liquid flowrate
    if ((Gor - Rso) < 0):                                        #If gas flowrate is negative, set to zero
        qg = 0
    else:
        qg = Bg * (Gor - Rso - Rsw * Wor) * oil_rate / 86400
    
        
    #Calculate fluid superficial velocities in ft/s
    Axs = pi / 4 * (d / 12) ** 2                                         #X-sectional area of pipe, ft²
    usl = ql / Axs                                                      #Liquid superficial velocity
    usg = qg / Axs                                                      #Gas superficial velocity
    um = usl + usg                                                      #Mixture superficial velocity
    
    #Determine flow regime
    Nfr = um ** 2 / (d / 12) / 32.174                                    #Froude number
    Nvl = 1.938 * usl * (rhol / sigl) ** 0.25                            #Liquid velocity number
    laml = usl / um                                                     #Input liquid fraction
    lamg = 1 - laml                                                     #Input gas fraction
    L1 = 316 * laml ** 0.302                                             #Dimensionless constants
    L2 = 0.0009252 * laml ** -2.4684
    L3 = 0.1 * laml ** -1.4516
    L4 = 0.5 * laml ** -6.738

    regime = Flow_regime(Nfr, laml, L1, L2, L3, L4)
                 
    #Calculate holdups
    if (regime == 2):
        a = (L3 - Nfr) / (L3 - L2)
        yl_seg = Liq_holdup(Nfr, Nvl, laml, angle, 1)
        yl_int = Liq_holdup(Nfr, Nvl, laml, angle, 3)
        yl = a * yl_seg + (1 - a) * yl_int
    else:
        yl = Liq_holdup(Nfr, Nvl, laml, angle, regime)
    
    yg = 1 - yl
    
    #Calculate fluid mixture properties
    rhom = rhol * laml + rhog * lamg                                     #Input fraction weighted density, lb/ft³
    mum = mul * laml + mug * lamg                                        #Input fraction weighted viscosity, cp
    rhobar = rhol * yl + rhog * yg                                       #In-situ average density, lb/ft³
    
    #Calculate friction factor
    Nre = 1488 * rhom * um * (d / 12) / mum                              #Reynolds number
    fn = Fric(Nre, 0.0006)                                                    #No-slip friction factor
    x = laml / yl ** 2
    if ((x > 1) and (x < 1.2)):
        s = math.log(2.2 * x - 1.2)
    else:
        s = math.log(x) / (-0.0523 + 3.182 * math.log(x) - 0.8725 * (math.log(x)) ** 2 + 0.01853 * (math.log(x)) ** 4)
    
    ftp = fn * math.exp(s)                                                    #Two-phase friction factor
    
    #Calculate gradients
    Pgrad_pe = rhobar * math.sin(angle) / 144                                 #Potential energy pressure gradient, psi/ft
    Pgrad_f = 2 * ftp * rhom * um ** 2 / 32.17 / (d / 12) / 144           #Frictional pressure gradient, psi/ft
    Ek = um * usg * rhobar / 32.17 / P / 144                              #Kinetic energy factor
    return (Pgrad_pe + Pgrad_f) / (1 - Ek)                               #Overall pressure gradient, psi/ft
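
The two-phase friction multiplier exp(s) used near the end of Pgrad can be examined on its own; the sketch below re-implements just that correlation, with an illustrative input value of x = laml / yl**2:

# Sketch: Beggs & Brill two-phase friction multiplier e**s as a function of
# x = laml / yl**2, mirroring the correlation used in Pgrad above.
import math

def friction_multiplier(x):
    if 1 < x < 1.2:
        s = math.log(2.2 * x - 1.2)
    else:
        lnx = math.log(x)
        s = lnx / (-0.0523 + 3.182 * lnx - 0.8725 * lnx ** 2 + 0.01853 * lnx ** 4)
    return math.exp(s)

print(friction_multiplier(2.0))  # ~1.49, i.e. ftp ~ 1.49 * fn for this x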
コード例 #59
0
    def vanna(self):
        if not self.hasLevel:
            raise RuntimeError('Option current levels has not been set')
        if self.tToExpiry <= 0.0: return float('nan')
        exp_minus_qt = math.exp(-self.dvdYield * self.tToExpiry)
        return -exp_minus_qt * norm.pdf(self.d1) * self.d2 / self.ivol
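
The return value above matches the usual Black-Scholes vanna, -exp(-q*T) * phi(d1) * d2 / sigma; a self-contained numeric sketch (the inputs and the d1/d2 computation are assumptions for illustration, not taken from the class shown):

# Sketch: Black-Scholes vanna = -exp(-q*T) * phi(d1) * d2 / sigma, with phi the
# standard normal pdf; all inputs below are illustrative.
import math

def norm_pdf(x):
    return math.exp(-0.5 * x * x) / math.sqrt(2.0 * math.pi)

def bs_vanna(spot, strike, r, q, sigma, t):
    d1 = (math.log(spot / strike) + (r - q + 0.5 * sigma ** 2) * t) / (sigma * math.sqrt(t))
    d2 = d1 - sigma * math.sqrt(t)
    return -math.exp(-q * t) * norm_pdf(d1) * d2 / sigma

print(bs_vanna(100.0, 100.0, 0.01, 0.0, 0.2, 1.0))  # ~0.10 for these inputs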
コード例 #60
0
ファイル: utils.py プロジェクト: LiamZhuuu/CSE250A
def sigmod(z):
    return 1.0 / (1.0 + exp(-z))
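
sigmod as written raises OverflowError for large negative z, because math.exp(-z) blows up; a numerically safe variant with the same interface (a sketch, not the original utility) branches on the sign of z:

# Sketch: overflow-safe logistic sigmoid; same result as sigmod for moderate z,
# but no OverflowError for very negative z.
from math import exp

def sigmoid_stable(z):
    if z >= 0:
        return 1.0 / (1.0 + exp(-z))
    ez = exp(z)                    # z < 0, so exp(z) cannot overflow
    return ez / (1.0 + ez)

print(sigmoid_stable(-1000.0))     # 0.0 instead of raising
print(sigmoid_stable(0.0))         # 0.5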