def mod_scores(self, x, k):
    """Return log scores for a chi-square variable with k degrees of freedom.

    Gives a tuple (log of the chi-square density at x, log of the
    upper-tail term gamma_incomplete_complement(k/2, x/2) / Gamma(k/2)).
    x is clamped at 10 so extreme values cannot dominate the score.
    """
    if x > 10:
        x = 10.0
    half_k = k / 2.0
    gam = gamma_complete(half_k)
    # chi-square pdf: x^(k/2 - 1) * exp(-x/2) / (2^(k/2) * Gamma(k/2))
    density = x ** (half_k - 1) * math.exp(-x / 2.0) / (2 ** half_k * gam)
    # NOTE(review): assumes gamma_incomplete_complement returns the
    # un-normalised upper incomplete gamma (hence the division by gam) -- confirm.
    tail = gamma_incomplete_complement(half_k, x / 2.0) / gam
    return math.log(density), math.log(tail)
def normalize(self):
    """Rescale coefs[0] until the normalisation constant equals 1 (within 1e-6).

    Each pass subtracts 2*log(constant) from the leading coefficient,
    rebuilds the Chebyshev polynome, and re-checks; iterates until
    convergence (the original did this via tail recursion).
    """
    while True:
        const = self.compute_normalisation_constant()
        self.coefs[0] = self.coefs[0] - 2.0 * math.log(const)
        self.polynome = chebyshev_polynome(self.n, -1.0, +1.0, self.coefs)
        # converged when the recomputed constant is 1 within tolerance
        if abs(self.compute_normalisation_constant() - 1.0) <= 1e-6:
            break
def __init__(self, penalty_factor=1, penalty_scale=100, min_functional=1.e-10):
    """Set up the exponential occupancy-penalty parameters.

    occupancy_max is the occupancy at which the penalty value falls to
    min_functional; beyond it the penalty is treated as zero.
    """
    # adopt_init_args must run before any extra locals are created,
    # so that only the constructor arguments are stored on self.
    adopt_init_args(self, locals())
    self.occupancy_max = (
        -math.log(min_functional) / (penalty_factor * penalty_scale))
def q_range_analyses(self, rg, io, rat_lim=1.5, window_size=3, level=10.0, sigma=False):
    """Find q-ranges where the data deviate from the Guinier model
    I(q) = io * exp(-q^2 * rg^2 / 3).

    Only points with q*rg < rat_lim are analysed.  A sliding window of
    `window_size` points is scored; consecutive windows whose mean score
    exceeds `level` are merged into a single [q_start, q_end] range.

    :param rg: radius of gyration of the Guinier fit
    :param io: forward scattering intensity I(0)
    :param rat_lim: upper limit on q*rg for points to be analysed
    :param window_size: number of points in the sliding score window
    :param level: mean-score threshold above which a window is flagged
    :param sigma: if True, score with sqrt(chi-square) against the
                  experimental sigmas (via self.chi_square); otherwise
                  score the percent deviation |I_obs - I_model| / (I_model/100)
    :return: list of [q_start, q_end] ranges flagged as deviating
    """
    selector = flex.bool(self.data.q * rg < rat_lim)
    tmp_q = self.data.q.select(selector)
    tmp_i = self.data.i.select(selector)
    tmp_s = self.data.s.select(selector)
    rg2 = rg * rg
    lni = math.log(io)
    if sigma:
        # per-point sqrt(chi-square) against the experimental sigmas
        cs = self.chi_square(lni, rg2, tmp_q, tmp_i, tmp_s, False)
        cs = flex.sqrt(cs)
    else:
        # percent deviation from the Guinier model
        cs = flex.exp(lni - rg2 * tmp_q * tmp_q / 3.0)
        ss = cs / 100.0
        cs = flex.abs(tmp_i - cs) / ss
    not_okai_ranges = []
    previous_one_was_bad = False
    tmp_range = []
    for ii in xrange(window_size, cs.size()):
        tmp_cs = flex.mean(cs[ii - window_size:ii])
        if tmp_cs > level:
            if not previous_one_was_bad:
                # open a new range [q, q]
                tmp_range.append(tmp_q[ii])
                tmp_range.append(tmp_q[ii])
                previous_one_was_bad = True
            else:
                # extend the currently open range
                tmp_range[1] = tmp_q[ii]
        else:
            previous_one_was_bad = False
            if len(tmp_range) > 0:
                not_okai_ranges.append(tmp_range)
                tmp_range = []
    # BUG FIX: a range still open when the loop ends (scores stay above
    # `level` through the final window) was previously dropped; flush it.
    if len(tmp_range) > 0:
        not_okai_ranges.append(tmp_range)
    return not_okai_ranges
def gradient(self, occupancy):
    """First derivative of the exponential occupancy penalty.

    Returns 0 once occupancy exceeds occupancy_max (the penalty is
    negligible there).  Occupancies below -1 are first remapped through
    -1 - log(-occupancy) to tame the exponential.
    """
    if occupancy > self.occupancy_max:
        return 0
    if occupancy < -1:
        occupancy = -1 - math.log(-occupancy)
    scale = self.penalty_scale
    factor = self.penalty_factor
    return -scale * factor * math.exp(-scale * occupancy * factor)
def functional(self, occupancy):
    """Value of the exponential occupancy penalty.

    Zero beyond occupancy_max; occupancies below -1 are softened via a
    logarithmic remap before the exponential is evaluated.
    """
    if occupancy > self.occupancy_max:
        return 0
    if occupancy < -1:
        occupancy = -1 - math.log(-occupancy)
    return math.exp(-self.penalty_scale * occupancy * self.penalty_factor)
def gradient(self, occupancy):
    """Derivative of the occupancy penalty exp(-scale*factor*occupancy)."""
    # Flat region: the penalty is negligible past occupancy_max.
    if occupancy > self.occupancy_max:
        return 0
    # Log-remap strongly negative occupancies before exponentiating.
    if occupancy < -1:
        occupancy = -1 - math.log(-occupancy)
    s, f = self.penalty_scale, self.penalty_factor
    return -s * f * math.exp(-s * occupancy * f)
def functional(self, occupancy):
    """Exponential penalty on occupancy; treated as zero beyond occupancy_max."""
    if occupancy > self.occupancy_max:
        return 0
    if occupancy < -1:
        # soften values below -1 with a logarithmic remap
        occupancy = -1 - math.log(-occupancy)
    scale = self.penalty_scale
    return math.exp(-scale * occupancy * self.penalty_factor)