def move(self):
    self.location[0] += self.velocity[0]
    self.location[1] += self.velocity[1]
    # Offset the blit position so the rotated image stays centred on the
    # logical location; both axes use the image width, as in the original
    # (a square sprite is assumed)
    offset = abs(math_sin(math_pi * self.angle / 90) * self.image.get_width() / math_sqrt(8))
    self.blit_location[0] = self.location[0] - offset
    self.blit_location[1] = self.location[1] - offset
    # Wrap around the window edges
    if 0 > self.location[0] + self.image.get_width() // 2:
        self.location[0] = const.WINDOW_WIDTH - self.image.get_width() // 2
    elif self.location[0] + self.image.get_width() // 2 > const.WINDOW_WIDTH:
        self.location[0] = - self.image.get_width() // 2
    if 0 > self.location[1] + self.image.get_height() // 2:
        self.location[1] = const.WINDOW_HEIGHT - self.image.get_height() // 2
    elif self.location[1] + self.image.get_height() // 2 > const.WINDOW_HEIGHT:
        self.location[1] = - self.image.get_height() // 2
    self.velocity[0] += self.acceleration[0]
    self.velocity[1] += self.acceleration[1]
    # Clamp speed to 10; compute the magnitude once, so the second component
    # is not scaled against an already-modified first component (the original
    # recomputed the sqrt between the two updates)
    speed = math_sqrt(self.velocity[0] ** 2 + self.velocity[1] ** 2)
    if speed > 10:
        self.velocity[0] *= 10 / speed
        self.velocity[1] *= 10 / speed
    # reset acceleration
    self.acceleration[0] = 0
    self.acceleration[1] = 0
def get_term_meaning_score(self, term, weight_this_shop, weight_other_shops, number_of_other_shops):
    # 'longer' and 'multi-word' are better
    length_score = math_sqrt(len(term)) * len(term.split(' '))
    # less popular is better
    average_weight_other_shops = weight_other_shops / number_of_other_shops
    relative_popularity_score = (
        weight_this_shop / math_sqrt(average_weight_other_shops + 1))
    score = length_score * relative_popularity_score
    return score
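# Hedged worked example (not from the source): _term_score is a standalone
# copy of the arithmetic above, inlined so it runs without the host class, to
# show which way the heuristic pulls — a long, multi-word term that is rare
# in other shops outscores a short, common one.
from math import sqrt as math_sqrt

def _term_score(term, weight_this_shop, weight_other_shops, number_of_other_shops):
    length_score = math_sqrt(len(term)) * len(term.split(' '))
    average = weight_other_shops / number_of_other_shops
    return length_score * (weight_this_shop / math_sqrt(average + 1))

print(_term_score('hand painted mug', 5.0, 2.0, 10))  # ~54.8 (rare, multi-word)
print(_term_score('mug', 5.0, 50.0, 10))              # ~3.5  (common, single word)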
def getStraightness(line, start, end):
    """
    Ratio of the straight-line distance between two points of a polygon to
    the path length along the polygon between them (1.0 = perfectly straight).

    :param line: object helper.polygon
    :param start: index of the starting point
    :param end: index of the ending point
    :return: straightness ratio (distance / path length)
    """
    s = 0
    for i in range(start, end):
        # accumulate segment lengths ('s =+' in the original kept only the
        # last segment; '+=' is the intended accumulation)
        s += math_sqrt(
            (line.col[i] - line.col[i + 1]) * (line.col[i] - line.col[i + 1])
            + (line.row[i] - line.row[i + 1]) * (line.row[i] - line.row[i + 1]))
    distance = math_sqrt(
        (line.col[start] - line.col[end]) * (line.col[start] - line.col[end])
        + (line.row[start] - line.row[end]) * (line.row[start] - line.row[end]))
    return distance / s
def create_prior_boxes(self, aspect_ratios):
    feature_map_dimensions = {
        'conv4_3': 38, 'conv7': 19, 'conv8_2': 10,
        'conv9_2': 5, 'conv10_2': 3, 'conv11_2': 1
    }
    obj_scales = {
        'conv4_3': 0.1, 'conv7': 0.2, 'conv8_2': 0.375,
        'conv9_2': 0.55, 'conv10_2': 0.725, 'conv11_2': 0.9
    }
    feature_maps = list(feature_map_dimensions.keys())
    prior_boxes = []
    for k, fmap in enumerate(feature_maps):
        for i in range(feature_map_dimensions[fmap]):
            for j in range(feature_map_dimensions[fmap]):
                cx = (j + 0.5) / feature_map_dimensions[fmap]
                cy = (i + 0.5) / feature_map_dimensions[fmap]
                for ratio in aspect_ratios[fmap]:
                    prior_boxes.append([
                        cx, cy,
                        obj_scales[fmap] * math_sqrt(ratio),
                        obj_scales[fmap] / math_sqrt(ratio)
                    ])
                    # For an aspect ratio of 1, add an extra prior whose scale
                    # is the geometric mean of this map's and the next map's
                    if ratio == 1.:
                        try:
                            additional_scale = math_sqrt(
                                obj_scales[fmap] * obj_scales[feature_maps[k + 1]])
                        except IndexError:
                            # the last feature map has no successor
                            additional_scale = 1.
                        prior_boxes.append(
                            [cx, cy, additional_scale, additional_scale])
    prior_boxes = self._to_cuda(FloatTensor(prior_boxes))
    prior_boxes.clamp_(0, 1)
    return prior_boxes
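# Hedged sanity check (not from the source): with the canonical SSD300
# aspect-ratio sets (3, 5, 5, 5, 3, 3 ratios per map, each including 1., so
# every location gets len(ratios) + 1 priors), the loop above should yield
# the familiar 8732 prior boxes.
dims = [38, 19, 10, 5, 3, 1]
per_loc = [4, 6, 6, 6, 4, 4]  # len(aspect_ratios[fmap]) + 1 extra for ratio 1
print(sum(d * d * n for d, n in zip(dims, per_loc)))  # 8732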
def statistics(self):
    """ Do statistical analysis of population and set 'statted' to True """
    if self.statted:
        return
    logging.debug("Running statistical calc.")
    raw_sum = 0
    len_pop = len(self)
    for ind in xrange(len_pop):
        raw_sum += self[ind].score
    self.stats["rawMax"] = max(self, key=key_raw_score).score
    self.stats["rawMin"] = min(self, key=key_raw_score).score
    self.stats["rawAve"] = raw_sum / float(len_pop)
    tmpvar = 0.0
    for ind in xrange(len_pop):
        s = self[ind].score - self.stats["rawAve"]
        s *= s
        tmpvar += s
    tmpvar /= float(len(self) - 1)
    self.stats["rawDev"] = math_sqrt(tmpvar)
    self.stats["rawVar"] = tmpvar
    self.statted = True
def cuda_gw_hist(data, bins, scale, gw_hist_out):
    """Increment weighted bin counts in gw_hist_out, given an array of bins

    Parameters
    ----------
    data : ndarray of shape (ndata, )

    bins : ndarray of shape (nbins, )

    scale : ndarray of shape (ndata, )

    gw_hist_out : ndarray of shape (nbins - 1, )
        Empty array to store result
    """
    # find where this job goes over
    start = cuda.grid(1)
    stride = cuda.gridsize(1)

    # define some useful things
    bot = bins[0]
    sqrt2 = math_sqrt(2.)

    # loop over the data set - each thread now looks at one data point.
    for i in range(start, data.shape[0], stride):
        z = (data[i] - bot) / scale[i] / sqrt2
        last_cdf = 0.5 * (1. + math_erf(z))
        # for each bin, calculate weight and add it in
        for j in range(1, bins.shape[0]):
            bin_edge = bins[j]
            z = (data[i] - bin_edge) / scale[i] / sqrt2
            new_cdf = 0.5 * (1. + math_erf(z))
            weight = last_cdf - new_cdf
            # atomic add to bin to avoid race conditions
            cuda.atomic.add(gw_hist_out, j - 1, weight)
            last_cdf = new_cdf
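# Hedged launch sketch (not from the source): cuda_gw_hist uses cuda.grid and
# cuda.atomic.add, so in its source it is presumably decorated with
# numba's @cuda.jit and launched with an explicit grid/block configuration,
# along the lines of:
#
#   threads = 128
#   blocks = (data.shape[0] + threads - 1) // threads
#   cuda_gw_hist[blocks, threads](d_data, d_bins, d_scale, d_hist_out)
#
# where d_* are device arrays (e.g. from cuda.to_device).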
def filterByResponseMeanStd(lines, response_map, sigmafactor_max, sigmafactor_min,
                            double_filament_insensitivity, fitDistr=False):
    """
    :param lines: list of object helper.polygon
    :param response_map: 2D array of per-pixel filter responses
    :param sigmafactor_max: upper cutoff, in standard deviations above the mean
    :param sigmafactor_min: lower cutoff, in standard deviations below the mean
    :param double_filament_insensitivity: fraction (0-1) of a line's points
        allowed to fall outside the thresholds before the line is excluded
    :param fitDistr: if True, refine the mean/std estimates by fitting a
        normal distribution to the response histogram
    :return: filtered list of polygons
    """
    filtered = list()  # it will be a list of polygon

    # Calculate mean response over all lines
    sum_mean_line_response = 0
    for l in lines:
        sum_mean_line_response += meanResponse(l=l, response_map=response_map)
    mean_response = sum_mean_line_response / len(lines)

    # Calculate standard deviation of the response
    sd = 0
    n = 0
    for l in lines:
        n += l.num
        for i, j in zip(l.col, l.row):
            value = response_map[i, j] - mean_response
            sd += value * value
    sd = math_sqrt(sd / n)

    # Fit a distribution to get better estimates for the mean response
    # and the standard deviation
    if fitDistr is True:
        fitted = fitNormalDistributionToHist(
            hist=getResponseHistogram(lines=lines, response_map=response_map))
        mean_response = fitted[0]
        sd = fitted[1]

    # Calculate the thresholds
    th_power = pow(10, -6)
    threshold_min = JAVA_MIN_DOUBLE if sigmafactor_min < th_power else mean_response - (sd * sigmafactor_min)
    threshold_max = mean_response + (sd * sigmafactor_max)
    if sigmafactor_max < th_power:
        # no max threshold
        threshold_max = JAVA_MAX_DOUBLE
    elif threshold_max > 255:
        threshold_max = 254.9

    # For each line: count the positions (pixels) whose response is below
    # threshold_min or above threshold_max. If that count is higher than the
    # number of positions times some factor (0-1), the filament is excluded.
    for l in lines:
        nOver = 0
        nUnder = 0
        for x, y in zip(l.col, l.row):
            if response_map[x, y] > threshold_max:
                nOver += 1
            if response_map[x, y] < threshold_min:
                nUnder += 1
        numberOfPointsToBeExcluded = int(l.num * double_filament_insensitivity)
        if nUnder < numberOfPointsToBeExcluded and nOver < numberOfPointsToBeExcluded:
            filtered.append(l)
    return filtered
def non_iter_ls_inv_stft(stft_object):
    stft_data = stft_object['stft']
    origSigSize = stft_object['origSigSize']
    num_rows, _, _ = origSigSize
    shift_length = stft_object['shift_length']
    len_each_section, num_rows_overlap, _, _ = stft_data.shape
    # TODO: Isn't this just num_rows in the very beginning?
    # total_new_elements = (num_rows_overlap - 1) * shift_length + len_each_section
    win_info = stft_object['win_info']
    wVec = win_info(len_each_section)
    wVecSq = wVec ** 2

    vecC = np_arange(1, num_rows_overlap * shift_length, step=shift_length)
    # vecC = range(0, num_rows_overlap*shift_length-1, shift_length)
    DlsArr = np_zeros((num_rows, ))
    for j in vecC:
        tmpArr = np_arange(j - 1, len_each_section + j - 1)
        # tmpArr = np_arange(j, len_each_section+j)
        DlsArr[tmpArr] += wVecSq
    # DlsArrInv = 1/DlsArr

    invFT = math_sqrt(len_each_section) * np_ifft(stft_data, axis=0)
    # apply the synthesis window before taking the real part; the original
    # took .real first and then windowed the unused complex array, which
    # silently discarded the windowing
    invFT *= wVec[:, np_newaxis, np_newaxis, np_newaxis]
    invFT_real = invFT.real

    yEst = np_zeros(origSigSize)
    for index, j in enumerate(vecC):
        tmpArr = np_arange(j - 1, len_each_section + j - 1)
        yEst[tmpArr, :] += invFT_real[:, index, :]
    # sigOut = yEst * DlsArrInv[:, np_newaxis, np_newaxis]
    sigOut = yEst / DlsArr[:, np_newaxis, np_newaxis]
    return sigOut
def statistics(self):
    """ Do statistical analysis of population and set 'statted' to True """
    if self.statted:
        return
    logging.debug("Running statistical calculations")
    raw_sum = 0
    len_pop = len(self)
    for ind in range(len_pop):
        raw_sum += self[ind].score
    self.stats["rawMax"] = max(self, key=key_raw_score).score
    self.stats["rawMin"] = min(self, key=key_raw_score).score
    self.stats["rawAve"] = raw_sum / float(len_pop)
    tmpvar = 0.0
    for ind in range(len_pop):
        s = self[ind].score - self.stats["rawAve"]
        s *= s
        tmpvar += s
    tmpvar /= float(len(self) - 1)
    try:
        self.stats["rawDev"] = math_sqrt(tmpvar)
    except ValueError:  # TODO test needed
        self.stats["rawDev"] = 0.0
    self.stats["rawVar"] = tmpvar
    self.statted = True
def addsub(number, name, left, right):
    if left.units != right.units and left.number and right.number:
        # pass the operation's own format string (name) so the error reads
        # correctly for both '+' and '-'; the original hard-coded '%s + %s'
        raise_QuantError("Units in sum not compatible", name, (left, right))
    prefu, provenance = inherit_binary(left, right)
    # fall back to the other operand's units when the first has value zero
    # (the original's else-branch repeated left.units)
    units = left.units if left.number else right.units
    # uncertainties add in quadrature for sums and differences
    uncert = math_sqrt(left.uncert ** 2 + right.uncert ** 2)
    return Q(number, name, units, uncert, prefu, provenance)
def wilson_score_interval(x: int, n: int, conflevel: Union[float, None] = 0.95,
                          z: Union[float, None] = None):
    # LaTeX: $$(w^-, w^+) = \frac{p + z^2/2n \pm z\sqrt{p(1-p)/n + z^2/4n^2}}{1+z^2/n}$$
    """Calculates confidence interval for proportions using Wilson Score Interval method

    `x` - succeeded trials

    `n` - total trials

    `conflevel` - confidence level (0 < float < 1). Defaults to 0.95 if both it and *z* are unset

    `z` - z score. If unset, calculated from the given *conflevel*
    """
    if x > n:
        raise ValueError(
            f"Number of succeeded trials (x) has to be no more than number of total trials (n). x = {x} and n = {n} were passed"
        )

    if z is None:
        # honour an explicitly passed z score; the original unconditionally
        # recomputed it from conflevel, contradicting the docstring
        z = normal_z_score_two_tailed(conflevel)

    p = float(x) / n
    denom = 1 + ((z**2) / n)
    mean = p + ((z**2) / (2 * n))
    diff = z * math_sqrt(p * (1 - p) / n + (z**2) / (4 * n**2))
    ci = ((mean - diff) / denom, (mean + diff) / denom)
    return ci
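# Hedged worked example (not from the source): _wilson_demo inlines the same
# arithmetic with a hard-coded two-tailed z for the 95% level, since
# normal_z_score_two_tailed is not available here.
from math import sqrt as math_sqrt

def _wilson_demo(x, n, z=1.959963984540054):  # z for 95%, two-tailed
    p = x / n
    denom = 1 + z**2 / n
    mean = p + z**2 / (2 * n)
    diff = z * math_sqrt(p * (1 - p) / n + z**2 / (4 * n**2))
    return ((mean - diff) / denom, (mean + diff) / denom)

print(_wilson_demo(8, 10))  # ~(0.490, 0.943): stays inside [0, 1]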
def getRMSE(self):
    """ Return the root mean square error

    :rtype: float RMSE
    """
    return math_sqrt(self.acc_square / float(self.acc_len))
def statistics(self):
    """ Do statistical analysis of population and set 'statted' to True """
    if self.statted:
        return
    logging.debug("Running statistical calculations")
    raw_sum = 0
    fit_sum = 0
    len_pop = len(self)
    for ind in xrange(len_pop):
        raw_sum += self[ind].score
        #fit_sum += self[ind].fitness

    self.stats["rawMax"] = max(self, key=key_raw_score).score
    self.stats["rawMin"] = min(self, key=key_raw_score).score
    self.stats["rawAve"] = raw_sum / float(len_pop)
    #self.stats["rawTot"] = raw_sum
    #self.stats["fitTot"] = fit_sum

    tmpvar = 0.0
    for ind in xrange(len_pop):
        s = self[ind].score - self.stats["rawAve"]
        s *= s
        tmpvar += s
    tmpvar /= float(len(self) - 1)
    try:
        self.stats["rawDev"] = math_sqrt(tmpvar)
    except ValueError:  # narrowed from a bare except
        self.stats["rawDev"] = 0.0
    self.stats["rawVar"] = tmpvar
    self.statted = True
def numba_kernel_histogram(data, bins, scale, khist):
    """
    Parameters
    ----------
    data : ndarray of shape (ndata, )

    bins : ndarray of shape (nbins, )

    scale : float or ndarray of shape (ndata, )

    khist : ndarray of shape (nbins-1, )
        Empty array used to store the result
    """
    ndata = len(data)
    nbins = len(bins)
    bot = bins[0]
    sqrt2 = math_sqrt(2)
    for i in range(ndata):
        x = data[i]
        z = (x - bot) / scale / sqrt2
        last_cdf = 0.5 * (1. + math_erf(z))
        for j in range(1, nbins):
            bin_edge = bins[j]
            z = (x - bin_edge) / scale / sqrt2
            new_cdf = 0.5 * (1. + math_erf(z))
            weight = last_cdf - new_cdf
            khist[j - 1] += weight
            last_cdf = new_cdf
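# Hedged usage sketch (not from the source): each data point is smeared into
# a Gaussian of width `scale`, so khist holds fractional counts whose total
# is close to ndata when the bins cover the data. The function body is plain
# Python, so this runs as-is; in its source it is presumably compiled with
# numba's @njit.
import numpy as np
from math import sqrt as math_sqrt, erf as math_erf  # aliases the function expects

data = np.array([0.5, 1.5, 1.6])
bins = np.linspace(0.0, 2.0, 5)   # 4 bins over [0, 2]
khist = np.zeros(len(bins) - 1)
numba_kernel_histogram(data, bins, 0.1, khist)
print(khist, khist.sum())         # fractional counts, sum close to 3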
def Z_test_pooled(xT: int, nT: int, xC: int, nC: int,
                  conflevel: float = 0.95) -> Tuple[float, float]:
    # LATEX: $$ (\delta^-, \delta^+) = \hat{p}_T - \hat{p}_C \pm z_{\alpha}\sqrt{\bar{p}(1-\bar{p})\left(\frac{1}{n_T}+\frac{1}{n_C}\right)} $$
    # $$ \bar{p} = \frac{n_T\hat{p}_T + n_C\hat{p}_C}{n_T + n_C} $$
    """Calculates confidence interval for the difference between two proportions using Z test (pooled) method

    `xT` - succeeded trials in the experimental (trial) group

    `nT` - total trials in the experimental (trial) group

    `xC` - succeeded trials in the control group

    `nC` - total trials in the control group

    `conflevel` - confidence level (0 < float < 1). Defaults to 0.95
    """
    pT = float(xT) / nT
    pC = float(xC) / nC

    z = normal_z_score_two_tailed(conflevel)

    # note: the interval is centred on pC - pT (control minus trial), while
    # the LaTeX above is written for pT - pC; the width is identical
    delta = pC - pT
    p_bar = (nT * pT + nC * pC) / (nT + nC)
    sd = math_sqrt(p_bar * (1 - p_bar) * (1 / nT + 1 / nC))
    z_sd = abs(z * sd)
    ci = (delta - z_sd, delta + z_sd)
    return ci
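# Hedged worked example (not from the source): _z_pooled_demo inlines the
# pooled-variance arithmetic with z hard-coded for the 95% level, for a trial
# arm with 40/100 successes against a control arm with 30/100.
from math import sqrt as math_sqrt

def _z_pooled_demo(xT, nT, xC, nC, z=1.959963984540054):
    pT, pC = xT / nT, xC / nC
    p_bar = (nT * pT + nC * pC) / (nT + nC)        # pooled proportion
    sd = math_sqrt(p_bar * (1 - p_bar) * (1 / nT + 1 / nC))
    delta = pC - pT                                # centred as in the original
    return (delta - abs(z * sd), delta + abs(z * sd))

print(_z_pooled_demo(40, 100, 30, 100))  # ~(-0.232, 0.032): 0 inside, not significant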
def muldiv(number, name, units, self, other):
    if number:
        # relative uncertainties add in quadrature for products and quotients
        uncert = math_sqrt((self.uncert / self.number) ** 2
                           + (other.uncert / other.number) ** 2) * abs(number)
    else:
        uncert = 0.0
    if hasattr(number, 'denominator') and number.denominator == 1:
        number = int(number)
    prefu, provenance = inherit_binary(self, other)
    return Q(number, name, units, uncert, prefu, provenance)
def test_against_uncertainties_package():
    try:
        from uncertainties import ufloat
        from uncertainties import umath
        from math import sqrt as math_sqrt
    except ImportError:
        return
    X, varX = 0.5, 0.04
    Y, varY = 3, 0.09
    N = 3
    ux = ufloat(X, math_sqrt(varX))
    uy = ufloat(Y, math_sqrt(varY))

    def _compare(result, u):
        Z, varZ = result
        # abs() on the variance term too; without it a too-small variance
        # would pass trivially
        assert abs(Z - u.n) / u.n < 1e-13 and abs(varZ - u.s**2) / u.s**2 < 1e-13, \
            "expected (%g,%g) got (%g,%g)" % (u.n, u.s**2, Z, varZ)

    def _check_pow(u):
        _compare(pow(X, varX, N), u)

    def _check_unary(op, u):
        _compare(op(X, varX), u)

    def _check_binary(op, u):
        _compare(op(X, varX, Y, varY), u)

    _check_pow(ux**N)
    _check_binary(add, ux + uy)
    _check_binary(sub, ux - uy)
    _check_binary(mul, ux * uy)
    _check_binary(div, ux / uy)
    _check_binary(pow2, ux**uy)
    _check_unary(exp, umath.exp(ux))
    _check_unary(log, umath.log(ux))
    _check_unary(sin, umath.sin(ux))
    _check_unary(cos, umath.cos(ux))
    _check_unary(tan, umath.tan(ux))
    _check_unary(arcsin, umath.asin(ux))
    _check_unary(arccos, umath.acos(ux))
    _check_unary(arctan, umath.atan(ux))
    _check_binary(arctan2, umath.atan2(ux, uy))
def binomial_distribution_two_tailed_range(n: int, p: float, sds: float):
    """
    Calculates the range of `x` values that span `sds` standard deviations
    from the mean of a binomial distribution with parameters `n` and `p`:
    `Binom(n,p)`
    """
    M = n * p
    sd = math_sqrt(n * p * (1 - p))
    (x_from, x_to) = max(0, np_floor(M - sds * sd) - 1), min(n, np_ceil(M + sds * sd) + 1)
    # (y_from, y_to) = binomial_distribution.pmf(x_from, n, p), binomial_distribution.pmf(x_to, n, p)
    return int(x_from), int(x_to)
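# Hedged worked example (not from the source): _binom_range_demo replicates
# the arithmetic with math.floor/ceil in place of the np_ aliases. For
# Binom(100, 0.5) the mean is 50 and sd = sqrt(100 * 0.5 * 0.5) = 5, so three
# standard deviations give [50 - 15 - 1, 50 + 15 + 1].
from math import sqrt as math_sqrt, floor, ceil

def _binom_range_demo(n, p, sds):
    M, sd = n * p, math_sqrt(n * p * (1 - p))
    return max(0, floor(M - sds * sd) - 1), min(n, ceil(M + sds * sd) + 1)

print(_binom_range_demo(100, 0.5, 3))  # (34, 66)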
def _gpu_euclidean_norm(A, B, C):
    """
    Compute the euclidean norm with a numba guvectorize function on the GPU
    (d, m, p),(d, p, n)->(m,n)
    """
    d, m, p = A.shape
    d, p, n = B.shape
    for i in range(m):
        for j in range(n):
            C[i, j] = 0
            for k in range(d):
                C[i, j] += (A[k, i, 0] - B[k, 0, j]) ** 2
            C[i, j] = math_sqrt(C[i, j])
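# Hedged wiring sketch (not from the source): the layout string in the
# docstring matches numba.guvectorize's signature syntax, so in the source
# the kernel is presumably registered roughly like this:
#
#   from numba import guvectorize, float64
#   gpu_euclidean_norm = guvectorize(
#       [(float64[:, :, :], float64[:, :, :], float64[:, :])],
#       '(d,m,p),(d,p,n)->(m,n)', target='cuda')(_gpu_euclidean_norm)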
def wilson_score_interval_continuity_corrected(x: int, n: int,
                                               conflevel: Union[float, None] = 0.95,
                                               z: Union[float, None] = None):
    # LaTeX:
    # $$w_{cc}^- = \frac{2np + z^2 - (z\sqrt{z^2 - 1/n + 4np(1-p) + (4p-2)} + 1)}{2(n+z^2)}$$
    # $$w_{cc}^+ = \frac{2np + z^2 + (z\sqrt{z^2 - 1/n + 4np(1-p) - (4p-2)} + 1)}{2(n+z^2)}$$
    # or, simplified:
    # $$e = 2np + z^2;\,\,\, f = z^2 - 1/n + 4np(1-p);\,\,\, g = (4p - 2);\,\,\, h = 2(n+z^2)$$
    # $$w_{cc}^- = \frac{e - (z\sqrt{f+g} + 1)}{h}$$
    # $$w_{cc}^+ = \frac{e + (z\sqrt{f-g} + 1)}{h}$$
    """Calculates confidence interval for proportions using Wilson Score Interval method with correction for continuity

    `x` - succeeded trials

    `n` - total trials

    `conflevel` - confidence level (0 < float < 1). Defaults to 0.95 if both it and *z* are unset

    `z` - z score. If unset, calculated from the given *conflevel*
    """
    if x > n:
        raise ValueError(
            f"Number of succeeded trials (x) has to be no more than number of total trials (n). x = {x} and n = {n} were passed"
        )

    if z is None:
        # honour an explicitly passed z score; the original unconditionally
        # recomputed it from conflevel, contradicting the docstring
        z = normal_z_score_two_tailed(conflevel)

    p = float(x) / n
    e = 2 * n * p + z**2
    f = z**2 - 1 / n + 4 * n * p * (1 - p)
    g = (4 * p - 2)
    h = 2 * (n + z**2)
    ci = ((e - (z * math_sqrt(f + g) + 1)) / h,
          (e + (z * math_sqrt(f - g) + 1)) / h)
    return ci
def muldiv(number, name, units, self, other):
    if number:
        # relative uncertainties add in quadrature for products and quotients
        uncert = math_sqrt((self.uncert / self.number) ** 2
                           + (other.uncert / other.number) ** 2) * abs(number)
    elif not other.uncert:
        uncert = self.uncert
    elif not self.uncert:
        uncert = other.uncert
    else:
        uncert = 0.0
    if hasattr(number, 'denominator') and number.denominator == 1:
        number = int(number)
    prefu, provenance = inherit_binary(self, other)
    if '°aC' in prefu:
        prefu.remove('°aC')
    return Q(number, name, units, uncert, prefu, provenance)
def svd(a, r):
    # the original compared type(a) to the string 'numpy.ndarray', which is
    # never True (and left A unbound when a already was an ndarray);
    # np.asarray covers both cases
    A = np.asarray(a)
    ATA = np.dot(A.T, A)
    eig_val, eig_vect = np.linalg.eig(ATA)
    # sort values/vectors, make diag matrix
    eig_val, eig_vect_t = sorterForEigenValuesAndVectors(eig_val, eig_vect.T)
    min_len = r
    VT = eig_vect_t[:min_len, :]
    eig_val = np.array([x for x in eig_val if x > 1.e-8])
    eig_vect = (eig_vect_t[:len(eig_val), :]).T
    S = np.array([math_sqrt(x) for x in eig_val])
    E = makeDiagonalFromValues(S, np.shape(VT)[0], min_len)
    U = make_U(S, A, eig_vect, min_len)
    UE = np.dot(U, E)
    UEVT = np.dot(UE, VT)
    return np.real(UEVT)
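# Hedged usage sketch (not from the source): svd above reconstructs a matrix
# from the top-r eigen-triplets of A^T A (singular values are the square
# roots of its eigenvalues); sorterForEigenValuesAndVectors,
# makeDiagonalFromValues and make_U are helpers assumed to exist alongside.
# A call would look like:
#
#   a = [[3.0, 1.0], [1.0, 3.0]]
#   approx = svd(a, r=1)  # rank-1 reconstruction of a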
def addsub(number, name, left, right):
    if left.units != right.units and left.number and right.number:
        raise_QuantError("Units in sum/difference not compatible", name, (left, right))
    prefu, provenance = inherit_binary(left, right)
    units = left.units if left.number else right.units
    uncert = math_sqrt(left.uncert ** 2 + right.uncert ** 2)
    if '°aC' in prefu:
        if units != unitquant['K'].units:
            raise_QuantError("Units °aC in sum/difference can't have compound units", name, (left, right))
        if '°aC' in left.prefu and '°ΔC' in right.prefu:
            prefu.discard('°ΔC')
        elif '°aC' in right.prefu and '°ΔC' in left.prefu and name == '%s + %s':
            prefu.discard('°ΔC')
        elif '°aC' in left.prefu and '°aC' in right.prefu and name == '%s - %s':
            prefu.discard('°aC')
            prefu.add('°ΔC')
        elif left.number and right.number:  # but adding zero is o.k.
            prefu.discard('°aC')
    return Q(number, name, units, uncert, prefu, provenance)
def statistics(self):
    """ Do statistical analysis of population and set 'statted' to True """
    if self.statted:
        return
    log.debug("Running statistical calculations ...")
    raw_sum = 0
    len_pop = len(self)
    for ind in xrange(len_pop):
        raw_sum += self[ind].score

    # Set maximum, minimum and average
    self.stats["rawMax"] = max(self.individuals, key=key_raw_score).score
    self.stats["rawMin"] = min(self.individuals, key=key_raw_score).score
    self.stats["rawAve"] = raw_sum / float(len_pop)

    # Calculate the variance
    tmpvar = 0.0
    #for ind in xrange(len_pop):
    for ind in self:
        #s = self[ind].score - self.stats["rawAve"]
        s = ind.score - self.stats["rawAve"]
        s *= s
        tmpvar += s
    tmpvar /= float(len(self) - 1)

    # Set the standard deviation
    try:
        self.stats["rawDev"] = math_sqrt(tmpvar)
    except ValueError:
        self.stats["rawDev"] = 0.0

    # Set the variance
    self.stats["rawVar"] = tmpvar

    # Set statted flag
    self.statted = True
def wald_interval(x: int, n: int, conflevel: float = 0.95):
    # LaTeX: $$(w^-, w^+) = \hat{p}\,\pm\,z\sqrt{\frac{\hat{p}(1-\hat{p})}{n}}$$
    """Calculates confidence interval for proportions using Wald Interval method

    `x` - succeeded trials

    `n` - total trials

    `conflevel` - confidence level (0 < float < 1). Defaults to 0.95; the
        z score is calculated from it
    """
    if x > n:
        raise ValueError(
            f"Number of succeeded trials (x) has to be no more than number of total trials (n). x = {x} and n = {n} were passed"
        )

    z = normal_z_score_two_tailed(conflevel)

    p = float(x) / n
    sd = math_sqrt((p * (1 - p)) / n)
    z_sd = z * sd
    ci = (p - z_sd, p + z_sd)
    return ci
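# Hedged comparison sketch (not from the source): with the same z for the 95%
# level, the Wald interval for 8/10 successes overshoots 1.0, which is
# exactly the pathology the Wilson interval above avoids.
from math import sqrt as math_sqrt

def _wald_demo(x, n, z=1.959963984540054):
    p = x / n
    sd = math_sqrt(p * (1 - p) / n)
    return (p - z * sd, p + z * sd)

print(_wald_demo(8, 10))  # ~(0.552, 1.048): upper bound exceeds 1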
def statistics(self):
    """ Do statistical analysis of population and set 'statted' to True """
    if self.statted:
        return
    logging.debug("Running statistical calculations")
    raw_sum = 0
    fit_sum = 0
    len_pop = len(self)
    for ind in xrange(len_pop):
        raw_sum += self[ind].score
        #fit_sum += self[ind].fitness

    self.stats["rawMax"] = max(self, key=key_raw_score).score
    self.stats["rawMin"] = min(self, key=key_raw_score).score
    self.stats["rawAve"] = raw_sum / float(len_pop)
    self.stats["Best-Fscore"] = max(self, key=key_raw_score).fscore
    self.stats["Best-Hamdist"] = max(self, key=key_raw_score).hamdist
    self.stats["Best-Accuracy"] = max(self, key=key_raw_score).accuracy
    #self.stats["rawTot"] = raw_sum
    #self.stats["fitTot"] = fit_sum

    tmpvar = 0.0
    for ind in xrange(len_pop):
        s = self[ind].score - self.stats["rawAve"]
        s *= s
        tmpvar += s
    tmpvar /= float(len(self) - 1)
    try:
        self.stats["rawDev"] = math_sqrt(tmpvar)
    except ValueError:  # narrowed from a bare except
        self.stats["rawDev"] = 0.0
    self.stats["rawVar"] = tmpvar
    self.statted = True
# You can import a module or object under a different name using the as
# keyword. This is mainly used when a module or object has a long or
# confusing name.
# For example:
#from math import sqrt as square_root
#print(square_root(102))
# Result: 10.0995 (approximately)

# My variation of the code:
from math import sqrt as math_sqrt

i = float(input("Enter any number you wish to get the square root from: \n"))
if i % 1 == 0:
    result = math_sqrt(int(i))
else:
    result = math_sqrt(i)
if result % 1 == 0:
    result = int(result)
print(result)
from scipy.optimize import minimize_scalar
from math import fabs, sqrt as math_sqrt
from sympy import symbols, diff, lambdify  # needed for the symbolic gradient below
from sympy.plotting import plot3d

if __name__ == "__main__":
    eps = 0.1
    x1, x2, l = symbols("x1 x2 l")
    func = 10 * x1**2 + x2**2
    X = (x1, x2)
    Xk = (10, 10)
    iteration = 1

    grad_func = [diff(func, x_i) for x_i in X]
    grad_func_eval = [grad_func[i].subs(zip(X, Xk)) for i in range(len(X))]
    print(grad_func_eval)
    res = math_sqrt(sum([val**2 for val in grad_func_eval]))
    print(res)
    print("Iteration: ", iteration)
    while res > eps:
        iteration += 1
        X_next = [Xk[i] - l * grad_func_eval[i] for i in range(len(X))]
        l_min = minimize_scalar(lambdify(l, func.subs(zip(X, X_next)))).x
        print("Step: ", l_min)
        X_next = [Xk[i] - l_min * grad_func_eval[i] for i in range(len(X))]
        print("Xk+1: ", X_next)
        grad_func_eval = [
            grad_func[i].subs(zip(X, X_next)) for i in range(len(X))
        ]
        # advance the iterate and refresh the stopping criterion; without
        # these two updates the loop never terminates
        Xk = X_next
        res = math_sqrt(sum([val**2 for val in grad_func_eval]))
        print("Iteration: ", iteration)
def handle_radius(self, msg, val):
    # presumably msg is a key like 'radius2' holding a squared radius; the
    # square root is stored under the key minus its last character ('radius')
    self.game[msg] = int(val)
    self.game[msg[:-1]] = math_sqrt(int(val))
def build_lists(self):
    ''' build 3 lists: faces, normals, vertices '''
    init_time = time.time()
    # will return 3 lists
    vertices = []
    faces = []
    face_normals = []
    vertex_normals = []
    normals = []
    #print v.GetNodeNormal()
    #dict_vert_normals={}
    # first, build nodes list:
    #nodes_this = []
    #for i in range(self.get_nb_nodes()):
    #    node = self._mesh_data_source.nodeValue(i)
    #    vertices.append([node.X(),node.Y(), node.Z()])
    #    nodes_this.append(node.this)
    # Traverse faces for this triangular mesh
    j = 0
    for i in range(self.get_nb_faces()):
        triangle_face = self._mesh_data_source.faceValue(i)
        # First node
        node_0 = triangle_face.GetNode(0)
        n0_X = node_0.X()
        n0_Y = node_0.Y()
        n0_Z = node_0.Z()
        vertices.append([n0_X, n0_Y, n0_Z])
        # Second node
        node_1 = triangle_face.GetNode(1)
        n1_X = node_1.X()
        n1_Y = node_1.Y()
        n1_Z = node_1.Z()
        vertices.append([n1_X, n1_Y, n1_Z])
        # Third node
        node_2 = triangle_face.GetNode(2)
        n2_X = node_2.X()
        n2_Y = node_2.Y()
        n2_Z = node_2.Z()
        vertices.append([n2_X, n2_Y, n2_Z])
        # append faces indices
        faces.append([j, j + 1, j + 2])
        j = j + 3
        # Compute normal for this face:
        # the cross product of two edges has to be computed
        u0 = n1_X - n0_X
        u1 = n1_Y - n0_Y
        u2 = n1_Z - n0_Z
        v0 = n2_X - n0_X
        v1 = n2_Y - n0_Y
        v2 = n2_Z - n0_Z
        n_x = u1 * v2 - u2 * v1
        n_y = u2 * v0 - u0 * v2
        n_z = u0 * v1 - u1 * v0
        n_magnitude = math_sqrt(n_x * n_x + n_y * n_y + n_z * n_z)
        #print n_magnitude
        #P0 = gp_XYZ(n0_X,n0_Y,n0_Z)
        #P1 = gp_XYZ(n1_X,n1_Y,n1_Z)
        #P2 = gp_XYZ(n2_X,n2_Y,n1_Z)
        #face_normal = (P1-P0)^(P2-P0)
        #if face_normal.Modulus()>0:
        #    face_normal.Normalize()
        #fn = [face_normal.X(),face_normal.Y(),face_normal.Z()]
        if n_magnitude > 0:
            # normalize and use the same normal for all three vertices
            fn = [n_x / n_magnitude, n_y / n_magnitude, n_z / n_magnitude]
            face_normals.append(fn)
            face_normals.append(fn)
            face_normals.append(fn)
    #print faces
    #faces = range(self.get_nb_faces()*3)
    #print faces
    print("build_list method performed in %f seconds." % (time.time() - init_time))
    self._vertices = vertices
    self._faces = faces
    self._face_normals = face_normals
    return True  # return vertices, faces, face_normals
def r6_dnn_image_display(target_dirname, dnn_image_obj=None, show_fig=False):
    LOGGER.info('{}: r6: Turning upsampled envelope into image...'.format(target_dirname))
    if dnn_image_obj is None:
        dnn_image_obj = loadmat(os_path_join(target_dirname, DNN_IMAGE_FNAME))
    beam_position_x_up = dnn_image_obj['beam_position_x_up']
    depth = dnn_image_obj['depth']
    envUp_dB = dnn_image_obj['envUp_dB']
    env_up = dnn_image_obj['envUp']
    LOGGER.debug('{}: r6: Finished loading vars'.format(target_dirname))
    x = np_squeeze(beam_position_x_up)  # beam_position_x_up
    y = np_squeeze(depth)  # depth
    LOGGER.debug('{}: r6: Finished squeezing x, y'.format(target_dirname))

    fig, ax = plt_subplots()
    LOGGER.debug('{}: r6: Finished plt.figure'.format(target_dirname))
    image = ax.imshow(envUp_dB, vmin=-60, vmax=0, cmap='gray', aspect='auto',
                      extent=[x[0]*1000, x[-1]*1000, y[-1]*1000, y[0]*1000])
    ax.set_aspect('equal')
    LOGGER.debug('{}: r6: Finished plt.imshow'.format(target_dirname))
    fig.colorbar(image)
    LOGGER.debug('{}: r6: Finished plt.colorbar'.format(target_dirname))
    # plt_xlabel('lateral (mm)', fontsize=FONT_SIZE)
    ax.set_xlabel('lateral (mm)', fontsize=FONT_SIZE)
    # plt_ylabel('axial (mm)', fontsize=FONT_SIZE)
    ax.set_ylabel('axial (mm)', fontsize=FONT_SIZE)
    LOGGER.debug('{}: r6: Finished plt.xlabel/ylabel'.format(target_dirname))
    # if show_fig is True:
    #     plt_show(block=False)

    # Save image to file
    dnn_image_path = os_path_join(target_dirname, DNN_IMAGE_SAVE_FNAME)
    fig.savefig(dnn_image_path)
    plt_close(fig)
    LOGGER.debug('{}: r6: Finished saving figure'.format(target_dirname))

    # scan_battery_dirname = os_path_dirname(target_dirname)
    # process_scripts_dirpath = os_path_join(scan_battery_dirname, PROCESS_SCRIPTS_DIRNAME)
    # circle_radius = load_single_value(process_scripts_dirpath, CIRCLE_RADIUS_FNAME)
    # circle_coords_x = load_single_value(process_scripts_dirpath, CIRCLE_COORDS_X_FNAME)
    # circle_coords_y = load_single_value(process_scripts_dirpath, CIRCLE_COORDS_Y_FNAME)
    # xx, yy = np_meshgrid(x, y)
    # mask_in = get_circular_mask(xx, yy, (circle_coords_x, circle_coords_y), circle_radius)
    #
    # # create rectangular region outside lesion
    # box_xmin_right = load_single_value(process_scripts_dirpath, BOX_XMIN_RIGHT_FNAME)
    # box_xmax_right = load_single_value(process_scripts_dirpath, BOX_XMAX_RIGHT_FNAME)
    # box_xmin_left = load_single_value(process_scripts_dirpath, BOX_XMIN_LEFT_FNAME)
    # box_xmax_left = load_single_value(process_scripts_dirpath, BOX_XMAX_LEFT_FNAME)
    #
    # # Box shares y position and height with circle (diameter)
    # ymin = circle_coords_y - circle_radius
    # ymax = circle_coords_y + circle_radius
    # mask_out_left = (xx >= box_xmin_left) * (xx <= box_xmax_left) * (yy >= ymin) * (yy <= ymax)
    # mask_out_right = get_rectangle_mask(xx, yy, box_xmin_right, box_xmax_right, ymin, ymax)
    # mask_out = mask_out_left | mask_out_right

    # Display circle and boxes
    # with_circle = envUp_dB.copy()
    # with_circle[mask_out_left + mask_in + mask_out_right] = 0
    # plt.figure(figsize=(12, 16))
    # plt.imshow(with_circle, vmin=-60, vmax=0, cmap='gray', aspect='auto',
    #            extent=[beam_position_x_up[0]*1000, beam_position_x_up[-1]*1000,
    #                    depth[-1]*1000, depth[0]*1000])
    # plt.colorbar()
    # FONT_SIZE = 20
    # plt.xlabel('lateral (mm)', fontsize=FONT_SIZE)
    # plt.ylabel('axial (mm)', fontsize=FONT_SIZE)
    # plt.show()

    # Calculate image statistics
    # print('r6: env_up.shape =', env_up.shape)
    mask_in, mask_out = get_masks(target_dirname)
    LOGGER.debug('{}: r6: Finished loading masks'.format(target_dirname))
    # print('r6: mask_in.shape={}, mask_out.shape={}'.format(mask_in.shape, mask_out.shape))
    env_up_inside_lesion = env_up[mask_in]
    mean_in = env_up_inside_lesion.mean()
    var_in = env_up_inside_lesion.var(ddof=1)  # ddof=1 matches Matlab's sample variance
    env_up_outside_lesion = env_up[mask_out]
    mean_out = env_up_outside_lesion.mean()
    var_out = env_up_outside_lesion.var(ddof=1)  # ddof=1 matches Matlab's sample variance
    LOGGER.debug('{}: r6: Finished mean and var calculations'.format(target_dirname))
    CR = -20 * math_log10(mean_in / mean_out)
    CNR = 20 * math_log10(abs(mean_in - mean_out) / math_sqrt(var_in + var_out))
    SNR = mean_out / math_sqrt(var_out)
    LOGGER.debug('{}: r6: Finished speckle stats calculations'.format(target_dirname))

    # Save image statistics to file
    speckle_stats = [CR, CNR, SNR, mean_in, mean_out, var_in, var_out]
    speckle_stats_path = os_path_join(target_dirname, SPECKLE_STATS_FNAME)
    with open(speckle_stats_path, 'w') as f:
        f.write("\n".join([str(item) for item in speckle_stats]))
    LOGGER.debug('{}: r6: Finished saving .txt'.format(target_dirname))

    # Also save image statistics as json, a redundant but more readable format
    speckle_stats_dict = {
        'CR': CR,
        'CNR': CNR,
        'SNR': SNR,
        'mean_inside_lesion': mean_in,
        'variance_inside_lesion': var_in,
        'mean_outside_lesion': mean_out,
        'variance_outside_lesion': var_out,
    }
    speckle_stats_dict_path = os_path_join(target_dirname, SPECKLE_STATS_DICT_FNAME)
    with open(speckle_stats_dict_path, 'w') as f:
        json_dump(speckle_stats_dict, f, indent=4)
    LOGGER.debug('{}: r6: Finished saving .json'.format(target_dirname))
    LOGGER.info('{}: r6 Done'.format(target_dirname))
def magnitude(self):
    return math_sqrt(self.x**2 + self.y**2)
def __abs__(self):
    return math_sqrt(Vector2.dot(self, self))
def get_sqrt(num: float) -> float:
    print('Getting square root of', num)
    return math_sqrt(num)