def tst_identical(self):
    from dials.algorithms.integration.fit import fit_profile
    from scitbx.array_family import flex
    from tst_profile_helpers import gaussian

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Copy profile
    c = p.deep_copy()
    b = flex.double(flex.grid(9, 9, 9), 0)
    m = flex.bool(flex.grid(9, 9, 9), True)

    # Fit
    fit = fit_profile(p, m, c, b)
    I = fit.intensity()
    V = fit.variance()

    # Test intensity is the same
    eps = 1e-7
    assert abs(I - flex.sum(p)) < eps
    assert abs(V - I) < eps

    print 'OK'
def compute_functional_and_gradients(self):
    # HATTNE: this function is never entered
    print "HATTNE entering mark1.compute_functional_and_gradients"

    self.model_calcx = self.spotcx.deep_copy()
    self.model_calcy = self.spotcy.deep_copy()

    for x in xrange(64):
        selection = self.selections[x]
        self.model_calcx.set_selected(selection, self.model_calcx + self.x[2 * x])
        self.model_calcy.set_selected(selection, self.model_calcy + self.x[2 * x + 1])

    squares = self.delrsq_functional(self.model_calcx, self.model_calcy)
    f = flex.sum(squares)

    calc_obs_diffx = self.model_calcx - self.spotfx
    calc_obs_diffy = self.model_calcy - self.spotfy

    gradients = flex.double([0.0] * 128)
    for x in xrange(64):
        selection = self.selections[x]
        gradients[2 * x] = 2.0 * flex.sum(calc_obs_diffx.select(selection))
        gradients[2 * x + 1] = 2.0 * flex.sum(calc_obs_diffy.select(selection))

    print "Functional ", math.sqrt(flex.mean(squares))
    return f, gradients
def _beam_direction_variance_list(self, detector, reflections):
    '''Calculate the variance in beam direction for each spot.

    Params:
        detector The detector model
        reflections The list of reflections

    Returns:
        The list of variances

    '''
    from scitbx.array_family import flex

    # Get the reflection columns
    shoebox = reflections['shoebox']
    bbox = reflections['bbox']
    xyz = reflections['xyzobs.px.value']

    # Loop through all the reflections
    variance = []
    for r in range(len(reflections)):

        # Get the coordinates and values of valid shoebox pixels
        mask = shoebox[r].mask != 0
        coords = shoebox[r].coords(mask)
        values = shoebox[r].values(mask)
        s1 = shoebox[r].beam_vectors(detector, mask)

        # Calculate the beam vector at the centroid
        panel = shoebox[r].panel
        s1_centroid = detector[panel].get_pixel_lab_coord(xyz[r][0:2])
        angles = s1.angle(s1_centroid, deg=False)
        variance.append(flex.sum(values * (angles**2)) / (flex.sum(values) - 1))

    # Return a list of variances
    return flex.double(variance)
def r_split(self, other, assume_index_matching=False, use_binning=False):
    # Used in Boutet et al. (2012), which credits it to Owen et al.
    # (2006). See also R_mrgd_I in Diederichs & Karplus (1997)?
    # Barends cites: Collaborative Computational Project Number 4. The
    # CCP4 suite: programs for protein crystallography. Acta
    # Crystallogr. Sect. D-Biol. Crystallogr. 50, 760-763 (1994), and
    # White, T. A. et al. CrystFEL: a software suite for snapshot
    # serial crystallography. J. Appl. Cryst. 45, 335-341 (2012).
    if not use_binning:
        assert other.indices().size() == self.indices().size()
        if self.data().size() == 0:
            return None

        if assume_index_matching:
            (o, c) = (self, other)
        else:
            (o, c) = self.common_sets(other=other, assert_no_singles=True)

        # The case where the denominator is less than or equal to zero is
        # pathological and should never arise in practice.
        den = flex.sum(flex.abs(o.data() + c.data()))
        assert den > 0
        return math.sqrt(2) * flex.sum(flex.abs(o.data() - c.data())) / den

    assert self.binner is not None
    results = []
    for i_bin in self.binner().range_all():
        sel = self.binner().selection(i_bin)
        results.append(r_split(self.select(sel), other.select(sel),
                               assume_index_matching=assume_index_matching,
                               use_binning=False))
    return binned_data(binner=self.binner(), data=results, data_fmt="%7.4f")
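# Note: the unbinned branch above computes the usual R_split statistic,
#
#   R_split = \frac{1}{\sqrt{2}} \frac{\sum |I_1 - I_2|}{\frac{1}{2} \sum |I_1 + I_2|}
#           = \sqrt{2} \, \frac{\sum |I_1 - I_2|}{\sum |I_1 + I_2|}
#
# where I_1, I_2 are the two half-dataset intensities; the assertion
# den > 0 guards the pathological zero-denominator case noted above.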
def tst_with_flat_background(self):
    from dials.algorithms.integration.fit import fit_profile
    from scitbx.array_family import flex
    from tst_profile_helpers import gaussian

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Create the data: a copy of the profile plus a flat background
    c0 = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    b = flex.double(flex.grid(9, 9, 9), 5)
    m = flex.bool(flex.grid(9, 9, 9), True)
    c = c0 + b

    # Fit
    fit = fit_profile(p, m, c, b)
    I = fit.intensity()
    V = fit.variance()

    # Test intensity is the same
    eps = 1e-7
    assert abs(I - flex.sum(c0)) < eps
    assert abs(V - (flex.sum(c0) + flex.sum(b))) < eps

    print 'OK'
def r1_factor(self, other, scale_factor=None, assume_index_matching=False,
              use_binning=False):
    """Get the R1 factor according to this formula

    .. math::
       R1 = \\frac{\\sum \\big| |F| - k |F'| \\big|}{\\sum |F|}

    where F is self.data() and F' is other.data() and
    k is the factor to put F' on the same scale as F.
    """
    assert not use_binning or self.binner() is not None
    assert other.indices().size() == self.indices().size()
    if not use_binning:
        if self.data().size() == 0:
            return None
        if assume_index_matching:
            o, c = self, other
        else:
            o, c = self.common_sets(other=other, assert_no_singles=True)
        o = flex.abs(o.data())
        c = flex.abs(c.data())
        if scale_factor is None:
            den = flex.sum(c * c)
            if den != 0:
                c *= flex.sum(o * c) / den
        else:
            c *= scale_factor
        return flex.sum(flex.abs(o - c)) / flex.sum(o)
    results = []
    for i_bin in self.binner().range_all():
        sel = self.binner().selection(i_bin)
        results.append(r1_factor(self.select(sel), other.select(sel),
                                 scale_factor.data[i_bin], assume_index_matching))
    return binned_data(binner=self.binner(), data=results, data_fmt="%7.4f")
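# Note: when scale_factor is None, the branch above applies the
# least-squares scale minimizing \sum (|F| - k |F'|)^2:
#
#   \frac{d}{dk} \sum (o_i - k c_i)^2 = 0
#   \implies k = \frac{\sum o_i c_i}{\sum c_i^2}
#
# which is exactly the in-place update c *= flex.sum(o * c) / den.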
def compute_functional_and_gradients(self):
    from scitbx.array_family import flex
    import math

    self.model_mean_x = flex.double(len(self.observed_x))
    self.model_mean_y = flex.double(len(self.observed_x))

    for x in xrange(6):
        selection = (self.master_groups == x)
        self.model_mean_x.set_selected(selection, self.x[2 * x])
        self.model_mean_y.set_selected(selection, self.x[2 * x + 1])

    delx = self.observed_x - self.model_mean_x
    dely = self.observed_y - self.model_mean_y
    delrsq = delx * delx + dely * dely
    f = flex.sum(delrsq)

    gradients = flex.double([0.] * 12)
    for x in xrange(6):
        selection = (self.master_groups == x)
        gradients[2 * x] = -2. * flex.sum(delx.select(selection))
        gradients[2 * x + 1] = -2. * flex.sum(dely.select(selection))

    if self.verbose:
        print "Functional ", math.sqrt(flex.mean(delrsq))
    self.count_iterations += 1
    return f, gradients
def Hn(m):
    # Normalized Shannon entropy of the positive values of map m
    import math
    from scitbx.array_family import flex
    m_ = m
    sc = math.log(m_.size())
    s = m_ > 0
    m_ = m_.select(s.iselection())
    m_ = m_ / flex.sum(m_)
    return -flex.sum(m_ * flex.log(m_)) / sc
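# Note: Hn is a normalized Shannon entropy over the positive map values.
# With p_i = m_i / \sum_j m_j,
#
#   H_n(m) = -\frac{\sum_i p_i \ln p_i}{\ln N}, \quad N = m.size(),
#
# so the result lies in [0, 1], with 1 corresponding to a perfectly
# flat map.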
def __init__(OO):
    # OK, let's figure stuff out about the multiphoton residual, after
    # fitting with 0 + 1 photons. Only count the residual for x larger
    # than one_mean + 3 * zero_sigma.
    x = histogram.slot_centers()
    y_calc = flex.double(x.size(), 0)
    for g in gaussians:
        y_calc += g(x)
    xfloor = gaussians[1].params[1] + 3. * gaussians[0].params[2]
    selection = (histogram.slot_centers() > xfloor)
    OO.fit_xresid = histogram.slot_centers().select(selection)
    OO.fit_yresid = (histogram.slots().as_double().select(selection)
                     - y_calc.select(selection))
    OO.xweight = (OO.fit_xresid - gaussians[0].params[1]) / (
        gaussians[1].params[1] - gaussians[0].params[1])
    OO.additional_photons = flex.sum(OO.xweight * OO.fit_yresid)

    # Now the other half of the data: the part supposedly fit by the
    # 0- and 1-photon gaussians
    OO.qual_xresid = histogram.slot_centers().select(~selection)
    ysignal = histogram.slots().as_double().select(~selection)
    OO.qual_yresid = ysignal - y_calc.select(~selection)
    # Not sure how to treat weights for channels with zero observations;
    # default to 1
    _variance = ysignal.deep_copy().set_selected(ysignal == 0., 1.)
    OO.weight = 1. / _variance
    OO.weighted_numerator = OO.weight * (OO.qual_yresid * OO.qual_yresid)
    OO.sumsq_signal = flex.sum(ysignal * ysignal)
    OO.sumsq_residual = flex.sum(OO.qual_yresid * OO.qual_yresid)
def tst_with_flat_background_partial(self):
    from dials.algorithms.integration.fit import fit_profile
    from scitbx.array_family import flex
    from tst_profile_helpers import gaussian

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Create the data: a copy of the profile plus a flat background
    c0 = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    b = flex.double(flex.grid(9, 9, 9), 5)
    m = flex.bool(flex.grid(9, 9, 9), True)
    c = c0 + b

    # Get the partial profiles
    pp = p[0:5, :, :]
    mp = m[0:5, :, :]
    cp = c[0:5, :, :]
    bp = b[0:5, :, :]
    c0p = c0[0:5, :, :]

    # Fit
    fit = fit_profile(pp, mp, cp, bp)
    I = fit.intensity()
    V = fit.variance()

    # Test intensity is the same
    eps = 1e-7
    assert abs(I - flex.sum(c0)) < eps
    assert abs(V - (flex.sum(c0p) + flex.sum(bp))) < eps

    print 'OK'
def circ_mean(t, deg=True):
    assert len(t) > 0
    import math
    from scitbx.array_family import flex
    if deg:
        t = math.pi * (t / 180)
    sx = flex.sum(flex.cos(t)) / len(t)
    sy = flex.sum(flex.sin(t)) / len(t)
    return math.degrees(math.atan2(sy, sx))
def circ_len(t, deg=True):
    assert len(t) > 0
    import math
    from scitbx.array_family import flex
    if deg:
        t = math.pi * (t / 180)
    sx = flex.sum(flex.cos(t)) / len(t)
    sy = flex.sum(flex.sin(t)) / len(t)
    return math.sqrt(sx**2 + sy**2)
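# Usage sketch for the two helpers above (hypothetical values). The
# vector-mean construction handles wrap-around at 0/360 correctly, and
# circ_len returns the mean resultant length (1.0 = perfectly aligned):
from scitbx.array_family import flex
angles = flex.double([350.0, 10.0, 5.0])
print(circ_mean(angles))  # ~1.7 deg; the naive arithmetic mean is ~121.7
print(circ_len(angles))   # ~0.99; the directions are tightly clustered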
def r_factor(x, y, use_scale):
    from scitbx.array_family import flex
    try:
        x = flex.abs(x.data())
        y = flex.abs(y.data())
    except Exception:
        pass
    sc = 1
    if use_scale:
        sc = scale(x, y)
    return flex.sum(flex.abs(x - sc * y)) / flex.sum(x)
def compute_functional_and_gradients(self):
    slope = self.x[0]
    y_intercept = self.x[1]
    y_calc = slope * self.x_obs + y_intercept
    y_diff = self.y_obs - y_calc
    f = flex.sum(flex.pow2(y_diff))
    g = flex.double([
        flex.sum(-2 * y_diff * self.x_obs),
        flex.sum(-2 * y_diff)])
    return f, g
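# Note: the gradient entries follow from differentiating the sum of
# squared residuals f(m, b) = \sum_i (y_i - (m x_i + b))^2:
#
#   \partial f / \partial m = -2 \sum_i (y_i - \hat{y}_i) x_i
#   \partial f / \partial b = -2 \sum_i (y_i - \hat{y}_i)
#
# with \hat{y}_i = m x_i + b, matching the two flex.sum terms above.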
def compute_functional_and_gradients(self):
    y_calc = self.compute_y_calc()
    delta_y = self.y_obs - y_calc
    f = flex.sum(flex.pow2(delta_y))
    g = flex.double()
    for funct in self.functions:
        partial_ders = funct.partial_derivatives(self.x_obs)
        for partial in partial_ders:
            g.append(-2 * flex.sum(delta_y * partial))
    return f, g
def scale(x, y):
    assert type(x) == type(y)
    if type(x) == miller.array:
        x = x.data()
        y = y.data()
    x = flex.abs(x)
    y = flex.abs(y)
    d = flex.sum(y * y)
    if d == 0:
        return 1
    else:
        return flex.sum(x * y) / d
def npp(hklin):
    from iotbx.reflection_file_reader import any_reflection_file
    from xia2.Toolkit.NPP import npp_ify, mean_variance
    from scitbx.array_family import flex
    import math
    import sys

    reader = any_reflection_file(hklin)
    mtz_object = reader.file_content()
    intensities = [ma for ma in reader.as_miller_arrays(merge_equivalents=False)
                   if ma.info().labels == ['I', 'SIGI']][0]
    indices = intensities.indices()

    # merging: use external variance, i.e. variances derived from the
    # SIGI column
    merger = intensities.merge_equivalents(use_internal_variance=False)
    mult = merger.redundancies().data()
    imean = merger.array()
    unique = imean.indices()
    iobs = imean.data()
    # scale up the variance to account for the sqrt(multiplicity)
    # effective scaling
    variobs = (imean.sigmas() ** 2) * mult.as_double()

    all = flex.double()
    cen = flex.double()

    for hkl, i, v, m in zip(unique, iobs, variobs, mult):
        # only consider reflections with a meaningful number of observations
        if m < 3:
            continue

        sel = indices == hkl
        data = intensities.select(sel).data()
        assert m == len(data)

        _x, _y = npp_ify(data, input_mean_variance=(i, v))

        # perform linear regression on (i) all data and (ii) the subset
        # between +/- 2 sigma
        sel = flex.abs(_x) < 2
        _x_ = _x.select(sel)
        _y_ = _y.select(sel)

        fit_all = flex.linear_regression(_x, _y)
        fit_cen = flex.linear_regression(_x_, _y_)

        all.append(fit_all.slope())
        cen.append(fit_cen.slope())

        print '%3d %3d %3d' % hkl, '%.2f %.2f %.2f' % (i, v, i / math.sqrt(v)), \
            '%.2f %.2f' % (fit_all.slope(), fit_cen.slope()), '%d' % m

    sys.stderr.write('Mean gradients: %.2f %.2f\n' %
                     (flex.sum(all) / all.size(), flex.sum(cen) / cen.size()))
def scale_down_image(in_image, out_image, scale_factor):
    '''Read in the data from in_image, apply the statistically valid scale
    factor to the data and write this out as out_image; retain the header
    as we go.'''
    from scitbx.array_family import flex

    image, header = read_image_to_flex_array(in_image)
    scaled_image = scale_down_array(image.as_1d(), scale_factor)
    sum_image = flex.sum(image.as_1d().select(image.as_1d() > 0))
    sum_scaled_image = flex.sum(scaled_image.select(scaled_image > 0))
    write_image_from_flex_array(out_image, scaled_image, header)
    return
def compute_functional_and_gradients(self):
    self.a = self.x

    y_calc = flex.double(self.x_obs.size(), 0)
    for i in range(self.n):
        y_calc = y_calc + self.a[i] * flex.pow(self.x_obs, i)
    y_diff = self.y_obs - y_calc
    f = flex.sum(y_diff * y_diff / self.w_obs)

    g = flex.double(self.n, 0)
    for i in range(self.n):
        g[i] = -flex.sum(2.0 * (y_diff / self.w_obs) * flex.pow(self.x_obs, i))
    print f
    return f, g
def calc_k(fo, fc):
    "scale factor for (fo-k*fc)**2, only similar to factor for abs(fo-k*fc)"
    from scitbx.array_family import flex
    k_num = flex.sum(fo * fc)
    k_den = flex.sum(fc * fc)
    assert k_den != 0
    assert k_den**2 != 0
    k = k_num / k_den
    k_d_num = fo * k_den - k_num * 2 * fc
    k_d = k_d_num / k_den**2
    k_d2 = -2 * (k_num * k_den + 2 * k_d_num * fc) / k_den**3
    return k, k_d, k_d2
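# Note: writing N = \sum f_o f_c and D = \sum f_c^2, the scale is
# k = N / D, and differentiating element-wise with respect to f_{c,i}:
#
#   \partial k / \partial f_{c,i} = \frac{f_{o,i} D - 2 N f_{c,i}}{D^2}
#   \partial^2 k / \partial f_{c,i}^2
#       = \frac{-2 \big(N D + 2 (f_{o,i} D - 2 N f_{c,i}) f_{c,i}\big)}{D^3}
#
# which is exactly what k_d and k_d2 implement above.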
def _rmsds_core(self, reflections): """calculate unweighted RMSDs for the specified reflections""" resid_x = flex.sum(reflections['x_resid2']) resid_y = flex.sum(reflections['y_resid2']) resid_phi = flex.sum(reflections['phi_resid2']) n = len(reflections) rmsds = (sqrt(resid_x / n), sqrt(resid_y / n), sqrt(resid_phi / n)) return rmsds
def process_one_gradient(result):
    # copy gradients out of the result
    dX = result[self._grad_names[0]]
    dY = result[self._grad_names[1]]
    dZ = result[self._grad_names[2]]
    # reset result
    for k in result.keys():
        result[k] = None
    # add new keys
    grads = self._concatenate_gradients(dX, dY, dZ)
    result['dL_dp'] = flex.sum(w_resid * grads)
    result['curvature'] = flex.sum(weights * grads * grads)
    return result
def wR2(self, cutoff_factor=None):
    if cutoff_factor is None:
        return math.sqrt(2 * self.objective_data_only)
    fo_sq = self.observations.fo_sq
    strong = fo_sq.data() >= cutoff_factor * fo_sq.sigmas()
    fo_sq = fo_sq.select(strong)
    fc_sq = self.fc_sq.select(strong)
    wght = self.weights.select(strong)
    fc_sq = fc_sq.data()
    fo_sq = fo_sq.data()
    fc_sq *= self.scale_factor()
    wR2 = (flex.sum(wght * flex.pow2(fo_sq - fc_sq))
           / flex.sum(wght * flex.pow2(fo_sq)))
    return math.sqrt(wR2)
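# Note: the cutoff branch evaluates the standard weighted R-factor on
# intensities (cf. the SHELX-style definition),
#
#   wR2 = \sqrt{ \frac{\sum w (F_o^2 - k F_c^2)^2}{\sum w (F_o^2)^2} }
#
# restricted to reflections with F_o^2 >= cutoff_factor * sigma(F_o^2).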
def _beam_direction_variance_list(self, detector, reflections,
                                  centroid_definition="s1"):
    """Calculate the variance in beam direction for each spot.

    Params:
        detector The detector model
        reflections The list of reflections
        centroid_definition ENUM "com" or "s1"

    Returns:
        The list of variances

    """
    from scitbx.array_family import flex

    # Get the reflection columns
    shoebox = reflections["shoebox"]
    xyz = reflections["xyzobs.px.value"]

    # Loop through all the reflections
    variance = []

    if centroid_definition == "com":
        # Calculate the beam vector at the centroid
        s1_centroid = []
        for r in range(len(reflections)):
            panel = shoebox[r].panel
            s1_centroid.append(detector[panel].get_pixel_lab_coord(xyz[r][0:2]))
    else:
        s1_centroid = reflections["s1"]

    for r in range(len(reflections)):
        # Get the coordinates and values of valid shoebox pixels.
        # FIXME: Kabsch (2010), section 3.1, step (v) calls for background
        # subtraction, which appears to be missing here.
        mask = shoebox[r].mask != 0
        values = shoebox[r].values(mask)
        s1 = shoebox[r].beam_vectors(detector, mask)
        angles = s1.angle(s1_centroid[r], deg=False)
        if flex.sum(values) > 1:
            variance.append(flex.sum(values * (angles**2)) / (flex.sum(values) - 1))

    # Return a list of variances
    return flex.double(variance)
def check_pixel_histogram_fit(hist, gaussians):
    assert gaussians is not None
    # if gaussians is None:
    #     # Presumably the peak fitting failed in some way
    #     print "Skipping pixel %s" % str(pixel)
    #     continue
    zero_peak_diff = gaussians[0].params[1]
    if len(gaussians) < 2:
        raise PixelFitError("Only one gaussian!")

    y_obs = hist.slots().as_double()
    x = hist.slot_centers()
    y_calc = flex.double(y_obs.size(), 0)
    for g in gaussians:
        y_calc += g(x)
    residual = y_obs - y_calc

    # check the overall residual
    if flex.max(residual) / flex.sum(hist.slots()) > 0.015:
        raise PixelFitError("Bad fit residual: %f" % (
            flex.max(residual) / flex.sum(hist.slots())))

    # check the residual around the zero photon peak
    zero_gaussian = gaussians[0]
    selection = (x < zero_gaussian.params[1] + 1 * zero_gaussian.sigma)
    # if (flex.max(residual.select(selection)) / flex.sum(hist.slots()) > 0.008
    #         or flex.min(residual.select(selection)) / flex.sum(hist.slots()) < -0.0067):
    #     raise PixelFitError("Bad fit residual around zero photon peak")

    # check the residual around the one photon peak
    one_gaussian = gaussians[1]
    selection = (x > one_gaussian.params[1] - 1.4 * one_gaussian.sigma)
    if selection.count(True) == 0:
        raise PixelFitError("Bad fit residual around one photon peak")
    max_residual_sel = flex.max(residual.select(selection))
    if max_residual_sel > 20 and max_residual_sel > 1.2 * one_gaussian.params[0]:
        raise PixelFitError("Bad fit residual: %f" % max_residual_sel)

    gain = gaussians[1].params[1] - gaussians[0].params[1]
    # disabled gain-vs-estimate check (references names not in scope here)
    if 0 and estimated_gain is not None and abs(gain - estimated_gain) > 0.5 * estimated_gain:
        print "bad gain!!!!!", pixel, gain
    # elif (one_gaussian.sigma / zero_gaussian.sigma) > 1.9:
    #     raise PixelFitError("Bad sigma ratio: %.1f, %.1f"
    #                         % (one_gaussian.sigma, zero_gaussian.sigma))
    elif gaussians[1].sigma < (0.5 * gaussians[0].sigma):
        raise PixelFitError("Bad sigma: %f" % gaussians[1].sigma)
    elif gain < (4 * gaussians[0].sigma):
        raise PixelFitError("Bad gain: %f" % gain)
    elif gain > (20 * gaussians[0].sigma):  # XXX is 20 too low?
        raise PixelFitError("Bad gain: %f" % gain)
def compute_functional_and_gradients(self):
    self.a = self.x
    residuals = (self.datay - self.a[0] * self.datax * self.datax
                 - self.a[1] * self.datax - self.a[2])
    f = flex.sum(0.5 * residuals * residuals)
    g = flex.double(self.n)
    dR_da = -self.datax * self.datax
    dR_db = -self.datax
    dR_dc = flex.double(len(self.datax), -1)
    g[0] = flex.sum(residuals * dR_da)
    g[1] = flex.sum(residuals * dR_db)
    g[2] = flex.sum(residuals * dR_dc)
    # self.print_step("LBFGS stp", f)
    return f, g
def _rmsds_core(self, reflections): """calculate unweighted RMSDs for the specified reflections""" resid_x = flex.sum(reflections["x_resid2"]) resid_y = flex.sum(reflections["y_resid2"]) resid_phi = flex.sum(reflections["phi_resid2"]) n = len(reflections) rmsds = ( math.sqrt(resid_x / n), math.sqrt(resid_y / n), math.sqrt(resid_phi / n), ) return rmsds
def r_merge_per_batch(pairs):
    """Calculate R_merge for the list of (merged-I, I) pairs."""
    merged_indices, unmerged_indices = zip(*pairs)
    unmerged_Ij = intensities.data().select(flex.size_t(unmerged_indices))
    merged_Ij = merged_intensities.data().select(flex.size_t(merged_indices))
    numerator = flex.sum(flex.abs(unmerged_Ij - merged_Ij))
    denominator = flex.sum(unmerged_Ij)
    if denominator > 0:
        return numerator / denominator
    return 0
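# Note: summing these per-batch numerators and denominators over all
# batches reproduces the usual definition
#
#   R_merge = \frac{\sum_{hkl} \sum_i |I_i(hkl) - \langle I(hkl) \rangle|}
#                  {\sum_{hkl} \sum_i I_i(hkl)}
#
# here restricted to the observations belonging to a single batch.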
def enlarge(data, factor, sigma=0.1, full=False):
    n, n = data.focus()
    m = int(n * factor)
    x = flex.double()
    y = flex.double()
    vals = flex.double()
    sigma = sigma * factor
    new_data = flex.double(flex.grid(m, m), -9)
    visited = flex.bool(flex.grid(m, m), False)
    oo = n / 2.0
    for ii in range(n):
        for jj in range(n):
            dd = smath.sqrt((ii - oo)**2.0 + (jj - oo)**2.0)
            if dd <= oo:
                nx = ii * factor
                ny = jj * factor
                x.append(nx)
                y.append(ny)
                vals.append(data[(ii, jj)])
                new_data[(int(nx), int(ny))] = data[(ii, jj)]
                if not full:
                    visited[(int(nx), int(ny))] = True

    not_visited = ~visited
    not_visited = not_visited.iselection()
    # now we need to loop over all non-visited pixel values and fill them
    # with a Gaussian-weighted average of the known values
    for pixel in not_visited:
        index = n_dim_index_from_one_dim(pixel, [m, m])
        nvx = index[1]
        nvy = index[0]
        dx = x - nvx
        dy = y - nvy
        dd = (dx * dx + dy * dy)
        ss = flex.exp(-dd / (sigma * sigma))
        nv = flex.sum(ss * vals) / (1e-12 + flex.sum(ss))
        new_data[(nvx, nvy)] = nv
        visited[(nvx, nvy)] = True

    not_visited = ~visited
    not_visited = not_visited.iselection()
    return new_data

    # NOTE: everything below is unreachable after the return above
    oo = m / 2.0
    for ii in range(m):
        for jj in range(m):
            dd = smath.sqrt((ii - oo)**2.0 + (jj - oo)**2.0)
            new_data[(ii, jj)] = new_data[(ii, jj)] / (1 + smath.sqrt(dd))
    return new_data
def goodness_of_fit(self):
    """Calculate various goodness of fit metrics (assumes the fit has
    already been performed)"""
    a_sq = self.param[0]
    model_y = flex.sqrt(flex.pow2(self.x) + a_sq)
    resid = model_y - self.y
    resid2 = flex.pow2(resid)
    sse = flex.sum(resid2)
    sst = flex.sum(flex.pow2(model_y - flex.mean(model_y)))
    r_sq = 1 - sse / sst
    rmse = sqrt(sse / (self.n_data - 1))
    return {"SSE": sse, "R-square": r_sq, "RMSE": rmse}
def accumulate(self, x, y):
    """Accumulate the `x` and `y` values provided.

    Args:
        x (list): The list of `x` values to accumulate.
        y (list): The list of `y` values to accumulate.
    """
    assert x.size() == y.size()
    self._n += x.size()
    self._sum_x += flex.sum(x)
    self._sum_y += flex.sum(y)
    self._sum_xy += flex.sum(x * y)
    self._sum_x_sq += flex.sum(flex.pow2(x))
    self._sum_y_sq += flex.sum(flex.pow2(y))
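# Note: these running sums are sufficient statistics for, e.g., a
# streaming Pearson correlation. A minimal sketch (pearson_r is a
# hypothetical helper, not part of the original class):
import math

def pearson_r(n, sum_x, sum_y, sum_xy, sum_x_sq, sum_y_sq):
    # correlation coefficient from accumulated sums; no raw data needed
    num = n * sum_xy - sum_x * sum_y
    den = math.sqrt((n * sum_x_sq - sum_x ** 2) * (n * sum_y_sq - sum_y ** 2))
    return num / den if den != 0 else 0.0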
def target(self, vector):
    scales, offsets = self.get_scales_offsets(vector)
    dr = self.get_mean(scales, offsets)
    result = 0
    for jj in xrange(self.n_sets):
        dj = scales[jj] * (self.means[jj] + offsets[jj])
        vj = self.vars[jj] * scales[jj] * scales[jj]
        t = flex.pow((dj - dr), 2) / (1e-13 + vj)
        if self.n_sets != 2:
            result += flex.sum(t)
        else:
            if jj != self.ref:
                vr = self.vars[self.ref]
                result += flex.sum(flex.pow((dj - dr), 2) / (1e-13 + vj + vr))
    return result
def compute_functional_and_gradients(self):
    self.a = self.x
    f = 0.
    g = flex.double(self.n)
    vector_T = (flex.double(len(self.SP), self.x[0])
                + self.SP * self.x[1] + self.FP * self.x[2]
                + 0.5 * (self.SS * self.x[3] + self.SF * self.x[4]
                         + self.FF * self.x[5]))
    vector_lambda = vector_T / self.gain
    f = flex.sum(vector_lambda - (self.KI * flex.log(vector_lambda)))
    inner_paren = flex.double(len(self.SP), 1.) - (self.KI / vector_lambda)
    g_list = [flex.sum(deriv * inner_paren)
              for deriv in [flex.double(len(self.SP), 1.),
                            self.SP, self.FP, self.SS, self.SF, self.FF]]
    # self.print_step("LBFGS stp", f)
    g_list[3] = 0.
    g_list[4] = 0.
    g_list[5] = 0.  # turn off the 2nd-order Taylor term
    g = flex.double(g_list) / self.gain
    return f, g
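# Note: up to a constant, f above is the negative log-likelihood of
# independent Poisson counts K_i with means \lambda_i = T_i / gain:
#
#   -\ln L = \sum_i (\lambda_i - K_i \ln \lambda_i) + const
#   \frac{\partial (-\ln L)}{\partial p}
#       = \sum_i \left(1 - \frac{K_i}{\lambda_i}\right)
#         \frac{\partial \lambda_i}{\partial p}
#
# which is the inner_paren construction used for each gradient entry.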
def __init__(OO):
    slots = flex.double(histogram.astype(np.float64))
    slot_centers = flex.double(
        range(self.work_params.first_slot_value,
              self.work_params.first_slot_value + len(histogram)))
    x = slot_centers
    y_calc = flex.double(x.size(), 0)
    for g in gaussians:
        y_calc += g(x)
    # figure a good window for plotting the residual, taken as 5 sigma
    # away from the extreme gaussians
    ceilings = [gaussians[n].params[1] + 5. * gaussians[n].params[2]
                for n in range(len(gaussians))]
    ceiling = max(ceilings)
    floors = [gaussians[n].params[1] - 5. * gaussians[n].params[2]
              for n in range(len(gaussians))]
    floor = min(floors)
    selection = (slot_centers < floor).__or__(slot_centers > ceiling)
    OO.fit_xresid = slot_centers.select(selection)
    OO.fit_yresid = slots.select(selection) - y_calc.select(selection)
    OO.xweight = (OO.fit_xresid - gaussians[0].params[1]) / (
        gaussians[1].params[1] - gaussians[0].params[1])
    OO.additional_photons = flex.sum(OO.xweight * OO.fit_yresid)

    # Now the other half of the data: the part supposedly fit by the
    # 0- and 1-photon gaussians
    OO.qual_xresid = slot_centers.select(~selection)
    ysignal = slots.select(~selection)
    OO.qual_yresid = ysignal - y_calc.select(~selection)
    OO.qual_y_fit = y_calc.select(~selection)
    # Not sure how to treat weights for channels with zero observations;
    # default to 1
    _variance = ysignal.deep_copy().set_selected(ysignal == 0., 1.)
    OO.weight = 1. / _variance
    OO.weighted_numerator = OO.weight * (OO.qual_yresid * OO.qual_yresid)
    OO.sumsq_signal = flex.sum(ysignal * ysignal)
    OO.sumsq_residual = flex.sum(OO.qual_yresid * OO.qual_yresid)
def adjust_errors(self):
    """Use the distribution of intensities in a given miller index to
    compute the error for each merged reflection."""
    print("Computing error estimates from sample residuals", file=self.log)
    self.scaler.summed_weight = flex.double(self.scaler.n_refl, 0.)
    self.scaler.summed_wt_I = flex.double(self.scaler.n_refl, 0.)
    for hkl_id in range(self.scaler.n_refl):
        hkl = self.scaler.miller_set.indices()[hkl_id]
        if hkl not in self.scaler.ISIGI:
            continue
        n = len(self.scaler.ISIGI[hkl])
        if n <= 1:
            continue
        x = flex.double([self.scaler.ISIGI[hkl][i][0] for i in range(n)])
        if self.scaler.params.raw_data.error_models.errors_from_sample_residuals.biased:
            m = flex.mean(x)
            variance = flex.sum((x - m)**2) / n
        else:
            # flex.sum((x - m)**2) / (n - 1)
            variance = flex.mean_and_variance(x).unweighted_sample_variance()
        for i in range(n):
            Intensity = self.scaler.ISIGI[hkl][i][0]  # scaled intensity
            self.scaler.summed_wt_I[hkl_id] += Intensity / variance
            self.scaler.summed_weight[hkl_id] += 1 / variance
    print("Done computing error estimates", file=self.log)
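# Note: the two accumulators implement an inverse-variance weighted
# mean; after the loop the merged intensity for each reflection is
#
#   \langle I \rangle = \frac{\sum_i I_i / \sigma_i^2}{\sum_i 1 / \sigma_i^2}
#                     = summed_wt_I[hkl_id] / summed_weight[hkl_id]
#
# with the per-hkl sample variance standing in for \sigma_i^2.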
def target(self, vector): """Target function for the simplex optimisation""" scaled = self.transform(values=self.scl_values, params=vector) diff = (scaled - self.ref_values) diff_sq = diff * diff result = flex.sum(self.weight_array * diff_sq) return result
def plot_combo(self, pixel, gaussians, window_title=None, title=None,
               log_scale=False, normalise=False, save_image=False,
               interpretation=None):
    histogram = self.histograms[pixel]
    from matplotlib import pyplot
    from xfel.command_line.view_pixel_histograms import hist_outline
    slots = histogram.slots().as_double()
    if normalise:
        normalisation = (flex.sum(slots) + histogram.n_out_of_slot_range()) / 1e5
        print "normalising by factor: ", normalisation
        slots /= normalisation
    bins, data = hist_outline(histogram)
    if log_scale:
        # otherwise lines don't get drawn when we have some empty bins
        data.set_selected(data == 0, 0.1)
        pyplot.yscale("log")
    pyplot.plot(bins, data, '-k', linewidth=2)
    pyplot.plot(bins, data / 1000., '-k', linewidth=2)
    pyplot.suptitle(title)
    data_min = min([slot.low_cutoff for slot in histogram.slot_infos()
                    if slot.n > 0])
    data_max = max([slot.low_cutoff for slot in histogram.slot_infos()
                    if slot.n > 0])
    pyplot.xlim(data_min, data_max + histogram.slot_width())
    pyplot.xlim(-50, 100)
    pyplot.ylim(-10, 40)
    x = histogram.slot_centers()
    for g in gaussians:
        print "Height %7.2f mean %4.1f sigma %3.1f" % (g.params)
        pyplot.plot(x, g(x), linewidth=2)
    if interpretation is not None:
        interpretation.plot_multiphoton_fit(pyplot)
        interpretation.plot_quality(pyplot)
    pyplot.show()
def target(self, dxyz):
    self.move_on = False
    chi_score = 1e30
    for ii in range(50):
        dxyz = self.pdb_obj.beads.relax(self.restraints, dxyz)
        print ii, ' ',
        if self.pdb_obj.beads.restraint(self.restraints, dxyz) < self.threshold:
            self.new_xyz = self.pdb_obj.perturb(dxyz)
            self.move_on = True
            break
    if self.move_on:
        t1 = time.time()
        self.she_engine.engine.update_coord(self.new_xyz, self.new_indx)
        new_I = self.she_engine.engine.I()
        self.time_she += (time.time() - t1)
        var = self.expt_s
        s, o = she.linear_fit(new_I, self.expt_I, var)
        chi_score = flex.sum(flex.pow2(self.expt_I - (s * new_I + o)) / self.expt_s)
        # restraint = self.pdb_obj.beads.restraint(self.restraints, self.new_xyz)
        # tot = chi_score + restraint * self.restraint_weight
    print chi_score
    self.counter += 1
    return chi_score
def single_peak_fit(self, hist, lower_threshold, upper_threshold, mean,
                    zero_peak_gaussian=None):
    lower_slot = 0
    for slot in hist.slot_centers():
        lower_slot += 1
        if slot > lower_threshold:
            break
    upper_slot = 0
    for slot in hist.slot_centers():
        upper_slot += 1
        if slot > upper_threshold:
            break

    x = hist.slot_centers()
    y = hist.slots().as_double()
    starting_gaussians = [curve_fitting.gaussian(
        a=flex.max(y[lower_slot:upper_slot]), b=mean, c=3)]
    # print starting_gaussians
    # mamin: the gaussian fit takes the maximum between the starting point
    # (lower_slot) and the ending point (upper_slot) as a
    if zero_peak_gaussian is not None:
        y -= zero_peak_gaussian(x)
    if 1:
        fit = curve_fitting.lbfgs_minimiser(
            starting_gaussians, x[lower_slot:upper_slot], y[lower_slot:upper_slot])
        sigma = abs(fit.functions[0].params[2])
        if sigma < 1 or sigma > 10:
            if flex.sum(y[lower_slot:upper_slot]) < 15:  # mamin I changed 15 to 5
                # No point wasting time attempting to fit a gaussian if
                # there aren't any counts
                # raise PixelFitError("Not enough counts to fit gaussian")
                return fit
            print "using cma_es:", sigma
            fit = curve_fitting.cma_es_minimiser(
                starting_gaussians, x[lower_slot:upper_slot], y[lower_slot:upper_slot])
    else:
        fit = curve_fitting.cma_es_minimiser(
            starting_gaussians, x[lower_slot:upper_slot], y[lower_slot:upper_slot])
    return fit
def target(self, vector):
    scales, offsets = self.get_scales_offsets(vector)
    dr = self.get_mean(scales, offsets)
    result = 0
    for jj in range(self.n_sets):
        dj = scales[jj] * (self.means[jj] + offsets[jj])
        vj = self.vars[jj] * scales[jj] * scales[jj]
        t = flex.pow((dj - dr), 2) / (1e-13 + vj)
        if self.n_sets != 2:
            result += flex.sum(t)
        else:
            if jj != self.ref:
                vr = self.vars[self.ref]
                result += flex.sum(flex.pow((dj - dr), 2) / (1e-13 + vj + vr))
    return result
def test_with_flat_background():
    from dials.algorithms.integration.fit import ProfileFitter
    from scitbx.array_family import flex
    from numpy.random import seed
    seed(0)

    # Create profile
    p = gaussian((9, 9, 9), 1, (4, 4, 4), (2, 2, 2))
    s = flex.sum(p)
    p = p / s

    # Create the data: noisy profile plus a noisy flat background
    c0 = add_poisson_noise(100 * p)
    b = flex.double(flex.grid(9, 9, 9), 10)
    m = flex.bool(flex.grid(9, 9, 9), True)
    b0 = add_poisson_noise(b)
    c = c0 + b0

    # Fit
    fit = ProfileFitter(c, b, m, p)
    I = fit.intensity()
    V = fit.variance()
    assert fit.niter() < fit.maxiter()

    Iknown = 201.67417836585147
    Vknown = 7491.6743173001205

    # Test intensity is the same
    eps = 1e-3
    assert abs(I[0] - Iknown) < eps
    assert abs(V[0] - Vknown) < eps
def test_with_identical_non_negative_profiles(self):
    from scitbx.array_family import flex

    # Generate identical non-negative profiles
    reflections, profiles, profile = self.generate_identical_non_negative_profiles()

    # Create the reference learner
    modeller = Modeller(self.n, self.grid_size, self.threshold)

    # Do the modelling
    modeller.model(reflections, profiles)
    modeller.finalize()

    # Normalize the profile
    profile = self.normalize_profile(profile)

    # Check that all the reference profiles are the same
    eps = 1e-10
    for index in range(len(modeller)):
        reference = modeller.data(index)
        for k in range(self.grid_size[2]):
            for j in range(self.grid_size[1]):
                for i in range(self.grid_size[0]):
                    assert abs(reference[k, j, i] - profile[k, j, i]) <= eps
        assert abs(flex.sum(reference) - 1.0) <= eps
def exercise_sim(out, n_dynamics_steps, delta_t, sim):
    sim.check_d_pot_d_q()
    e_pots = flex.double([sim.e_pot])
    e_kins = flex.double([sim.e_kin])
    for i_step in range(n_dynamics_steps):
        sim.dynamics_step(delta_t=delta_t)
        e_pots.append(sim.e_pot)
        e_kins.append(sim.e_kin)
    e_tots = e_pots + e_kins
    sim.check_d_pot_d_q()
    print("energy samples:", e_tots.size(), file=out)
    print("e_pot min, max:", min(e_pots), max(e_pots), file=out)
    print("e_kin min, max:", min(e_kins), max(e_kins), file=out)
    print("e_tot min, max:", min(e_tots), max(e_tots), file=out)
    print("start e_tot:", e_tots[0], file=out)
    print("final e_tot:", e_tots[-1], file=out)
    ave = flex.sum(e_tots) / e_tots.size()
    range_ = flex.max(e_tots) - flex.min(e_tots)
    if ave == 0:
        relative_range = 0
    else:
        relative_range = range_ / ave
    print("ave:", ave, file=out)
    print("range:", range_, file=out)
    print("relative range:", relative_range, file=out)
    print(file=out)
    out.flush()
    if out is sys.stdout:
        f = open("tmp%02d.xy" % plot_number[0], "w")
        for es in [e_pots, e_kins, e_tots]:
            for e in es:
                print(e, file=f)
            print("&", file=f)
        f.close()
        plot_number[0] += 1
    return relative_range
def generate_3_profiles():
    from dials.array_family import flex
    p1 = gaussian((40, 9, 9), 1, (10.5, 4, 4), (2, 2, 2))
    p2 = gaussian((40, 9, 9), 1, (20.5, 4, 4), (2, 2, 2))
    p3 = gaussian((40, 9, 9), 1, (30.5, 4, 4), (2, 2, 2))
    p1 = p1 / flex.sum(p1)
    p2 = p2 / flex.sum(p2)
    p3 = p3 / flex.sum(p3)
    p1.reshape(flex.grid(1, 40, 9, 9))
    p2.reshape(flex.grid(1, 40, 9, 9))
    p3.reshape(flex.grid(1, 40, 9, 9))
    p = flex.double(flex.grid(3, 40, 9, 9))
    p[0:1, :, :, :] = p1
    p[1:2, :, :, :] = p2
    p[2:3, :, :, :] = p3
    return p
def embed(self, n_dimensions, n_points):
    x = []
    for ii in range(n_points):
        x.append(flex.random_double(n_dimensions) * 100)
    l = float(self.l)
    for mm in range(self.max_cycle):
        atom_order = flex.sort_permutation(flex.random_double(len(x)))
        strain = 0.0
        for ii in atom_order:
            n_contacts = len(self.dmat[ii])
            jj_index = flex.sort_permutation(flex.random_double(n_contacts))[0]
            jj_info = self.dmat[ii][jj_index]
            jj = jj_info[0]
            td = jj_info[1]
            xi = x[ii]
            xj = x[jj]
            cd = math.sqrt(flex.sum((xi - xj) * (xi - xj)))
            new_xi = xi + l * 0.5 * (td - cd) / (cd + self.eps) * (xi - xj)
            new_xj = xj + l * 0.5 * (td - cd) / (cd + self.eps) * (xj - xi)
            strain += abs(cd - td)
            x[ii] = new_xi
            x[jj] = new_xj
        l = l - self.dl
    return x, strain / len(x)
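# Note: the inner update has the form of a stochastic proximity
# embedding (SPE) step: for a randomly chosen pair (i, j) with target
# distance t_d and current distance c_d,
#
#   x_i \leftarrow x_i + \frac{l}{2} \cdot \frac{t_d - c_d}{c_d + \epsilon} (x_i - x_j)
#
# and symmetrically for x_j, with learning rate l decaying by dl each
# cycle; strain accumulates the per-pair |c_d - t_d| misfit.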
def plot_one_histogram(self, histogram, window_title=None, title=None,
                       log_scale=False, normalise=False, save_image=False):
    from matplotlib import pyplot
    slots = histogram.slots().as_double()
    if normalise:
        normalisation = (flex.sum(slots) + histogram.n_out_of_slot_range()) / 1e5
        print "normalising by factor: ", normalisation
        slots /= normalisation
    bins, data = hist_outline(histogram)
    if log_scale:
        # otherwise lines don't get drawn when we have some empty bins
        data.set_selected(data == 0, 0.1)
        pyplot.yscale("log")
    pyplot.plot(bins, data, '-k', linewidth=2)
    pyplot.suptitle(title)
    data_min = min([slot.low_cutoff for slot in histogram.slot_infos()
                    if slot.n > 0])
    data_max = max([slot.low_cutoff for slot in histogram.slot_infos()
                    if slot.n > 0])
    pyplot.xlim(data_min, data_max + histogram.slot_width())
def plot_combo(self, histogram, gaussians, window_title=None, title=None,
               log_scale=False, normalise=False, save_image=False,
               interpretation=None):
    from matplotlib import pyplot
    # from xfel.command_line.view_pixel_histograms import hist_outline
    slots = flex.double(histogram.astype(np.float64))
    if normalise:
        normalisation = (flex.sum(slots) + histogram.n_out_of_slot_range()) / 1e5
        print "normalising by factor: ", normalisation
        slots /= normalisation
    slot_centers = flex.double(
        xrange(self.work_params.first_slot_value,
               self.work_params.first_slot_value + len(histogram)))
    bins, data = hist_outline(slot_width=1, slots=slots, slot_centers=slot_centers)
    if log_scale:
        # otherwise lines don't get drawn when we have some empty bins
        data.set_selected(data == 0, 0.1)
        pyplot.yscale("log")
    pyplot.plot(bins, data, '-k', linewidth=2)
    pyplot.plot(bins, data / 1000., '-k', linewidth=2)
    pyplot.suptitle(title)
    pyplot.xlim(-50, 150)
    pyplot.ylim(-10, 40)
    x = slot_centers
    for g in gaussians:
        print "Height %7.2f mean %4.1f sigma %3.1f" % (g.params)
        pyplot.plot(x, g(x), linewidth=2)
    if interpretation is not None:
        interpretation.plot_multiphoton_fit(pyplot)
        interpretation.plot_quality(pyplot)
    pyplot.show()
def check_i_obs_vs_backup(O, work_params):
    from scitbx.array_family import flex
    print "Current i_obs vs. backup:"
    for im in O.array:
        im.backup.reset_partialities(work_params, O.miller_indices)
        b_obs = im.backup.extract_i_obs_est(work_params, O.miller_indices)
        im.reset_partialities(work_params, O.miller_indices)
        i_obs = im.extract_i_obs_est(work_params, O.miller_indices)
        max_common_size = -1
        max_cb_ci = None
        for s in work_params.lattice_symmetry.group():
            i_obs_cb = i_obs.change_basis(str(s))
            cb, ci = b_obs.common_sets(other=i_obs_cb)
            common_size = cb.indices().size()
            if max_common_size < common_size:
                max_common_size = common_size
                max_cb_ci = cb, ci
        assert max_cb_ci is not None
        cb, ci = max_cb_ci
        num = flex.sum(cb.data() * ci.data())
        den = flex.sum_sq(cb.data())
        if den == 0:
            scale = None
        else:
            scale = num / den
        print " ", b_obs.indices().size(), i_obs.indices().size(), \
            cb.indices().size(), scale
    print
def target(self, vector): """ Compute the functional by first applying the current values for the sd parameters to the input data, then computing the complete set of normalized deviations and finally using those normalized deviations to compute the functional.""" sdfac, sdb, sdadd = vector[0],0.0,vector[1] a_new_variance, b_new_variance = ccp4_model.apply_sd_error_params( vector, a_data, b_data, a_sigmas, b_sigmas) mean_num = (a_data/ (a_new_variance) ) + (b_data/ (b_new_variance) ) mean_den = (1./ (a_new_variance) ) + (1./ (b_new_variance) ) mean_values = mean_num / mean_den delta_I_a = a_data - mean_values normal_a = delta_I_a / flex.sqrt(a_new_variance) delta_I_b = b_data - mean_values normal_b = delta_I_b / flex.sqrt(b_new_variance) mean_order = flex.sort_permutation(mean_values) scatters = flex.double(50) scattersb = flex.double(50) for isubsection in range(50): subselect = mean_order[isubsection*len(mean_order)//50:(isubsection+1)*len(mean_order)//50] vals = normal_a.select(subselect) scatters[isubsection] = flex.mean_and_variance(vals).unweighted_sample_variance() valsb = normal_b.select(subselect) scattersb[isubsection] = flex.mean_and_variance(valsb).unweighted_sample_variance() f = flex.sum( flex.pow(1.-scatters, 2) ) print "f: % 12.1f, sdfac: %8.5f, sdb: %8.5f, sdadd: %8.5f"%(f, sdfac, sdb, sdadd) return f
def build_cluster_pair_info(O, other, work_params, reindexing_assistant):
    from scitbx.array_family import flex
    scale_max = work_params.scale_estimation_scale_max
    assert scale_max > 0
    scale_min = 1 / scale_max
    miis_i, esti_i = O.miis_perms[0], O.esti_perms[0]
    result = []
    for j_perm in xrange(len(reindexing_assistant.cb_ops)):
        miis_j, esti_j = other.miis_perms[j_perm], other.esti_perms[j_perm]
        i_seqs, j_seqs = miis_i.intersection_i_seqs(other=miis_j)
        if i_seqs.size() < 2:
            return None
        x = esti_i.select(i_seqs)
        y = esti_j.select(j_seqs)
        if ((x != 0) | (y != 0)).count(True) < 2:
            return None
        num = flex.sum(x * y)
        den = flex.sum_sq(x)
        if num > den * scale_min and num < den * scale_max:
            scale = num / den
            rms = flex.mean_sq(x * scale - y)**0.5
            result.append(perm_rms_info(n=x.size(), scale=scale, rms=rms))
        else:
            return None
    result = perm_rms_list(array=result)
    result.set_score()
    return result
def run(b_iso=10):
    xrs = iotbx.pdb.input(source_info=None, lines=pdb_str).xray_structure_simple()
    xrs = xrs.set_b_iso(value=b_iso)
    xrs.scattering_type_registry(
        table="n_gaussian",
        d_min=0.,
        types_without_a_scattering_contribution=["?"])
    n_real = [100, 100, 100]
    pixel_volume = xrs.unit_cell().volume() / (n_real[0] * n_real[1] * n_real[2])
    map_data_3d = mmtbx.real_space.sampled_model_density(
        xray_structure=xrs, n_real=n_real).data() * pixel_volume
    dist, map_data_2d = maptbx.map_peak_3d_as_2d(
        map_data=map_data_3d,
        unit_cell=xrs.unit_cell(),
        center_cart=xrs.sites_cart()[0],
        radius=3.0)
    #
    map_data_2d_exact = flex.double()
    ed = xrs._scattering_type_registry.gaussian("Ca")
    for r in dist:
        map_data_2d_exact.append(ed.electron_density(r, b_iso))
    map_data_2d_exact = map_data_2d_exact * pixel_volume
    #
    assert approx_equal(flex.sum(map_data_3d), 20, 0.1)  # Page 556, Int. Tables
    assert flex.mean(abs(map_data_2d - map_data_2d_exact)) < 1.e-4
def test_with_systematically_offset_profiles(self):
    from scitbx.array_family import flex

    # Generate systematically offset profiles
    reflections, profiles = self.generate_systematically_offset_profiles()

    # Create the reference learner
    modeller = Modeller(self.n, self.grid_size, self.threshold)

    # Do the modelling
    modeller.model(reflections, profiles)
    modeller.finalize()

    # Check that all the reference profiles are the same
    eps = 1e-10
    profile = None
    for index in range(len(modeller)):
        reference = modeller.data(index)
        if profile is not None:
            for k in range(self.grid_size[2]):
                for j in range(self.grid_size[1]):
                    for i in range(self.grid_size[0]):
                        assert abs(reference[k, j, i] - profile[k, j, i]) <= eps
        else:
            profile = reference
        assert abs(flex.sum(reference) - 1.0) <= eps
def compute_functional_and_gradients(self):
    # calculate difference between predicted and observed values
    self.distribution.set_parameters(p=self.x)
    is_cpp_ = getattr(self.distribution, "interface", "Python") == "C++"
    if is_cpp_:
        predicted = self.distribution.cdf(x=self.x_data)
    else:
        predicted = flex.double(self.n)
        for i in xrange(self.n):
            predicted[i] = self.distribution.cdf(x=self.x_data[i])
    difference = predicted - self.y_data

    # target function for minimization is the sum of absolute differences
    f = flex.sum(flex.sqrt(difference * difference))

    if is_cpp_:
        gradients = self.distribution.gradients(
            x=self.x_data, nparams=len(self.x), difference=difference)
        return f, gradients

    gradients = flex.double(len(self.x))
    for i in xrange(self.n):
        g_i = self.distribution.cdf_gradients(x=self.x_data[i])
        for j in xrange(len(self.x)):
            gradients[j] = gradients[j] + difference[i] * g_i[j]
    gradients = 2.0 * gradients
    return f, gradients
def compute_restraints_functional_gradients_and_curvatures(self):
    '''Use the restraints_parameterisation object, if present, to calculate
    the least-squares restraints objective plus gradients and approximate
    curvatures.'''
    if not self._restraints_parameterisation:
        return None

    residuals, jacobian, weights = \
        self._restraints_parameterisation.get_residuals_gradients_and_weights()
    w_resid = weights * residuals
    residuals2 = residuals * residuals

    # calculate target function
    L = 0.5 * flex.sum(weights * residuals2)

    # calculate gradients using the scalar product of sparse vector col with
    # dense vector w_resid
    dL_dp = [col * w_resid for col in jacobian.cols()]

    # calculate lsq approximation to curvatures using weighted dot product
    curvs = [sparse.weighted_dot(col, weights, col) for col in jacobian.cols()]

    return (L, dL_dp, curvs)
def embed(self, n_dimensions, n_points):
    x = []
    for ii in range(n_points):
        x.append(flex.random_double(n_dimensions) * 100)
    l = float(self.l)
    for mm in range(self.max_cycle):
        atom_order = flex.sort_permutation(flex.random_double(len(x)))
        strain = 0.0
        for ii in atom_order:
            n_contacts = len(self.dmat[ii])
            jj_index = flex.sort_permutation(flex.random_double(n_contacts))[0]
            jj_info = self.dmat[ii][jj_index]
            jj = jj_info[0]
            td = jj_info[1]
            xi = x[ii]
            xj = x[jj]
            cd = smath.sqrt(flex.sum((xi - xj) * (xi - xj)))
            new_xi = xi + l * 0.5 * (td - cd) / (cd + self.eps) * (xi - xj)
            new_xj = xj + l * 0.5 * (td - cd) / (cd + self.eps) * (xj - xi)
            strain += abs(cd - td)
            x[ii] = new_xi
            x[jj] = new_xj
        l = l - self.dl
    return x, strain / len(x)
def num_int_triple_int(n1, n2, n3, m, np=1e5):
    # numerical integration of a product of three Zernike 2D radial
    # functions on r in [0, 1] (here `math` appears to be scitbx's math
    # module, which provides zernike_2d_radial)
    r = flex.double(range(int(np + 1))) / np
    f1 = math.zernike_2d_radial(n1, m).f_array(r)
    f2 = math.zernike_2d_radial(n2, m).f_array(r)
    f3 = math.zernike_2d_radial(n3, m).f_array(r)
    result = flex.sum(f1 * f2 * f3 * r) / np
    return result
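# Note: the sum above is a Riemann approximation, over np + 1 equally
# spaced points with step 1/np, to the radial triple-product integral
#
#   \int_0^1 R_{n_1}^m(r) \, R_{n_2}^m(r) \, R_{n_3}^m(r) \, r \, dr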