def exercise_increasing_dimensions(self):
    print("Scaling with m and m/n: ", end=' ')
    n_tests = 0
    for sigma, a in self.matrices():
        m, n = a.focus()
        if self.show_progress:
            if not n_tests:
                print()
            print((m, n), end=' ')
            sys.stdout.flush()
        svd = self.klass(a, self.accumulate_u, self.accumulate_v)
        if self.show_progress:
            print('!', end=' ')
            sys.stdout.flush()
        sigma = sigma.select(flex.sort_permutation(sigma, reverse=True))
        delta = (svd.sigma - sigma).norm() / sigma.norm() / min(m, n) / self.eps
        assert delta < 10
        n_tests += 1
        if not self.exercise_tntbx:
            if self.show_progress:
                print()
            continue
        svd = tntbx.svd_m_ge_n_double(a)
        if self.show_progress:
            print('!')
            sys.stdout.flush()
        sigma = sigma.select(flex.sort_permutation(sigma, reverse=True))
        delta = ((svd.singular_values() - sigma).norm()
                 / sigma.norm() / min(m, n) / self.eps)
        assert delta < 10
    if self.show_progress:
        print()
    print("%i done" % n_tests)
def interpolate(x, y, half_window=10):
    perm = flex.sort_permutation(x)
    x = x.select(perm)
    y = y.select(perm)
    x_all = flex.double()
    y_all = flex.double()
    for i in range(x.size()):
        x_all.append(x[i])
        y_all.append(y[i])
        if i < x.size() - 1 and (x[i + 1] - x[i]) > 1:
            window_left = min(half_window, i)
            window_right = min(half_window, x.size() - i)
            x_ = x[i - window_left:i + window_right]
            y_ = y[i - window_left:i + window_right]
            from scitbx.math import curve_fitting
            # fit a low-order polynomial through the flanking points to
            # interpolate across the gap
            polynomial = curve_fitting.univariate_polynomial(1, 1)
            fit = curve_fitting.lbfgs_minimiser([polynomial], x_, y_).functions[0]
            missing_x = flex.double(range(int(x[i] + 1), int(x[i + 1])))
            x_all.extend(missing_x)
            y_all.extend(fit(missing_x))
    perm = flex.sort_permutation(x_all)
    x_all = x_all.select(perm)
    y_all = y_all.select(perm)
    return x_all, y_all
def summary(self, top_models, top_scores, comment="---"):
    model_dict = {}
    score_dict = {}
    for models, scores in zip(top_models, top_scores):
        for m, i in zip(models, range(self.ntop)):
            if m in model_dict:
                model_dict[m] += 1
                score_dict[m] += scores[i]
            else:
                model_dict[m] = 1
                score_dict[m] = scores[i]
    out = open(self.prefix + '.sta', 'a')
    keys = list(model_dict.keys())
    counts = flex.double(list(model_dict.values()))
    scores = flex.double(list(score_dict.values())) / counts
    print(comment, file=out)
    print("Sorted by Appearances:", file=out)
    order = flex.sort_permutation(counts)
    for o in order:
        print(self.codes[keys[o]], counts[o], scores[o], file=out)
    print("Sorted by Average Score:", file=out)
    order = flex.sort_permutation(scores)
    for o in order:
        print(self.codes[keys[o]], counts[o], scores[o], file=out)
    out.close()
def embed(self, n_dimensions, n_points):
    x = []
    for ii in range(n_points):
        x.append(flex.random_double(n_dimensions) * 100)
    l = float(self.l)
    for mm in range(self.max_cycle):
        atom_order = flex.sort_permutation(flex.random_double(len(x)))
        strain = 0.0
        for ii in atom_order:
            n_contacts = len(self.dmat[ii])
            jj_index = flex.sort_permutation(flex.random_double(n_contacts))[0]
            jj_info = self.dmat[ii][jj_index]
            jj = jj_info[0]
            td = jj_info[1]
            xi = x[ii]
            xj = x[jj]
            cd = math.sqrt(flex.sum((xi - xj) * (xi - xj)))
            new_xi = xi + l * 0.5 * (td - cd) / (cd + self.eps) * (xi - xj)
            new_xj = xj + l * 0.5 * (td - cd) / (cd + self.eps) * (xj - xi)
            strain += abs(cd - td)
            x[ii] = new_xi
            x[jj] = new_xj
        l = l - self.dl
    return x, strain / len(x)
def plots(a_data, b_data, a_sigmas, b_sigmas):
    # Diagnostic use of the (I - <I>) / sigma distribution; should have mean=0, std=1
    a_variance = a_sigmas * a_sigmas
    b_variance = b_sigmas * b_sigmas
    mean_num = (a_data / a_variance) + (b_data / b_variance)
    mean_den = (1. / a_variance) + (1. / b_variance)
    mean_values = mean_num / mean_den

    delta_I_a = a_data - mean_values
    normal_a = delta_I_a / a_sigmas
    stats_a = flex.mean_and_variance(normal_a)
    print("\nA mean %7.4f std %7.4f" % (
        stats_a.mean(), stats_a.unweighted_sample_standard_deviation()))
    order_a = flex.sort_permutation(normal_a)

    delta_I_b = b_data - mean_values
    normal_b = delta_I_b / b_sigmas
    stats_b = flex.mean_and_variance(normal_b)
    print("B mean %7.4f std %7.4f" % (
        stats_b.mean(), stats_b.unweighted_sample_standard_deviation()))
    order_b = flex.sort_permutation(normal_b)

    # plots for debugging
    from matplotlib import pyplot as plt
    cumnorm = plt.subplot(321)
    cumnorm.plot(range(len(order_a)), normal_a.select(order_a), "b.")
    cumnorm.plot(range(len(order_b)), normal_b.select(order_b), "r.")
    #plt.show()
    logger = plt.subplot(324)
    logger.loglog(a_data, b_data, "r.")
    delta = plt.subplot(322)
    delta.plot(a_data, delta_I_a, "g.")
    #plt.show()
    #nselection = (flex.abs(normal_a) < 2.).__and__(flex.abs(normal_b) < 2.)
    gam = plt.subplot(323)
    gam.plot(mean_values, normal_a, "b.")
    sigs = plt.subplot(326)
    sigs.plot(a_sigmas, b_sigmas, "g.")
    mean_order = flex.sort_permutation(mean_values)
    scatters = flex.double(50)
    scattersb = flex.double(50)
    for isubsection in range(50):
        subselect = mean_order[
            isubsection * len(mean_order) // 50:
            (isubsection + 1) * len(mean_order) // 50]
        vals = normal_a.select(subselect)
        scatters[isubsection] = flex.mean_and_variance(
            vals).unweighted_sample_variance()
        valsb = normal_b.select(subselect)
        scattersb[isubsection] = flex.mean_and_variance(
            valsb).unweighted_sample_variance()
    aaronsplot = plt.subplot(325)
    aaronsplot.plot(range(50), 2. * scatters, "b.")
    plt.show()
def _get_sorted(O, unit_cell, sites_cart, pdb_atoms, by_value="residual",
                use_segids_in_place_of_chainids=False):
    assert by_value in ["residual", "delta"]
    if (O.size() == 0):
        return []
    import cctbx.geometry_restraints
    from scitbx.array_family import flex
    deltas = flex.abs(O.deltas(sites_cart=sites_cart))
    residuals = O.residuals(sites_cart=sites_cart)
    if (by_value == "residual"):
        data_to_sort = residuals
    elif (by_value == "delta"):
        data_to_sort = deltas
    i_proxies_sorted = flex.sort_permutation(data=data_to_sort, reverse=True)
    sorted_table = []
    for i_proxy in i_proxies_sorted:
        proxy = O[i_proxy]
        sigma = cctbx.geometry_restraints.weight_as_sigma(proxy.weight)
        score = sqrt(residuals[i_proxy]) / sigma
        proxy_atoms = get_atoms_info(
            pdb_atoms, iselection=proxy.i_seqs,
            use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
        sorted_table.append((proxy, proxy_atoms))
    return sorted_table
def __init__(self, imgobj, phil, inputpd, verbose=False):
    adopt_init_args(self, locals())
    self.active_areas = imgobj.get_tile_manager(phil).effective_tiling_as_flex_int()
    B = self.active_areas
    # figure out which asics are on the central four sensors
    assert len(self.active_areas) % 4 == 0
    # apply an additional margin of 1 pixel, since we don't seem to be
    # registering the global margin.
    asics = [(B[i] + 1, B[i + 1] + 1, B[i + 2] - 1, B[i + 3] - 1)
             for i in range(0, len(B), 4)]
    from scitbx.matrix import col
    centre_mm = col((float(inputpd["xbeam"]), float(inputpd["ybeam"])))
    centre = centre_mm / float(inputpd["pixel_size"])
    distances = flex.double()
    cenasics = flex.vec2_double()
    self.corners = []
    for iasic in range(len(asics)):
        cenasic = ((asics[iasic][2] + asics[iasic][0]) / 2.,
                   (asics[iasic][3] + asics[iasic][1]) / 2.)
        cenasics.append(cenasic)
        distances.append(math.hypot(cenasic[0] - centre[0],
                                    cenasic[1] - centre[1]))
    orders = flex.sort_permutation(distances)
    self.flags = flex.int(len(asics), 0)
    # use the central asics (central four sensors)
    self.green = []
    for i in range(32):
        #self.green.append(cenasics[orders[i]])
        self.corners.append(asics[orders[i]])
        #self.green.append((self.corners[-1][0], self.corners[-1][1]))
        self.flags[orders[i]] = 1
    self.asic_filter = "distl.tile_flags=" + ",".join(
        ["%1d" % b for b in self.flags])
def plot(self, dano_summation):
    from matplotlib import pyplot as plt
    if self.params.use_weights:
        wt = 1. / (self.diffs.sigmas() * self.diffs.sigmas())
        order = flex.sort_permutation(wt)
        wt = wt.select(order)
        df = self.diffs.data().select(order)
        dano = dano_summation.select(self.sel0).select(order)
        from matplotlib.colors import Normalize
        dnorm = Normalize()
        dnorm.autoscale(wt.as_numpy_array())
        CMAP = plt.get_cmap("rainbow")
        for ij in range(len(self.diffs.data())):
            # blue represents zero weight; red, large weight
            plt.plot([df[ij]], [dano[ij]],
                     color=CMAP(dnorm(wt[ij])), marker=".", markersize=4)
    else:
        plt.plot(self.diffs.data(), dano_summation.select(self.sel0), "r,")
    plt.axes().set_aspect("equal")
    plt.axes().set_xlabel("Observed Dano")
    plt.axes().set_ylabel("Model Dano")
    plt.show()
def rmerge_vs_batch(intensities, batches):
    """Determine batches and Rmerge values per batch."""
    assert intensities.size() == batches.size()
    intensities = intensities.map_to_asu()
    merging = intensities.merge_equivalents()
    merged_intensities = merging.array()

    perm = flex.sort_permutation(batches.data())
    batches = batches.data().select(perm)
    intensities = intensities.select(perm)

    pairs = miller.match_multi_indices(
        merged_intensities.indices(), intensities.indices()).pairs()

    def r_merge_per_batch(pairs):
        """Calculate R_merge for the list of (merged-I, I) pairs."""
        merged_indices, unmerged_indices = zip(*pairs)
        unmerged_Ij = intensities.data().select(flex.size_t(unmerged_indices))
        merged_Ij = merged_intensities.data().select(flex.size_t(merged_indices))
        numerator = flex.sum(flex.abs(unmerged_Ij - merged_Ij))
        denominator = flex.sum(unmerged_Ij)
        if denominator > 0:
            return numerator / denominator
        return 0

    return _batch_bins_and_data(batches, pairs,
                                function_to_apply=r_merge_per_batch)
def accumulate_dose(imagesets):
    from scitbx.array_family import flex
    epochs = flex.double()
    exposure_times = flex.double()
    for imageset in imagesets:
        scan = imageset.get_scan()
        # workaround for read_all_image_headers=False option
        epochs.extend(flex.double(
            e if e else float(os.stat(imageset.get_path(j)).st_mtime)
            for (j, e) in enumerate(scan.get_epochs())))
        exposure_times.extend(scan.get_exposure_times())

    perm = flex.sort_permutation(epochs)
    epochs = epochs.select(perm)
    exposure_times = exposure_times.select(perm)

    from libtbx.containers import OrderedDict
    integrated_dose = OrderedDict()

    total = 0.0
    for e, t in zip(epochs, exposure_times):
        integrated_dose[e] = total + 0.5 * t
        total += t

    return integrated_dose
def concentration_step(h, data, T, S):
    """Practical application of Theorem 1 of R&vD"""
    d2s = maha_dist_sq(data, T, S)
    p = flex.sort_permutation(d2s)
    H1 = [col.select(p)[0:h] for col in data]
    return H1
def _initial_trace(self):
    """ Place dummy atoms into map """
    sites_cart = self.mmm.map_manager().trace_atoms_in_map(
        dist_min=2, n_atoms=self.mmm.model().size())
    #
    # fixed-width HETATM record; exact column spacing was lost in
    # transcription and is reconstructed here per the PDB format
    fmt = "HETATM %4d  O   HOH %5d    %8.3f%8.3f%8.3f  1.00 30.00           O"
    lines = "\n".join([fmt % (i, i, sc[0], sc[1], sc[2])
                       for i, sc in enumerate(sites_cart)])
    pdb_inp = iotbx.pdb.input(source_info=None, lines=lines)
    model = get_model_adhoc(crystal_symmetry=self.cs, pdb_inp=pdb_inp)
    self.states.add(hierarchy=model.get_hierarchy())
    model = sa_simple(model=model, map_data=self.map_data, log=null_out())
    #
    sites_cart = model.get_sites_cart()
    self.states.add(hierarchy=model.get_hierarchy())
    #
    start, end = get_se(coords=sites_cart, uc=self.uc)
    distances = flex.double([dist(start, s, self.uc) for s in sites_cart])
    sel = flex.sort_permutation(distances)
    sites_cart = sites_cart.select(sel)
    #
    return list(sites_cart)
def candidate_orientation_matrices(basis_vectors, max_combinations=None):
    # select unique combinations of input vectors to test
    # the order of combinations is such that combinations comprising vectors
    # nearer the beginning of the input list will appear before combinations
    # comprising vectors towards the end of the list
    n = len(basis_vectors)
    # hardcoded limit on number of vectors, fixes issue #72
    # https://github.com/dials/dials/issues/72
    n = min(n, 100)
    basis_vectors = basis_vectors[:n]
    combinations = flex.vec3_int(flex.nested_loop((n, n, n)))
    combinations = combinations.select(
        flex.sort_permutation(combinations.as_vec3_double().norms()))

    # select only those combinations where j > i and k > j
    i, j, k = combinations.as_vec3_double().parts()
    sel = flex.bool(len(combinations), True)
    sel &= j > i
    sel &= k > j
    combinations = combinations.select(sel)

    if max_combinations is not None and max_combinations < len(combinations):
        combinations = combinations[:max_combinations]

    half_pi = 0.5 * math.pi
    min_angle = 20 / 180 * math.pi  # 20 degrees, arbitrary cutoff
    for i, j, k in combinations:
        a = basis_vectors[i]
        b = basis_vectors[j]
        angle = a.angle(b)
        if angle < min_angle or (math.pi - angle) < min_angle:
            continue
        a_cross_b = a.cross(b)
        gamma = a.angle(b)
        if gamma < half_pi:
            # all angles obtuse if possible please
            b = -b
            a_cross_b = -a_cross_b
        c = basis_vectors[k]
        if abs(half_pi - a_cross_b.angle(c)) < min_angle:
            continue
        # compare in radians, consistent with half_pi
        alpha = b.angle(c)
        if alpha < half_pi:
            c = -c
        if a_cross_b.dot(c) < 0:
            # we want a right-handed basis set, therefore invert all vectors
            a = -a
            b = -b
            c = -c
        model = Crystal(a, b, c, space_group_symbol="P 1")
        uc = model.get_unit_cell()
        cb_op_to_niggli = uc.change_of_basis_op_to_niggli_cell()
        model = model.change_basis(cb_op_to_niggli)
        uc = model.get_unit_cell()
        params = uc.parameters()
        if uc.volume() > (params[0] * params[1] * params[2] / 100):
            # unit cell volume cutoff from labelit 2004 paper
            yield model
def same_sensor_table(self, verbose=True):
    radii = flex.double()   # from-instrument-center distance in pixels
    delrot = flex.double()  # delta rotation in degrees
    weight = flex.double()
    displacement = []       # vector between two same-sensor ASICS in pixels
    for x in range(len(self.tiles) // 8):
        delrot.append(self.x[len(self.tiles) // 2 + 2 * x]
                      - self.x[len(self.tiles) // 2 + 1 + 2 * x])
        radii.append((self.radii[2 * x] + self.radii[2 * x + 1]) / 2)
        weight.append(min([self.tilecounts[2 * x], self.tilecounts[2 * x + 1]]))
        displacement.append(
            col((self.To_x[2 * x + 1], self.To_y[2 * x + 1]))
            - col((self.x[2 * (2 * x + 1)], self.x[2 * (2 * x + 1) + 1]))
            - col((self.To_x[2 * x], self.To_y[2 * x]))
            + col((self.x[2 * (2 * x)], self.x[2 * (2 * x) + 1])))
    order = flex.sort_permutation(radii)
    if verbose:
        for x in order:
            print("%02d %02d %5.0f" % (2 * x, 2 * x + 1, weight[x]), end=' ')
            print("%6.1f" % radii[x], end=' ')
            print("%5.2f" % (delrot[x]), end=' ')
            # ASIC is 194 px wide; just print the gap
            print("%6.3f" % (displacement[x].length() - 194.))
    stats = flex.mean_and_variance(
        flex.double([t.length() - 194. for t in displacement]), weight)
    print("sensor gap is %7.3f px +/- %7.3f" % (
        stats.mean(), stats.gsl_stats_wsd()))
def target(self, vector):
    """ Compute the functional by first applying the current values for the sd
    parameters to the input data, then computing the complete set of normalized
    deviations, and finally using those normalized deviations to compute the
    functional."""
    sdfac, sdb, sdadd = vector[0], 0.0, vector[1]

    a_new_variance, b_new_variance = ccp4_model.apply_sd_error_params(
        vector, a_data, b_data, a_sigmas, b_sigmas)

    mean_num = (a_data / a_new_variance) + (b_data / b_new_variance)
    mean_den = (1. / a_new_variance) + (1. / b_new_variance)
    mean_values = mean_num / mean_den

    delta_I_a = a_data - mean_values
    normal_a = delta_I_a / flex.sqrt(a_new_variance)

    delta_I_b = b_data - mean_values
    normal_b = delta_I_b / flex.sqrt(b_new_variance)

    mean_order = flex.sort_permutation(mean_values)
    scatters = flex.double(50)
    scattersb = flex.double(50)
    for isubsection in range(50):
        subselect = mean_order[
            isubsection * len(mean_order) // 50:
            (isubsection + 1) * len(mean_order) // 50]
        vals = normal_a.select(subselect)
        scatters[isubsection] = flex.mean_and_variance(
            vals).unweighted_sample_variance()
        valsb = normal_b.select(subselect)
        scattersb[isubsection] = flex.mean_and_variance(
            valsb).unweighted_sample_variance()

    f = flex.sum(flex.pow(1. - scatters, 2))
    print("f: % 12.1f, sdfac: %8.5f, sdb: %8.5f, sdadd: %8.5f" % (
        f, sdfac, sdb, sdadd))
    return f
def exercise_densely_distributed_singular_values(show_progress, full_coverage,
                                                 klass):
    n = 40
    m = 2 * n
    n_runs = 20
    tol = 10 * scitbx.math.double_numeric_limits.epsilon
    gen = scitbx.linalg.random_normal_matrix_generator(m, n)
    sigmas = []
    sigmas.append(flex.double([10**(-i / n) for i in range(n)]))
    sigmas.append(sigmas[0].select(flex.random_permutation(n)))
    sigmas.append(sigmas[0].reversed())
    print("Densely distributed singular values:", end=' ')
    n_tests = 0
    for i in range(n_runs):
        if not full_coverage and random.random() < 0.8:
            continue
        n_tests += 1
        for i_case, sigma in enumerate(sigmas):
            a = gen.matrix_with_singular_values(sigma)
            svd = klass(a, accumulate_u=False, accumulate_v=False)
            if i_case > 0:
                sigma = sigma.select(flex.sort_permutation(sigma, reverse=True))
            delta = (svd.sigma - sigma) / sigma / tol
            assert delta.all_lt(5)
    print("%i done." % n_tests)
def __init__(self, data, already_sorted=False):
    self.n_data = n = len(data)
    if not already_sorted:
        data = data.select(flex.sort_permutation(data, reverse=True))
    if n == 0:
        self.cut = self.highest_stat = self.lowest_stat = None
        return
    if n == 1:
        self.cut = 1
        self.highest_stat = data[0]
        self.lowest_stat = None
        return
    cut = None
    new_cut = n // 2
    while new_cut != cut:
        cut = new_cut
        hi, lo = self.statistics(data[:cut]), self.statistics(data[cut:])
        for i, x in enumerate(data):
            if x >= hi:
                continue
            if abs(x - lo) < abs(x - hi):
                new_cut = i
                break
    if hi > lo:
        self.cut = cut
    else:
        self.cut = n
    self.highest_stat = hi
    self.lowest_stat = lo
def get_active_data(self, imgobj, phil):
    active_areas = imgobj.get_tile_manager(phil).effective_tiling_as_flex_int()
    data = imgobj.linearintdata

    active_data = flex.double()
    for tile in range(len(active_areas) // 4):
        block = data.matrix_copy_block(
            i_row=active_areas[4 * tile + 0],
            i_column=active_areas[4 * tile + 1],
            n_rows=active_areas[4 * tile + 2] - active_areas[4 * tile + 0],
            n_columns=active_areas[4 * tile + 3] - active_areas[4 * tile + 1]
        ).as_1d().as_double()
        active_data = active_data.concatenate(block)

    #print "The mean is ", flex.mean(active_data), "on %d pixels" % len(active_data)
    order = flex.sort_permutation(active_data)
    #print "The 90-percentile pixel is ", active_data[order[int(0.9 * len(active_data))]]
    #print "The 99-percentile pixel is ", active_data[order[int(0.99 * len(active_data))]]
    adjlevel = 0.4
    brightness = 1.0
    percentile90 = active_data[order[int(0.9 * len(active_data))]]
    if percentile90 > 0.:
        self.correction = brightness * adjlevel / percentile90
    else:
        self.correction = 1.0
    return active_data
def get_files_sorted(pdb_files, hkl_files):
    ifn_p = open("/".join([pdb_files, "INDEX"]), "r")
    ifn_r = os.listdir(hkl_files)
    pdbs = flex.std_string()
    mtzs = flex.std_string()
    codes = flex.std_string()
    sizes = flex.double()
    cntr = 0
    for lp in ifn_p.readlines():
        lp = lp.strip()
        pdb_file_name = "/".join([pdb_files, lp])
        assert os.path.isfile(pdb_file_name)
        pdb_code = lp[-11:-7]
        #
        # FOR DEBUGGING
        #if pdb_code != "4w71": continue
        #
        lr = lp.replace("pdb", "r")
        hkl_file_name = "/".join([hkl_files, "%s.mtz" % pdb_code])
        if (os.path.isfile(hkl_file_name)):
            cntr += 1
            s = os.path.getsize(pdb_file_name) + os.path.getsize(hkl_file_name)
            pdbs.append(pdb_file_name)
            mtzs.append(hkl_file_name)
            codes.append(pdb_code)
            sizes.append(s)
            #if codes.size() == 100: break
    print("Total:", cntr)
    sel = flex.sort_permutation(sizes)
    pdbs = pdbs.select(sel)
    mtzs = mtzs.select(sel)
    codes = codes.select(sel)
    sizes = sizes.select(sel)
    return pdbs, mtzs, codes, sizes
def __init__(self, model, map_data):
    adopt_init_args(self, locals())
    # Find blob
    co = maptbx.connectivity(map_data=self.map_data, threshold=5.)
    #connectivity_map = co.result()
    #sorted_by_volume = sorted(
    #  zip(co.regions(), range(0, co.regions().size())), key=lambda x: x[0],
    #  reverse=True)
    #blob_indices = []
    #for p in sorted_by_volume:
    #  v, i = p
    #  print v, i
    #  if (i > 0):
    #    blob_indices.append(i)
    #######
    # You get everything you need:
    map_result = co.result()
    volumes = co.regions()
    print(volumes)
    coors = co.maximum_coors()
    vals = co.maximum_values()
    minb, maxb = co.get_blobs_boundaries_tuples()
    # This will give you the order
    i_sorted_by_volume = flex.sort_permutation(
        data=volumes, reverse=True)  # maybe co.regions() should go there
    for i in i_sorted_by_volume:
        print("blob #", i)
        print(coors[i])
        print(vals[i])
        print(maxb[i], minb[i])
def outlier_removal(O, outlier_factor=3):
    if (O.spots_xy0.size() < 3):
        return None
    distances = (O.spots_xy0 - O.predicted_spots).dot()**0.5
    from scitbx.array_family import flex
    perm = flex.sort_permutation(distances, reverse=True)
    if (distances[perm[0]] > distances[perm[1]] * outlier_factor):
        return perm[1:]
    return None
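# Hedged usage sketch (the scaffolding below is illustrative, not from the
# original source): outlier_removal only needs an object carrying the
# observed and predicted spot positions as flex.vec2_double arrays, so a
# simple namespace suffices for a quick check. The worst spot is dropped
# only if its residual exceeds the runner-up by outlier_factor.
import types
from scitbx.array_family import flex

def outlier_removal_demo():
    O = types.SimpleNamespace(
        spots_xy0=flex.vec2_double([(0, 0), (1, 1), (2, 2), (10, 10)]),
        predicted_spots=flex.vec2_double([(0, 0), (1, 1), (2, 2), (3, 3)]))
    # last spot is 7*sqrt(2) px off; returns a permutation excluding it
    return outlier_removal(O)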
def scales_vs_batch(scales, batches):
    """Determine batches and scale values per batch."""
    assert scales.size() == batches.size()
    perm = flex.sort_permutation(batches.data())
    batches = batches.data().select(perm)
    scales = scales.data().select(perm)
    return _batch_bins_and_data(batches, scales, function_to_apply=flex.mean)
def get_binned_intensities(self, n_bins=100):
    """
    Using self.ISIGI, bin the intensities using the following procedure:
    1) Find the minimum and maximum intensity values.
    2) Divide max-min by n_bins. This is the bin step size.
    @param n_bins number of bins to use.
    @return a tuple with an array of selections for each bin and an array
    of median intensity values for each bin.
    """
    print("Computing intensity bins.", end=' ', file=self.log)
    ISIGI = self.scaler.ISIGI
    meanI = ISIGI['mean_scaled_intensity']

    sels = []
    binned_intensities = []

    if True:
        # intensity range per bin is the same
        min_meanI = flex.min(meanI)
        step = (flex.max(meanI) - min_meanI) / n_bins
        print("Bin size:", step, file=self.log)

        self.bin_indices = flex.int(len(ISIGI), -1)

        for i in range(n_bins):
            if i + 1 == n_bins:
                sel = (meanI >= (min_meanI + step * i))
            else:
                sel = ((meanI >= (min_meanI + step * i))
                       & (meanI < (min_meanI + step * (i + 1))))
            if sel.all_eq(False):
                continue
            sels.append(sel)
            self.bin_indices.set_selected(sel, len(sels) - 1)
            binned_intensities.append((step / 2 + step * i) + min_meanI)
        assert (self.bin_indices == -1).count(True) == 0
    else:
        # n obs per bin is the same
        sorted_meanI = meanI.select(flex.sort_permutation(meanI))
        bin_size = len(meanI) / n_bins
        for i in range(n_bins):
            bin_min = sorted_meanI[int(i * bin_size)]
            sel = meanI >= bin_min
            if i + 1 == n_bins:
                bin_max = sorted_meanI[-1]
            else:
                bin_max = sorted_meanI[int((i + 1) * bin_size)]
                sel &= meanI < bin_max
            sels.append(sel)
            binned_intensities.append(bin_min + ((bin_max - bin_min) / 2))

    for i, (sel, intensity) in enumerate(zip(sels, binned_intensities)):
        print("Bin %02d, number of observations: % 10d, midpoint intensity: %f" % (
            i, sel.count(True), intensity), file=self.log)

    return sels, binned_intensities
def sort(self):
    perm = flex.sort_permutation(
        data=flex.abs(flex.double(self.array_of_a())), reverse=True)
    return sum(
        flex.select(self.array_of_a(), perm),
        flex.select(self.array_of_b(), perm),
        self.c(),
        self.use_c())
def optimise_basis_vectors(reciprocal_lattice_points, vectors):
    optimised = flex.vec3_double()
    for vector in vectors:
        minimised = BasisVectorMinimiser(reciprocal_lattice_points, vector)
        optimised.append(tuple(minimised.x))
    functionals = flex.double(
        [minimised.target.compute_functional(v) for v in vectors])
    perm = flex.sort_permutation(functionals)
    optimised = optimised.select(perm)
    return optimised
def choose_best(self):
    sel = self.rs < 0.2
    if (sel.size() == 0):
        return None
    bs = self.bs.select(sel)
    xrss = [self.xrss[i].deep_copy_scatterers() for i in sel.iselection()]
    sel = flex.sort_permutation(bs)
    bs_all_list = list(self.bs)
    bs_list = list(bs)
    print("best mc: ", bs_all_list.index(min(bs_list)) - 1)
    return xrss[sel[0]]
def evolve(self):
    for ii in range(self.population_size):
        rnd = flex.random_double(self.population_size - 1)
        permut = flex.sort_permutation(rnd)
        # make parent indices
        i1 = permut[0]
        if (i1 >= ii):
            i1 += 1
        i2 = permut[1]
        if (i2 >= ii):
            i2 += 1
        i3 = permut[2]
        if (i3 >= ii):
            i3 += 1
        #
        x1 = self.population[i1]
        x2 = self.population[i2]
        x3 = self.population[i3]
        if self.f is None:
            use_f = random.random() / 2.0 + 0.5
        else:
            use_f = self.f
        vi = x1 + use_f * (x2 - x3)
        # prepare the offspring vector please
        rnd = flex.random_double(self.vector_length)
        permut = flex.sort_permutation(rnd)
        test_vector = self.population[ii].deep_copy()
        # first the parameters that always cross over
        for jj in range(self.vector_length):
            if (jj < self.n_cross):
                test_vector[permut[jj]] = vi[permut[jj]]
            else:
                if (rnd[jj] > self.cr):
                    test_vector[permut[jj]] = vi[permut[jj]]
        # get the score please
        test_score = self.evaluator.target(test_vector)
        # check if the score is lower
        if test_score < self.scores[ii]:
            self.scores[ii] = test_score
            self.population[ii] = test_vector
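# A minimal sketch (not part of the original module) of the idiom evolve()
# relies on twice: applying flex.sort_permutation to a vector of uniform
# random numbers yields a uniformly random permutation of 0..n-1, which is
# used both to pick distinct parent indices and to shuffle the crossover
# order.
from scitbx.array_family import flex

def random_permutation_demo(n=5):
    perm = flex.sort_permutation(flex.random_double(n))
    return list(perm)  # e.g. [3, 0, 4, 1, 2]; each ordering equally likely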
def __init__(self, rs_vectors, percentile=0.05):
    from scitbx.array_family import flex
    NEAR = 10
    self.NNBIN = 5  # target number of neighbors per histogram bin

    # nearest neighbor analysis
    from annlib_ext import AnnAdaptor
    query = flex.double()
    for spot in rs_vectors:  # spots, in reciprocal space xyz
        query.append(spot[0])
        query.append(spot[1])
        query.append(spot[2])
    assert len(rs_vectors) > NEAR  # Can't do nearest neighbor with too few spots

    IS_adapt = AnnAdaptor(data=query, dim=3, k=1)
    IS_adapt.query(query)

    direct = flex.double()
    for i in range(len(rs_vectors)):
        direct.append(1.0 / math.sqrt(IS_adapt.distances[i]))

    # determine the most probable nearest neighbor distance (direct space)
    hst = flex.histogram(direct, n_slots=int(len(rs_vectors) / self.NNBIN))
    centers = hst.slot_centers()
    islot = hst.slots()
    highest_bin_height = flex.max(islot)
    most_probable_neighbor = centers[list(islot).index(highest_bin_height)]

    if False:  # to print out the histogramming analysis
        smin, smax = flex.min(direct), flex.max(direct)
        stats = flex.mean_and_variance(direct)
        import sys
        out = sys.stdout
        print("     range: %6.2f - %.2f" % (smin, smax), file=out)
        print("     mean:  %6.2f +/- %6.2f on N = %d" % (
            stats.mean(), stats.unweighted_sample_standard_deviation(),
            direct.size()), file=out)
        hst.show(f=out, prefix="    ", format_cutoffs="%6.2f")
        print("", file=out)

    # determine the 5th-percentile direct-space distance
    perm = flex.sort_permutation(direct, reverse=True)
    percentile = direct[perm[int(percentile * len(rs_vectors))]]

    MAXTOL = 1.5  # margin of error for max unit cell estimate
    self.max_cell = max(MAXTOL * most_probable_neighbor, MAXTOL * percentile)

    if False:
        self.plot(direct)
def generate_and_score_samples(self):
    sample_list = []
    target_list = flex.double()
    for ii in range(self.sample_size):
        x = random_transform.t_variate(a=max(2, self.n - 1), N=self.n)
        x = x * self.sigma + self.mean
        t = self.compute_target(x)
        sample_list.append(x)
        target_list.append(t)
    order = flex.sort_permutation(flex.double(target_list))
    return sample_list, t, order
def set_up_hitfinder(self):
    # See r17537 of mod_average.py.
    device = cspad_tbx.address_split(self.address)[2]
    if device == 'Cspad':
        img_dim = (1765, 1765)
        pixel_size = cspad_tbx.pixel_size
    elif device == 'marccd':
        img_dim = (4500, 4500)
        pixel_size = 0.079346
    elif device == 'Rayonix':
        img_dim = rayonix_tbx.get_rayonix_detector_dimensions(self.bin_size)
        pixel_size = rayonix_tbx.get_rayonix_pixel_size(self.bin_size)
    else:
        raise RuntimeError("Unsupported device %s" % self.address)

    if self.beam_center is None:
        self.beam_center = [0, 0]

    self.hitfinder_d = cspad_tbx.dpack(
        active_areas=self.active_areas,
        beam_center_x=pixel_size * self.beam_center[0],
        beam_center_y=pixel_size * self.beam_center[1],
        data=flex.int(flex.grid(img_dim[0], img_dim[1]), 0),
        xtal_target=self.m_xtal_target)

    if device == 'Cspad':
        # Figure out which ASIC:s are on the central four sensors. This
        # only applies to the CSPAD.
        assert len(self.active_areas) % 4 == 0
        distances = flex.double()
        for i in range(0, len(self.active_areas), 4):
            cenasic = (
                (self.active_areas[i + 0] + self.active_areas[i + 2]) / 2,
                (self.active_areas[i + 1] + self.active_areas[i + 3]) / 2)
            distances.append(math.hypot(cenasic[0] - self.beam_center[0],
                                        cenasic[1] - self.beam_center[1]))
        orders = flex.sort_permutation(distances)

        # Use the central 8 ASIC:s (central 4 sensors).
        flags = flex.int(len(self.active_areas) // 4, 0)
        for i in range(8):
            flags[orders[i]] = 1
        self.asic_filter = "distl.tile_flags=" + ",".join(
            ["%1d" % b for b in flags])

    elif device == 'marccd':
        # There is only one active area for the MAR CCD, so use it.
        self.asic_filter = "distl.tile_flags=1"
    elif device == 'Rayonix':
        # There is only one active area for the Rayonix, so use it.
        self.asic_filter = "distl.tile_flags=1"
def write_sorted_moduli_as_mathematica_plot(f, filename):
    """ To obtain fig. 1 in ref [2] in module charge_flipping """
    abs_f = flex.abs(f.data())
    sorted = abs_f.select(flex.sort_permutation(abs_f))
    sorted /= flex.max(sorted)
    mf = open(os.path.expanduser(filename), 'w')
    print('fp1 = {', file=mf)
    for f_value in sorted:
        print("%f, " % f_value, file=mf)
    print("1 };", file=mf)
    print("ListPlot[fp1]", file=mf)
    mf.close()
def stats_profile(data):
    """Return the ratio of the 95th-percentile value to the 5th-percentile
    value. This gives some measure of how broad the spread is between big
    and small spots."""
    from scitbx.array_family import flex
    fdata = flex.double()
    for item in data:
        fdata.append(item)
    perm = flex.sort_permutation(fdata)
    percentile05 = int(0.05 * len(fdata))
    percentile95 = int(0.95 * len(fdata))
    return fdata[perm[percentile95]] / fdata[perm[percentile05]]
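# Hypothetical usage sketch (the data below is made up, not from the
# original source): stats_profile accepts any iterable of numbers, since it
# copies its input into a flex.double before ranking.
def stats_profile_demo():
    spot_areas = [12.0, 3.5, 40.0, 8.1, 22.9, 5.5, 17.3, 9.8, 30.2, 6.4]
    # ratio of the 95th-percentile to the 5th-percentile value
    return stats_profile(spot_areas)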
def sort_cosets(self):
    new_partitions = []
    for pp in self.partitions:
        orders = []
        for op in pp:
            orders.append(op.r().info().type())
        orders = flex.sort_permutation(flex.int(orders))
        tmp = partition_t()
        for ii in orders:
            tmp.append(pp[ii])
        new_partitions.append(tmp)
    self.partitions = new_partitions
def percentile(x, percent):
    ## see http://cnx.rice.edu/content/m10805/latest/
    assert percent >= 0.0
    assert percent <= 1.0
    n = x.size()
    order = flex.sort_permutation(x)
    np = float(n + 1) * percent
    ir = int(np)
    fr = np - int(np)
    tmp1 = x[order[ir - 1]]
    tmp2 = x[order[ir]]
    result = tmp1 + fr * (tmp2 - tmp1)
    return result
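# Hedged usage sketch (illustrative, not from the original module): with the
# (n+1)*p estimator above, the requested quantile is linearly interpolated
# between the two bracketing order statistics of the unsorted input array.
from scitbx.array_family import flex

def percentile_demo():
    x = flex.double([3, 1, 4, 1, 5, 9, 2, 6, 5, 10])
    # median: np = 11*0.5 = 5.5, so interpolate halfway between the
    # 5th and 6th smallest values
    return percentile(x, 0.5)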
def plot_relative_anomalous_cc(self, racc, labels=None):
    perm = flex.sort_permutation(racc)
    fig = pyplot.figure(dpi=1200, figsize=(16, 12))
    pyplot.bar(range(len(racc)), list(racc.select(perm)))
    if labels is None:
        labels = ["%.0f" % (j + 1) for j in perm]
    assert len(labels) == len(racc)
    pyplot.xticks([i + 0.5 for i in range(len(racc))], labels)
    locs, labels = pyplot.xticks()
    pyplot.setp(labels, rotation=70)
    pyplot.xlabel("Dataset")
    pyplot.ylabel("Relative anomalous correlation coefficient")
    fig.savefig("racc.png")
def show_minimize_multi_histogram(f=None, reset=True):
    global minimize_multi_histogram
    minimizer_types = list(minimize_multi_histogram.keys())
    counts = flex.double(list(minimize_multi_histogram.values()))
    perm = flex.sort_permutation(data=counts, reverse=True)
    minimizer_types = flex.select(minimizer_types, perm)
    counts = counts.select(perm)
    n_total = flex.sum(counts)
    for m, c in zip(minimizer_types, counts):
        print("%-39s %5.3f %6d" % (m, c / max(1, n_total), c), file=f)
    print(file=f)
    if (reset):
        minimize_multi_histogram = {"None": 0}
def find_top(self, topn):
    orders = flex.sort_permutation(self.scores)
    for ii in range(topn):
        o = orders[ii]
        b = o // (self.ngrid * self.ngrid)
        a = (o - self.ngrid * self.ngrid * b) // self.ngrid
        g = o - self.ngrid * self.ngrid * b - self.ngrid * a
        b = self.beta[b]
        g = smath.pi * 2.0 * (float(g) / (self.ngrid - 1))
        a = smath.pi * 2.0 * (float(a) / (self.ngrid - 1))
        self.top_align.append(flex.double((a, b, g)))
        self.top_scores.append(self.scores[o])
def set_score(O):
    if (len(O.array) == 1):
        O.i_small = 0
        _ = O.array[0]
        O.score = _.n / (1 + _.rms)
    else:
        from scitbx.array_family import flex
        rms_list = flex.double([_.rms for _ in O.array])
        sort_perm = flex.sort_permutation(rms_list)
        O.i_small = sort_perm[0]
        i_2nd = sort_perm[1]
        rms_min, rms_2nd = [rms_list[_] for _ in sort_perm[:2]]
        O.score = (rms_2nd - rms_min) * (O.array[O.i_small].n + O.array[i_2nd].n)
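# Hedged usage sketch (the scaffolding is mine, not from the source):
# set_score expects O.array entries carrying a per-fit observation count n
# and an rms; the score rewards a clear separation between the best and
# second-best rms, weighted by how many observations support them.
import types

def set_score_demo():
    fit = lambda n, rms: types.SimpleNamespace(n=n, rms=rms)
    O = types.SimpleNamespace(array=[fit(100, 0.4), fit(80, 0.9), fit(60, 1.5)])
    set_score(O)
    # here: i_small = 0, score = (0.9 - 0.4) * (100 + 80) = 90.0
    return O.i_small, O.score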
def merge(O, other, pair_info, reindexing_assistant, image_mdls):
    # TODO: refine combined scales so that rms for the entire cluster
    # is minimal, then compute esti
    miis_i, esti_i = O.miis_perms[0], O.esti_perms[0]
    j_perm = pair_info.i_small
    miis_j, esti_j = other.miis_perms[j_perm], other.esti_perms[j_perm]
    scale_j = pair_info.array[j_perm].scale
    mrg_miis = miis_i.concatenate(miis_j)
    mrg_esti = esti_i.concatenate(esti_j * (1 / scale_j))
    from scitbx.array_family import flex
    sort_perm = flex.sort_permutation(mrg_miis)
    mrg_miis = mrg_miis.select(sort_perm)
    mrg_esti = mrg_esti.select(sort_perm)
    new_miis = flex.size_t()
    new_esti = flex.double()
    n = mrg_miis.size()
    i = 0
    while (i < n):
        new_miis.append(mrg_miis[i])
        if (i + 1 == n or mrg_miis[i] != mrg_miis[i + 1]):
            new_esti.append(mrg_esti[i])
            i += 1
        else:
            new_esti.append((mrg_esti[i] + mrg_esti[i + 1]) / 2)
            i += 2
    for i_img, i_perm_and_scale_ in other.i_perm_and_scale_by_i_img.items():
        O.i_perm_and_scale_by_i_img[i_img] = i_perm_and_scale(
            i_perm=reindexing_assistant.i_inv_j_multiplication_table[
                j_perm][i_perm_and_scale_.i_perm],
            scale=scale_j * i_perm_and_scale_.scale)
    O.miis_perms = []
    O.esti_perms = []
    for perm in reindexing_assistant.inv_perms:
        m = perm.select(new_miis)
        p = flex.sort_permutation(data=m)
        O.miis_perms.append(m.select(p))
        O.esti_perms.append(new_esti.select(p))
def detector_origin_analysis(self):
    self.FRAMES["detector_origin_x_refined"] = flex.double()
    self.FRAMES["detector_origin_y_refined"] = flex.double()
    self.FRAMES["distance_refined"] = flex.double()
    for iframe in range(len(self.FRAMES["frame_id"])):
        if iframe < self.n_refined_frames:
            SIGN = -1.
            PIXEL_SZ = 0.11  # mm/pixel
            detector_origin = col(
                (-self.FRAMES["beam_x"][iframe]
                 + SIGN * PIXEL_SZ * self.frame_translations.x[2 * iframe],
                 -self.FRAMES["beam_y"][iframe]
                 + SIGN * PIXEL_SZ * self.frame_translations.x[1 + 2 * iframe],
                 0.))
            self.FRAMES["detector_origin_x_refined"].append(detector_origin[0])
            self.FRAMES["detector_origin_y_refined"].append(detector_origin[1])
            self.FRAMES["distance_refined"].append(
                self.frame_distances.x[iframe]
                + self.FRAMES["distance"][iframe])
    xm = flex.mean_and_variance(self.FRAMES["detector_origin_x_refined"])
    ym = flex.mean_and_variance(self.FRAMES["detector_origin_y_refined"])
    print("Beam x mean %7.3f sigma %7.3f mm" % (
        xm.mean(), xm.unweighted_sample_standard_deviation()))
    print("Beam y mean %7.3f sigma %7.3f mm" % (
        ym.mean(), ym.unweighted_sample_standard_deviation()))

    time_series = False
    import os
    files = [os.path.basename(f) for f in self.FRAMES["unique_file_name"]]
    timestamps = [int("".join([a for a in name if a.isdigit()])) // 1000
                  for name in files]
    floats = flex.double([float(t) for t in timestamps])[
        :len(self.FRAMES["detector_origin_x_refined"])]
    order = flex.sort_permutation(floats)
    time_sorted_x_beam = self.FRAMES["detector_origin_x_refined"].select(order)
    time_sorted_y_beam = self.FRAMES["detector_origin_y_refined"].select(order)
    if time_series:
        from matplotlib import pyplot as plt
        plt.plot(range(len(order)), time_sorted_x_beam, "r-")
        plt.plot(range(len(order)), time_sorted_y_beam, "b-")
        plt.show()
    for item in order:
        print(files[item], "%8.3f %8.3f dist %8.3f" % (
            self.FRAMES["detector_origin_x_refined"][item],
            self.FRAMES["detector_origin_y_refined"][item],
            self.FRAMES["distance_refined"][item]))
def __init__(self, working_phil):
    self.working_phil = working_phil
    # should correspond to low-angle scattering of the crystal
    # structure---up to 20 Angstroms
    from xfel.cxi.display_powder_arcs import get_mmtbx_icalc
    intensities = get_mmtbx_icalc(
        code=working_phil.viewer.calibrate_pdb.code,
        d_min=working_phil.viewer.calibrate_pdb.d_min,
        anomalous_flag=False)
    self.hkl_list = intensities.indices()
    self.uc = intensities.unit_cell()
    spacings = self.uc.d(self.hkl_list)
    rev_order = flex.sort_permutation(spacings, reverse=True)
    for x in range(len(rev_order)):
        print(self.hkl_list[rev_order[x]], spacings[rev_order[x]])
    self.experimental_d = spacings.select(rev_order)
def full_width_half_max(x, y):
    y = y / flex.max(y)
    perm = flex.sort_permutation(x)
    y = y.select(perm)
    x = x.select(perm)
    x_lower = None
    x_upper = None
    for x_i, y_i in zip(x, y):
        if x_lower is None:
            if y_i >= 0.5:
                x_lower = x_i
        elif x_upper is None:
            if y_i <= 0.5:
                x_upper = x_i
        else:
            break
    return (x_upper - x_lower)
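# Illustrative check (the test data is mine, not from the source; assumes x
# and y are flex.double arrays describing a single peak). For a symmetric
# triangular peak rising from 0 to 1 and back, the half-maximum crossings
# sit halfway up each flank, so the FWHM is half the base width.
from scitbx.array_family import flex

def fwhm_demo():
    x = flex.double([0, 1, 2, 3, 4])
    y = flex.double([0.0, 0.5, 1.0, 0.5, 0.0])
    return full_width_half_max(x, y)  # 2.0 for this peak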
def build_usables(work_params, reindexing_assistant, image_mdls):
    from scitbx.array_family import flex
    usable_fractions = flex.double()
    usables = []
    for i_img, im in enumerate(image_mdls.array):
        usable = im.usable(
            partiality_threshold=work_params.usable_partiality_threshold)
        usable_fractions.append(
            usable.miis.size() / im.miller_index_i_seqs.size())
        miis_perms = []
        for perm in reindexing_assistant.inv_perms:
            m = perm.select(usable.miis)
            p = flex.sort_permutation(data=m)
            miis_perms.append((m.select(p), usable.esti.select(p)))
        usables.append(miis_perms)
    print("Usable fraction of estimated image intensities:")
    usable_fractions.min_max_mean().show(prefix="  ")
    print()
    sys.stdout.flush()
    return usables