def is_sym_equiv_interaction_simple(unit_cell,
                                    i_seq,
                                    site_frac_i,
                                    j_seq,
                                    site_frac_j,
                                    special_op_j,
                                    rt_mx_ji_1,
                                    rt_mx_ji_2):
    """Test whether rt_mx_ji_1 and rt_mx_ji_2 generate equivalent i-j
    interactions.

    The distance from site i to the two mapped copies of site j is compared
    twice: once for the unperturbed site j and once after nudging site j by
    small irrational Cartesian shifts (to break accidental coincidences).
    Returns True only if both comparisons agree within 1.e-3.
    """
    shift_scale = unit_cell.shortest_vector_sq() ** .5 * .1
    trial_shifts = [shift_scale * u
                    for u in [math.sqrt(2), math.sqrt(3), math.sqrt(5)]]
    frac = unit_cell.fractionalize
    orth = unit_cell.orthogonalize
    dist = unit_cell.distance
    for shifts in [[0, 0, 0], trial_shifts]:
        shifted_cart = [x + s for x, s in zip(orth(site_frac_j), shifts)]
        site_j_mod = special_op_j * frac(shifted_cart)
        if shifts == [0, 0, 0] or j_seq != i_seq:
            site_i_mod = site_frac_i
        else:
            # i and j are the same atom: compare against the shifted copy.
            site_i_mod = site_j_mod
        d1 = dist(rt_mx_ji_1 * site_j_mod, site_i_mod)
        d2 = dist(rt_mx_ji_2 * site_j_mod, site_i_mod)
        # First (unshifted) pass acts as an early-out filter.
        if shifts == [0, 0, 0] and abs(d1 - d2) >= 1.e-3:
            return False
    # d1/d2 here come from the shifted (second) pass.
    return abs(d1 - d2) < 1.e-3
def weighted_means(self):
    """Return Boltzmann-weighted statistics over the stored fit results.

    Returns (mean_rg, sd_rg, mean_i0, sd_i0, mean_score) where the rg/i0
    moments are weighted by exp(-score/2).

    NOTE: shifts ``self.scores`` in place (subtracts the minimum) so the
    exponential weights stay numerically bounded.
    """
    lowest = flex.min(self.scores)
    mean_score = flex.mean(self.scores)  # computed before the shift
    self.scores = self.scores - lowest
    weights = flex.exp(-self.scores * 0.50)
    weight_sum = 1e-12 + flex.sum(weights)  # guard against all-zero weights
    mean_rg = flex.sum(weights * self.rg) / weight_sum
    msq_rg = flex.sum(weights * self.rg * self.rg) / weight_sum
    mean_i0 = flex.sum(weights * self.i0) / weight_sum
    msq_i0 = flex.sum(weights * self.i0 * self.i0) / weight_sum
    sd_i0 = math.sqrt(msq_i0 - mean_i0 * mean_i0)
    sd_rg = math.sqrt(msq_rg - mean_rg * mean_rg)
    return mean_rg, sd_rg, mean_i0, sd_i0, mean_score
def velocity_rescaling(self):
    """Rescale velocities by sqrt(T_target / T_current) and record the
    kinetic energy/temperature of the removed velocity component.

    NOTE(review): in the protein-thermostat branch the near-zero guard
    tests ``self.kt`` while the ratio uses ``self.non_solvent_kt`` --
    confirm this asymmetry is intended.
    """
    if self.protein_thermostat and self.er_data is not None:
        reference_kt = self.non_solvent_kt
    else:
        reference_kt = self.kt
    # Avoid division by (near-)zero current temperature.
    if self.kt.temperature <= 1.0e-10:
        self.v_factor = 1.0
    else:
        self.v_factor = math.sqrt(self.temperature / reference_kt.temperature)
    self.vyz_vscale_remove = self.vxyz * (1.0 - self.v_factor)
    self.kt_vscale_remove = dynamics.kinetic_energy_and_temperature(
        self.vyz_vscale_remove, self.weights)
    self.vxyz = self.vxyz * self.v_factor
def get_rg(self):
    """Radius of gyration from the p(r) curve sampled on 100 points of
    (0, dmax].

    Rg^2 = integral(r^2 p(r)) / (2 integral(p(r))); the final division by
    1.414 approximates sqrt(2) -- TODO(review): confirm whether the exact
    math.sqrt(2) is acceptable downstream before changing it.
    """
    radii = self.dmax * flex.double(range(1, 101)) / 100.0
    pr = self.f(radii)  # p(r) evaluated on the radial grid
    second_moment = flex.sum(flex.pow(radii, 2.0) * pr)
    norm = flex.sum(pr)
    return math.sqrt(second_moment / norm) / 1.414
def compute_all(self):
    """Run the Guinier fit over every q-limit window in ``self.lims``,
    filter out unusable results, and accumulate the surviving ones.

    Populates self.rg2s (as Rg, not Rg^2), self.lnis, self.free_scores,
    self.stop_qs and self.start_qs in parallel.
    """
    self.rg2s = []
    self.lnis = []
    self.free_scores = []
    self.stop_qs = []
    self.start_qs = []
    for lim in self.lims:
        rg2, lni, score, free_score, stop_q, start_q = self.compute_rg(
            lim[0], lim[1])
        # filter() takes (stop_q, start_q) and returns (start_q, stop_q).
        rg2, lni, score, free_score, start_q, stop_q = self.filter(
            rg2, lni, score, free_score, stop_q, start_q)
        if rg2 is not None:
            self.accumulator.add_data(
                math.sqrt(rg2), math.exp(lni), free_score)
            self.rg2s.append(math.sqrt(rg2))
            self.lnis.append(lni)
            # NOTE(review): 'score' (not free_score) is stored here; kept
            # as-is since downstream code may rely on it -- confirm intent.
            self.free_scores.append(score)
            self.stop_qs.append(stop_q)
            # BUG FIX: previously appended stop_q here as well, so
            # start_qs silently duplicated stop_qs.
            self.start_qs.append(start_q)
def exercise_match_bijvoet_mates():
    """Exercise miller.match_bijvoet_mates: pairing of Friedel/Bijvoet
    mates, hemisphere selections, and arithmetic on paired data."""
    # (1,2,3)/(-1,-2,-3) and (2,3,4)/(-2,-3,-4) are Bijvoet pairs;
    # (3,4,5) is an unmatched "+" single.
    h0 = flex.miller_index(((1,2,3), (-1,-2,-3), (2,3,4), (-2,-3,-4), (3,4,5)))
    d0 = flex.double((1,2,3,4,5))
    # All three constructor overloads must accept the same index array.
    bm = miller.match_bijvoet_mates(
        sgtbx.space_group_type(), h0)
    bm = miller.match_bijvoet_mates(
        sgtbx.reciprocal_space_asu(sgtbx.space_group_type()), h0)
    bm = miller.match_bijvoet_mates(
        h0)
    assert tuple(bm.pairs()) == ((0,1), (2,3))
    assert tuple(bm.singles("+")) == (4,)
    assert tuple(bm.singles("-")) == ()
    assert bm.n_singles() != 0
    assert tuple(bm.pairs_hemisphere_selection("+")) == (0, 2)
    assert tuple(bm.pairs_hemisphere_selection("-")) == (1, 3)
    assert tuple(bm.singles_hemisphere_selection("+")) == (4,)
    assert tuple(bm.singles_hemisphere_selection("-")) == ()
    assert tuple(bm.miller_indices_in_hemisphere("+")) == ((1,2,3), (2,3,4))
    assert tuple(bm.miller_indices_in_hemisphere("-")) == ((-1,-2,-3),(-2,-3,-4))
    # Pairwise arithmetic over the matched data.
    assert approx_equal(tuple(bm.minus(d0)), (-1, -1))
    assert approx_equal(
        tuple(bm.additive_sigmas(d0)),
        [math.sqrt(x*x+y*y) for x,y in ((1,2), (3,4))])
    assert approx_equal(tuple(bm.average(d0)), (3/2., 7/2.))
    # A duplicate index must make matching fail.
    h0.append((1,2,3))
    try:
        miller.match_bijvoet_mates(h0)
    except Exception:
        pass
    else:
        raise Exception_expected
def compute_functional_and_gradients(self):
    """Return (f, g): target value and packed gradient vector for the
    current parameter vector, including the optional occupancy penalty.

    Python 2 code (print statements).
    """
    u_iso_refinable_params = self.apply_shifts()
    self.compute_target(compute_gradients=True)
    self.f = self.target_result.target()
    if (self.first_target_value is None):
        # Remember the value at the start of the minimization.
        self.first_target_value = self.f
    if (self.occupancy_penalty is not None
            and self.grad_flags_counts != 0):
        # Add a penalty term per scatterer occupancy.
        occupancies = self.xray_structure.scatterers().extract_occupancies( )
        for occupancy in occupancies:
            self.f += self.occupancy_penalty.functional( occupancy=occupancy)
    self.g = self.structure_factor_gradients(
        xray_structure=self.xray_structure,
        u_iso_refinable_params=u_iso_refinable_params,
        miller_set=self.target_functor.f_obs(),
        d_target_d_f_calc=self.target_result.derivatives(),
        n_parameters=self.x.size(),
        algorithm=self.structure_factor_algorithm).packed()
    if (self.occupancy_penalty is not None
            and self.grad_flags_counts != 0):
        # Fold the penalty gradients into the packed gradient vector.
        g = flex.double()
        for occupancy in occupancies:
            g.append(self.occupancy_penalty.gradient(occupancy=occupancy))
        del occupancies
        add_gradients(scatterers=self.xray_structure.scatterers(),
                      xray_gradients=self.g,
                      occupancy_gradients=g)
        del g
    if (self.verbose > 1):
        print "xray.minimization line search: f,rms(g):",
        print self.f, math.sqrt(flex.mean_sq(self.g))
    return self.f, self.g
def compute_functional_and_gradients(self):
    """Compute the refinement target and its packed gradients.

    Applies the current shifts, evaluates the target (with gradients),
    optionally adds an occupancy penalty to both f and g, and returns
    (f, g). Python 2 code (print statements).
    """
    u_iso_refinable_params = self.apply_shifts()
    self.compute_target(compute_gradients=True)
    self.f = self.target_result.target()
    if (self.first_target_value is None):
        self.first_target_value = self.f
    if (self.occupancy_penalty is not None
            and self.grad_flags_counts != 0):
        occupancies = self.xray_structure.scatterers().extract_occupancies()
        for occupancy in occupancies:
            self.f += self.occupancy_penalty.functional(occupancy=occupancy)
    self.g = self.structure_factor_gradients(
        xray_structure=self.xray_structure,
        u_iso_refinable_params=u_iso_refinable_params,
        miller_set=self.target_functor.f_obs(),
        d_target_d_f_calc=self.target_result.derivatives(),
        n_parameters=self.x.size(),
        algorithm=self.structure_factor_algorithm).packed()
    if (self.occupancy_penalty is not None
            and self.grad_flags_counts != 0):
        # Penalty gradients, one per occupancy, merged into self.g.
        g = flex.double()
        for occupancy in occupancies:
            g.append(self.occupancy_penalty.gradient(occupancy=occupancy))
        del occupancies
        add_gradients(
            scatterers=self.xray_structure.scatterers(),
            xray_gradients=self.g,
            occupancy_gradients=g)
        del g
    if (self.verbose > 1):
        print "xray.minimization line search: f,rms(g):",
        print self.f, math.sqrt(flex.mean_sq(self.g))
    return self.f, self.g
def generate_image(n, l, N=100):
    """Sample the (n, l) 2D Zernike polynomial on a (2N+1)^2 grid.

    Writes each sample to 'original.dat' and returns the image as a
    flex.vec3_double of (x+N, y+N, value) triplets; points outside the
    unit disk get value 0.
    """
    nmax = max(20, n)
    # Retained for parity with the (removed) radial-polynome code path;
    # presumably side-effect free -- TODO confirm it can be dropped.
    lfg = scitbx.math.log_factorial_generator(nmax)
    rap = scitbx.math.zernike_2d_polynome(n, l)
    image = flex.vec3_double()
    # Context manager guarantees the file is closed even if rap.f raises
    # (the original leaked the handle on exception).
    with open('original.dat', 'w') as original:
        for x in range(-N, N + 1):
            for y in range(-N, N + 1):
                rr = math.sqrt(x * x + y * y) / N
                if rr > 1.0:
                    value = 0.0
                else:
                    tt = math.atan2(y, x)
                    value = rap.f(rr, tt)
                    value = value.real
                image.append([x + N, y + N, value])
                print(x + N, y + N, value, file=original)
    return image
def velocity_rescaling(self):
    """Velocity rescaling toward self.temperature.

    Computes v_factor = sqrt(T_target / T_current), stores the removed
    velocity component and its kinetic energy/temperature, then scales
    self.vxyz. NOTE(review): the protein-thermostat branch guards on
    self.kt but divides by self.non_solvent_kt -- confirm intended.
    """
    use_protein_kt = (self.protein_thermostat and self.er_data is not None)
    if self.kt.temperature <= 1.e-10:
        # Current temperature effectively zero: do not rescale.
        self.v_factor = 1.0
    elif use_protein_kt:
        self.v_factor = math.sqrt(
            self.temperature / self.non_solvent_kt.temperature)
    else:
        self.v_factor = math.sqrt(self.temperature / self.kt.temperature)
    self.vyz_vscale_remove = self.vxyz * (1.0 - self.v_factor)
    self.kt_vscale_remove = dynamics.kinetic_energy_and_temperature(
        self.vyz_vscale_remove, self.weights)
    self.vxyz = self.vxyz * self.v_factor
def test_resample(seed=0):
    """Check that bootstrap resampling reproduces the source mean.

    Draws 1000 resamples of size 100 from both the non-parametric and the
    smooth bootstrap and asserts each mean-of-means lies within 4 sigma
    of the true mean (9.5) of range(20).
    """
    source = flex.double(range(20))
    primer = scitbx.math.non_parametric_bootstrap(source, -seed - 1)
    obs = primer.draw(100)
    npb = scitbx.math.non_parametric_bootstrap(obs, -seed - 2)
    sbs = scitbx.math.smooth_bootstrap(obs, -seed - 3)
    mean_t = flex.mean(source)
    var_t = flex.mean(source * source) - mean_t * mean_t
    n_sample = 1e3
    size = 100.0
    sum_np = 0
    sq_np = 0
    sum_sm = 0
    sq_sm = 0
    for _ in range(int(n_sample)):
        m_np = flex.mean(npb.draw(int(size)))
        sum_np += m_np
        sq_np += m_np * m_np
        m_sm = flex.mean(sbs.draw(int(size)))
        sum_sm += m_sm
        sq_sm += m_sm * m_sm
    mean_of_mean = sum_np / n_sample
    sd_of_mean = math.sqrt(sq_np / n_sample - mean_of_mean * mean_of_mean)
    mean_sbs = sum_sm / n_sample
    std_sbs = math.sqrt(sq_sm / n_sample - mean_sbs * mean_sbs)
    assert math.fabs(9.5 - mean_of_mean) / sd_of_mean < 4
    assert math.fabs(9.5 - mean_sbs) / std_sbs < 4
def test_resample(seed=0):
    """Statistical sanity test for the scitbx bootstrap samplers.

    The mean of means over many resamples of range(20) must fall within
    4 standard deviations of the exact mean 9.5.
    """
    base = flex.double(range(20))
    seeder = scitbx.math.non_parametric_bootstrap(base, -seed - 1)
    obs = seeder.draw(100)
    npb = scitbx.math.non_parametric_bootstrap(obs, -seed - 2)
    sbs = scitbx.math.smooth_bootstrap(obs, -seed - 3)
    mean_t = flex.mean(base)
    var_t = flex.mean(base * base) - mean_t * mean_t
    n_sample = 1e3
    size = 100.0
    first_np = second_np = 0
    first_sm = second_sm = 0
    for _ in range(int(n_sample)):
        m = flex.mean(npb.draw(int(size)))
        first_np += m
        second_np += m * m
        m = flex.mean(sbs.draw(int(size)))
        first_sm += m
        second_sm += m * m
    mean_of_mean = first_np / n_sample
    # 'var_of_mean' in the original is actually a standard deviation.
    sd_np = math.sqrt(second_np / n_sample - mean_of_mean * mean_of_mean)
    mean_sbs = first_sm / n_sample
    sd_sm = math.sqrt(second_sm / n_sample - mean_sbs * mean_sbs)
    assert math.fabs(9.5 - mean_of_mean) / sd_np < 4
    assert math.fabs(9.5 - mean_sbs) / sd_sm < 4
def exercise(space_group_info, n_scatterers=8, d_min=2, verbose=0, e_min=1.5):
    """Exercise direct-space vs reciprocal-space squaring on a random
    structure and check that the resulting phases agree.

    Python 2 code (print statements, xrange).
    """
    structure = random_structure.xray_structure(
        space_group_info,
        elements=["const"] * n_scatterers,
        volume_per_atom=200,
        min_distance=3.0,
        general_positions_only=True,
        u_iso=0.0,
    )
    if 0 or verbose:
        structure.show_summary().show_scatterers()
    f_calc = structure.structure_factors(
        d_min=d_min, anomalous_flag=False).f_calc()
    f_obs = abs(f_calc)
    # Quasi-normalization input: scale |F| by sqrt(order_p * N) / n_ltr.
    q_obs = miller.array(
        miller_set=f_obs,
        data=f_obs.data()
        / math.sqrt(f_obs.space_group().order_p() * n_scatterers)
        / f_obs.space_group().n_ltr(),
    )
    q_obs = q_obs.sort(by_value="abs")
    q_obs.setup_binner(auto_binning=True)
    n_obs = q_obs.quasi_normalize_structure_factors()
    # Quasi-normalized vs normalized values should be linearly related
    # with slope ~1 and intercept ~0.
    r = flex.linear_regression(q_obs.data(), n_obs.data())
    if 0 or verbose:
        r.show_summary()
    assert r.is_well_defined()
    assert abs(r.y_intercept()) < 0.1
    assert abs(r.slope() - 1) < 0.2
    q_large = q_obs.select(
        q_obs.quasi_normalized_as_normalized().data() > e_min)
    if 0 or verbose:
        print "Number of e-values > %.6g: %d" % (e_min, q_large.size())
    # Second random structure in the same cell supplies starting phases.
    other_structure = random_structure.xray_structure(
        space_group_info,
        elements=["const"] * n_scatterers,
        volume_per_atom=200,
        min_distance=3.0,
        general_positions_only=True,
        u_iso=0.0,
    )
    assert other_structure.unit_cell().is_similar_to(structure.unit_cell())
    q_calc = q_large.structure_factors_from_scatterers(
        other_structure, algorithm="direct").f_calc()
    start = q_large.phase_transfer(q_calc.data())
    # Run once without and once with a random fixed-phase selection.
    for selection_fixed in (
            None,
            flex.double([random.random() for i in xrange(start.size())]) < 0.4):
        from_map_data = direct_space_squaring(start, selection_fixed)
        direct_space_result = start.phase_transfer(phase_source=from_map_data)
        new_phases = reciprocal_space_squaring(start, selection_fixed, verbose)
        reciprocal_space_result = start.phase_transfer(
            phase_source=flex.polar(1, new_phases))
        mwpe = direct_space_result.mean_weighted_phase_error(
            reciprocal_space_result)
        if 0 or verbose:
            print "mwpe: %.2f" % mwpe, start.space_group_info()
        for i, h in enumerate(direct_space_result.indices()):
            amp_d, phi_d = complex_math.abs_arg(
                direct_space_result.data()[i], deg=True)
            amp_r, phi_r = complex_math.abs_arg(
                reciprocal_space_result.data()[i], deg=True)
            phase_err = scitbx.math.phase_error(phi_d, phi_r, deg=True)
            # Tiny map values carry no reliable phase information.
            assert phase_err < 1.0 or abs(from_map_data[i]) < 1.0e-6
    exercise_truncate(q_large)
def exercise_match_indices():
    """Exercise miller.match_indices: pairing, selections, permutations
    and pairwise arithmetic on matched data arrays.

    Python 2 code: compares against zip() as a list and uses
    unparenthesized tuple sequences inside list comprehensions.
    """
    h0 = flex.miller_index(
        ((1, 2, 3), (-1, -2, -3), (2, 3, 4), (-2, -3, -4), (3, 4, 5)))
    d0 = flex.double((1, 2, 3, 4, 5))
    h1 = flex.miller_index(((-1, -2, -3), (-2, -3, -4), (1, 2, 3), (2, 3, 4)))
    d1 = flex.double((10, 20, 30, 40))
    # Matching an array to itself: identity pairing, no singles.
    mi = miller.match_indices(h0, h0)
    assert mi.have_singles() == 0
    assert list(mi.pairs()) == zip(range(5), range(5))
    mi = miller.match_indices(h0, h1)
    assert tuple(mi.singles(0)) == (4, )
    assert tuple(mi.singles(1)) == ()
    assert tuple(mi.pairs()) == ((0, 2), (1, 0), (2, 3), (3, 1))
    assert tuple(mi.pair_selection(0)) == (1, 1, 1, 1, 0)
    assert tuple(mi.single_selection(0)) == (0, 0, 0, 0, 1)
    assert tuple(mi.pair_selection(1)) == (1, 1, 1, 1)
    assert tuple(mi.single_selection(1)) == (0, 0, 0, 0)
    assert tuple(mi.paired_miller_indices(0)) \
        == tuple(h0.select(mi.pair_selection(0)))
    # Order differs between the two sides; compare sorted.
    l1 = list(mi.paired_miller_indices(1))
    l2 = list(h1.select(mi.pair_selection(1)))
    l1.sort()
    l2.sort()
    assert l1 == l2
    # Element-wise arithmetic over the matched pairs.
    assert approx_equal(tuple(mi.plus(d0, d1)), (31, 12, 43, 24))
    assert approx_equal(tuple(mi.minus(d0, d1)), (-29, -8, -37, -16))
    assert approx_equal(tuple(mi.multiplies(d0, d1)), (30, 20, 120, 80))
    assert approx_equal(tuple(mi.divides(d0, d1)),
                        (1 / 30., 2 / 10., 3 / 40., 4 / 20.))
    assert approx_equal(tuple(mi.additive_sigmas(d0, d1)), [
        math.sqrt(x * x + y * y)
        for x, y in ((1, 30), (2, 10), (3, 40), (4, 20))
    ])
    # Permutation recovery from a shuffled copy.
    q = flex.size_t((3, 2, 0, 4, 1))
    h1 = h0.select(q)
    assert tuple(miller.match_indices(h1, h0).permutation()) == tuple(q)
    p = miller.match_indices(h0, h1).permutation()
    assert tuple(p) == (2, 4, 1, 0, 3)
    assert tuple(h1.select(p)) == tuple(h0)
    cd0 = [
        complex(a, b)
        for (a, b) in (1, 1), (2, 0), (3.5, -1.5), (5, -3), (-8, 5.4)
    ]
    cd1 = [
        complex(a, b)
        for (a, b) in (1, -1), (2, 1), (0.5, 1.5), (-1, -8), (10, 0)
    ]
    cd2 = flex.complex_double(cd0)
    cd3 = flex.complex_double(cd1)
    mi = miller.match_indices(h0, h0)
    assert approx_equal(
        tuple(mi.plus(cd2, cd3)),
        ((2 + 0j), (4 + 1j), (4 + 0j), (4 - 11j), (2 + 5.4j)))
def free_score(self, lni, rg2, start_q, stop_q):
    """Chi-square of the Guinier model (lni, rg2) against data points
    OUTSIDE the fitted [start_q, stop_q] window.

    Only points with q >= self.min_q and q*sqrt(|rg2|) < 1.3 (inside the
    Guinier validity range) are used. Returns None if no points survive.
    """
    q = self.data.q
    below_window = flex.bool(q < start_q)
    above_window = flex.bool(q > stop_q)
    guinier_valid = flex.bool(q * math.sqrt(abs(rg2)) < 1.3)
    above_min_q = flex.bool(q >= self.min_q)
    keep = ((below_window | above_window) & above_min_q) & guinier_valid
    sel_q = q.select(keep)
    sel_i = self.data.i.select(keep)
    sel_s = self.data.s.select(keep)
    if sel_s.size() > 0:
        return self.chi_square(lni, rg2, sel_q, sel_i, sel_s)
    return None
def tst_2d_poly(n, l):
    """Print the (n, l) 2D Zernike polynomial evaluated at one point via
    both the polar (zernike_2d_polynome) and Cartesian
    (two_d_zernike_moments.zernike_poly) code paths for comparison."""
    nmax = max(n, 20)
    np = 50
    x, y = 0.1, 0.9
    radius = math.sqrt(x * x + y * y)
    theta = math.atan2(y, x)
    # Kept for parity with the original (unused by the current API calls).
    lfg = scitbx.math.log_factorial_generator(nmax)
    rap = scitbx.math.zernike_2d_polynome(n, l)
    rt_value = rap.f(radius, theta)
    grid = scitbx.math.two_d_grid(np, nmax)
    zm2d = scitbx.math.two_d_zernike_moments(grid, nmax)
    xy_value = zm2d.zernike_poly(n, l, x, y)
    print(rt_value, xy_value, abs(rt_value), abs(xy_value))
def tst_2d_zm(n, l):
    """Build an image of the (n, l) 2D Zernike polynomial, compute its
    Zernike moments, and write the positive reconstructed values to
    'testmap.dat' for visual comparison."""
    nmax = max(n, 20)
    np = 100
    points = flex.double(range(-np, np + 1)) / np
    grid = scitbx.math.two_d_grid(np, nmax)
    zm2d = scitbx.math.two_d_zernike_moments(grid, nmax)
    image = flex.vec3_double()
    for x in points:
        for y in points:
            r = math.sqrt(x * x + y * y)
            if r > 1.0:
                value = 0.0  # outside the unit disk
            else:
                value = zm2d.zernike_poly(n, l, x, y).real
            image.append([x * np + np, y * np + np, value])
    grid.clean_space(image)
    grid.construct_space_sum()
    zernike_2d_mom = scitbx.math.two_d_zernike_moments(grid, nmax)
    moments = zernike_2d_mom.moments()
    coefs = flex.real(moments)
    nl_array = scitbx.math.nl_array(nmax)
    nls = nl_array.nl()
    for nl, c in zip(nls, moments):
        if abs(c) < 1e-3:
            c = 0  # suppress numerical noise in the printout
        print(nl, c)
    NP = np * 2 + 1
    reconst = zernike_2d_mom.zernike_map(nmax, np)
    # BUG FIX: the original used the 'file()' builtin (removed in
    # Python 3) and leaked the handle on exception; open lazily here and
    # close deterministically via the context manager.
    with open('testmap.dat', 'w') as output:
        i = 0
        for x in range(0, NP):
            for y in range(0, NP):
                value = reconst[i].real
                if value > 0:
                    print(x, y, image[i][2], value, file=output)
                i = i + 1
def cholesky_decomposition(a, relative_eps=1.e-15):
    """Return the lower-triangular Cholesky factor c of ``a`` (c * c^T = a),
    or None if ``a`` is not positive definite within the tolerance
    relative_eps * max(|a|).

    ``a`` must be a square flex.double matrix; the result uses the same
    accessor.
    """
    assert a.is_square_matrix()
    n = a.focus()[0]
    eps = relative_eps * flex.max(flex.abs(a))
    c = flex.double(a.accessor(), 0)
    # FIX: 'xrange' does not exist on Python 3; 'range' behaves
    # identically here on both versions.
    for k in range(n):
        # FIX: renamed accumulator from 'sum' to avoid shadowing the builtin.
        acc = 0
        for j in range(k):
            acc += c[(k, j)] ** 2
        d = a[(k, k)] - acc
        if d <= eps:
            # Not positive definite (within tolerance).
            return None
        c[(k, k)] = math.sqrt(d)
        for i in range(k + 1, n):
            acc = 0
            for j in range(k):
                acc += c[(i, j)] * c[(k, j)]
            c[(i, k)] = (a[(i, k)] - acc) / c[(k, k)]
    return c
def cholesky_decomposition(a, relative_eps=1.e-15):
    """Cholesky factorization of a square flex.double matrix ``a``.

    Returns the lower-triangular factor, or None when a diagonal pivot
    falls below relative_eps * max(|a|) (i.e. ``a`` is not positive
    definite within tolerance).
    """
    assert a.is_square_matrix()
    n = a.focus()[0]
    tolerance = relative_eps * flex.max(flex.abs(a))
    lower = flex.double(a.accessor(), 0)
    for k in range(n):
        pivot = a[(k, k)] - sum(lower[(k, j)] ** 2 for j in range(k))
        if pivot <= tolerance:
            return None
        lower[(k, k)] = math.sqrt(pivot)
        for i in range(k + 1, n):
            off_diag = a[(i, k)] - sum(
                lower[(i, j)] * lower[(k, j)] for j in range(k))
            lower[(i, k)] = off_diag / lower[(k, k)]
    return lower
def exercise_match_indices():
    """Regression test for miller.match_indices (Python 2 code: relies on
    list-returning zip() and py2-only comprehension syntax)."""
    h0 = flex.miller_index(((1,2,3), (-1,-2,-3), (2,3,4), (-2,-3,-4), (3,4,5)))
    d0 = flex.double((1,2,3,4,5))
    h1 = flex.miller_index(((-1,-2,-3), (-2,-3,-4), (1,2,3), (2,3,4)))
    d1 = flex.double((10,20,30,40))
    # Self-match: every index pairs with itself.
    mi = miller.match_indices(h0, h0)
    assert mi.have_singles() == 0
    assert list(mi.pairs()) == zip(range(5), range(5))
    # h1 contains 4 of the 5 indices of h0, in a different order.
    mi = miller.match_indices(h0, h1)
    assert tuple(mi.singles(0)) == (4,)
    assert tuple(mi.singles(1)) == ()
    assert tuple(mi.pairs()) == ((0,2), (1,0), (2,3), (3,1))
    assert tuple(mi.pair_selection(0)) == (1, 1, 1, 1, 0)
    assert tuple(mi.single_selection(0)) == (0, 0, 0, 0, 1)
    assert tuple(mi.pair_selection(1)) == (1, 1, 1, 1)
    assert tuple(mi.single_selection(1)) == (0, 0, 0, 0)
    assert tuple(mi.paired_miller_indices(0)) \
        == tuple(h0.select(mi.pair_selection(0)))
    l1 = list(mi.paired_miller_indices(1))
    l2 = list(h1.select(mi.pair_selection(1)))
    l1.sort()
    l2.sort()
    assert l1 == l2
    # Pairwise arithmetic on the matched data.
    assert approx_equal(tuple(mi.plus(d0, d1)), (31, 12, 43, 24))
    assert approx_equal(tuple(mi.minus(d0, d1)), (-29,-8,-37,-16))
    assert approx_equal(tuple(mi.multiplies(d0, d1)), (30,20,120,80))
    assert approx_equal(tuple(mi.divides(d0, d1)),
                        (1/30.,2/10.,3/40.,4/20.))
    assert approx_equal(tuple(mi.additive_sigmas(d0, d1)), [
        math.sqrt(x*x+y*y) for x,y in ((1,30), (2,10), (3,40), (4,20))])
    # Permutations map the shuffled array back onto the original.
    q = flex.size_t((3,2,0,4,1))
    h1 = h0.select(q)
    assert tuple(miller.match_indices(h1, h0).permutation()) == tuple(q)
    p = miller.match_indices(h0, h1).permutation()
    assert tuple(p) == (2,4,1,0,3)
    assert tuple(h1.select(p)) == tuple(h0)
    cd0 = [
        complex(a,b) for (a,b) in (1,1),(2,0),(3.5,-1.5),(5, -3),(-8,5.4)
    ]
    cd1 = [
        complex(a,b) for (a,b) in (1,-1),(2,1),(0.5,1.5),(-1, -8),(10,0)
    ]
    cd2 = flex.complex_double(cd0)
    cd3 = flex.complex_double(cd1)
    mi = miller.match_indices(h0, h0)
    assert approx_equal(tuple(mi.plus(cd2, cd3)),
                        ((2+0j), (4+1j), (4+0j), (4-11j), (2+5.4j)))
def example(): x_obs = flex.double( range(20) ) a = flex.double([1,2,3]) w_obs = flex.double(20,100.0) y_ideal = a[0] + a[1]*x_obs + a[2]*x_obs*x_obs y_obs = y_ideal + flex.random_double(size=x_obs.size())*1.5 for ii in range(20): print x_obs[ii], y_obs[ii] faker = fake_data( x_obs, y_obs) fit = polynomial_fit(x_obs=x_obs,y_obs=y_obs,w_obs=w_obs,n=3) print "------------------------------------------- " print " True and fitted coeffcients" print "------------------------------------------- " for i in range(a.size()): print i, a[i], fit.a[i] print "------------------------------------------- " print " Bootstrapped mean and standard deviations" print "------------------------------------------- " mean=[0,0,0] std=[0,0,0] for trial in range(100): x_new, y_new = faker.fake_it(20) fit = polynomial_fit(x_obs=x_new,y_obs=y_new,w_obs=w_obs,n=3) for i in range(a.size()): mean[i]+=fit.a[i] std[i]+=fit.a[i]*fit.a[i] for i in range(3): mean[i]/=100.0 std[i]/=100.0 std[i] -= mean[i]*mean[i] std[i] = math.sqrt( std[i] ) print i, mean[i], std[i]
def example(): x_obs = flex.double(range(20)) a = flex.double([1, 2, 3]) w_obs = flex.double(20, 100.0) y_ideal = a[0] + a[1] * x_obs + a[2] * x_obs * x_obs y_obs = y_ideal + flex.random_double(size=x_obs.size()) * 1.5 for ii in range(20): print x_obs[ii], y_obs[ii] faker = fake_data(x_obs, y_obs) fit = polynomial_fit(x_obs=x_obs, y_obs=y_obs, w_obs=w_obs, n=3) print "------------------------------------------- " print " True and fitted coeffcients" print "------------------------------------------- " for i in range(a.size()): print i, a[i], fit.a[i] print "------------------------------------------- " print " Bootstrapped mean and standard deviations" print "------------------------------------------- " mean = [0, 0, 0] std = [0, 0, 0] for trial in range(100): x_new, y_new = faker.fake_it(20) fit = polynomial_fit(x_obs=x_new, y_obs=y_new, w_obs=w_obs, n=3) for i in range(a.size()): mean[i] += fit.a[i] std[i] += fit.a[i] * fit.a[i] for i in range(3): mean[i] /= 100.0 std[i] /= 100.0 std[i] -= mean[i] * mean[i] std[i] = math.sqrt(std[i]) print i, mean[i], std[i]
def exercise_merge_equivalents():
    """Exercise the miller.ext.merge_equivalents_* family: real,
    sigma-weighted, complex, Hendrickson-Lattman, exact-bool and
    exact-int merging of symmetry-equivalent reflections."""
    # Three unique indices with redundancies 2, 3, 1.
    i = flex.miller_index(
        ((1,2,3), (1,2,3), (3,0,3), (3,0,3), (3,0,3), (1,1,2)))
    d = flex.double((1,2,3,4,5,6))
    m = miller.ext.merge_equivalents_real(i, d)
    assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
    assert approx_equal(m.data, (3/2., 4, 6))
    assert tuple(m.redundancies) == (2,3,1)
    assert approx_equal(m.r_linear, (1/3., 1/6., 0))
    assert approx_equal(m.r_square, (0.1, 0.04, 0))
    assert approx_equal(m.r_int, (1.+2.)/(3.+12.))
    # Sigma-weighted merging (weights = 1/sigma^2).
    s = flex.double((1/3.,1/2.,1/4.,1/6.,1/3.,1/5.))
    m = miller.ext.merge_equivalents_obs(i, d, s, sigma_dynamic_range=2e-6)
    assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
    assert approx_equal(m.data, (17/13., (16*3+36*4+9*5)/(16+36+9.), 6))
    assert approx_equal(
        m.sigmas,
        (math.sqrt(1/2./2), 0.84077140277/3**0.5, 1/5.))
    assert m.sigma_dynamic_range == 2e-6
    assert tuple(m.redundancies) == (2,3,1)
    assert approx_equal(m.r_linear, (1/3., 0.1762295, 0))
    assert approx_equal(m.r_square, (0.1147929, 0.0407901, 0))
    assert approx_equal(
        m.r_int,
        (abs(1-17/13.) + abs(2-17/13.)
         + abs(3-237/61.) + abs(4-237/61.) + abs(5-237/61.))
        / (1 + 2 + 3 + 4 + 5))
    # Complex data: merged values are plain averages per unique index.
    d = flex.complex_double(
        [complex(-1.706478, 0.248638),
         complex( 1.097872, -0.983523),
         complex( 0.147183, 2.625064),
         complex(-0.933310, 2.496886),
         complex( 1.745500, -0.686761),
         complex(-0.620066, 2.097776)])
    m = miller.ext.merge_equivalents_complex(i, d)
    assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
    assert approx_equal(m.data, [
        complex(-0.304303, -0.367443),
        complex( 0.319791, 1.478396),
        complex(-0.620066, 2.097776)])
    assert tuple(m.redundancies) == (2,3,1)
    # Hendrickson-Lattman coefficients merge component-wise.
    d = flex.hendrickson_lattman(
        [(-1.706478, 0.248638, 1.653352, -2.411313),
         ( 1.097872, -0.983523, -2.756402, 0.294464),
         ( 0.147183, 2.625064, 1.003636, 2.563517),
         (-0.933310, 2.496886, 2.040418, 0.371885),
         ( 1.745500, -0.686761, -2.291345, -2.386650),
         (-0.620066, 2.097776, 0.099784, 0.268107)])
    m = miller.ext.merge_equivalents_hl(i, d)
    assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
    assert approx_equal(m.data, [
        (-0.3043030, -0.3674425, -0.5515250, -1.0584245),
        ( 0.3197910, 1.4783963, 0.2509030, 0.1829173),
        (-0.6200660, 2.0977760, 0.0997840, 0.2681070)])
    assert tuple(m.redundancies) == (2,3,1)
    # Exact merging: all equivalent values must agree.
    d = flex.bool((True,True,False,False,False,True))
    m = miller.ext.merge_equivalents_exact_bool(i, d)
    assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
    assert list(m.data) == [True, False, True]
    assert tuple(m.redundancies) == (2,3,1)
    # Conflicting flags within one unique index must raise.
    d = flex.bool((True,True,False,True,False,True))
    try:
        m = miller.ext.merge_equivalents_exact_bool(i, d)
    except RuntimeError as e:
        assert str(e) == "cctbx Error: merge_equivalents_exact:"\
            " incompatible flags for hkl = (3, 0, 3)"
    else:
        raise Exception_expected
    d = flex.int((3,3,5,5,5,7))
    m = miller.ext.merge_equivalents_exact_int(i, d)
    assert list(m.data) == [3, 5, 7]
    # Already-unique input: merging is the identity, r_int is zero.
    i = flex.miller_index(((1,2,3), (3,0,3), (1,1,2)))
    d = flex.double((1,2,3))
    m = miller.ext.merge_equivalents_real(i,d)
    assert m.r_int == 0
def tst_2d_zernike_mom(n, l, N=100, filename=None):
    """Compute 2D Zernike moments of an image (read from ``filename`` or
    generated for the (n, l) polynomial) and write the positive values of
    the reconstructed image to 'rebuilt.dat'."""
    nmax = max(20, n)
    rebuilt = open('rebuilt.dat', 'w')
    tt1 = time.time()
    if (filename is not None):
        image = read_data(filename)
    else:
        image = generate_image(n, l)
    # Image is a flat list of (x, y, value); recover the edge length.
    NP = int(math.sqrt(image.size()))
    # NOTE(review): on Python 3 this is a float (true division) --
    # confirm integer division is not required here.
    N = NP/2
    grid_2d = scitbx.math.two_d_grid(N, nmax)
    grid_2d.clean_space(image)
    grid_2d.construct_space_sum()
    tt2 = time.time()
    print("time used: ", tt2-tt1)
    zernike_2d_mom = scitbx.math.two_d_zernike_moments(grid_2d, nmax)
    moments = zernike_2d_mom.moments()
    tt2 = time.time()
    print("time used: ", tt2-tt1)
    coefs = flex.real(moments)
    nl_array = scitbx.math.nl_array(nmax)
    nls = nl_array.nl()
    nl_array.load_coefs(nls, coefs)
    lfg = scitbx.math.log_factorial_generator(nmax)
    print(nl_array.get_coef(n, l)*2)
    for nl, c in zip(nls, moments):
        if (abs(c) < 1e-3):
            c = 0  # suppress numerical noise in the printout
        print(nl, c)
    print()
    # Rebuild the image from the moments by summing the polynomials.
    # NOTE: the loop variables n, l, x, y below deliberately overwrite
    # the function parameters / outer loop values.
    reconst = flex.complex_double(NP**2, 0)
    for nl, c in zip(nls, moments):
        n = nl[0]
        l = nl[1]
        if (l > 0):
            c = c*2  # account for the +/- l degeneracy
        rap = scitbx.math.zernike_2d_polynome(n, l)
        i = 0
        for x in range(0, NP):
            x = x-N  # shift to disk-centered coordinates
            for y in range(0, NP):
                y = y-N
                rr = math.sqrt(x*x+y*y)/N
                if rr > 1.0:
                    value = 0.0  # outside the unit disk
                else:
                    tt = math.atan2(y, x)
                    value = rap.f(rr, tt)
                reconst[i] = reconst[i]+value*c
                i = i+1
    # Dump the positive part of the reconstruction next to the original.
    i = 0
    for x in range(0, NP):
        for y in range(0, NP):
            value = reconst[i].real
            if (value > 0):
                print(x, y, image[i][2], value, file=rebuilt)
            i = i+1
    rebuilt.close()
def exercise(space_group_info, n_scatterers=8, d_min=2, verbose=0, e_min=1.5):
    """Exercise direct-space vs reciprocal-space squaring on a random
    structure in the given space group and assert phase agreement
    (Python 3 variant)."""
    structure = random_structure.xray_structure(space_group_info,
                                                elements=["const"] * n_scatterers,
                                                volume_per_atom=200,
                                                min_distance=3.,
                                                general_positions_only=True,
                                                u_iso=0.0)
    if (0 or verbose):
        structure.show_summary().show_scatterers()
    f_calc = structure.structure_factors(d_min=d_min,
                                         anomalous_flag=False).f_calc()
    f_obs = abs(f_calc)
    # Scale |F| for quasi-normalization.
    q_obs = miller.array(
        miller_set=f_obs,
        data=f_obs.data()
        / math.sqrt(f_obs.space_group().order_p() * n_scatterers)
        / f_obs.space_group().n_ltr())
    q_obs = q_obs.sort(by_value="abs")
    q_obs.setup_binner(auto_binning=True)
    n_obs = q_obs.quasi_normalize_structure_factors()
    # Quasi-normalized vs normalized should regress with slope ~1.
    r = flex.linear_regression(q_obs.data(), n_obs.data())
    if (0 or verbose):
        r.show_summary()
    assert r.is_well_defined()
    assert abs(r.y_intercept()) < 0.1
    assert abs(r.slope() - 1) < 0.2
    q_large = q_obs.select(
        q_obs.quasi_normalized_as_normalized().data() > e_min)
    if (0 or verbose):
        print("Number of e-values > %.6g: %d" % (e_min, q_large.size()))
    # Independent random structure supplies the starting phase set.
    other_structure = random_structure.xray_structure(
        space_group_info,
        elements=["const"] * n_scatterers,
        volume_per_atom=200,
        min_distance=3.,
        general_positions_only=True,
        u_iso=0.0)
    assert other_structure.unit_cell().is_similar_to(structure.unit_cell())
    q_calc = q_large.structure_factors_from_scatterers(
        other_structure, algorithm="direct").f_calc()
    start = q_large.phase_transfer(q_calc.data())
    # Once without and once with a random fixed-phase selection.
    for selection_fixed in (None, flex.double(
            [random.random() for i in range(start.size())]) < 0.4):
        from_map_data = direct_space_squaring(start, selection_fixed)
        direct_space_result = start.phase_transfer(phase_source=from_map_data)
        new_phases = reciprocal_space_squaring(start, selection_fixed, verbose)
        reciprocal_space_result = start.phase_transfer(
            phase_source=flex.polar(1, new_phases))
        mwpe = direct_space_result.mean_weighted_phase_error(
            reciprocal_space_result)
        if (0 or verbose):
            print("mwpe: %.2f" % mwpe, start.space_group_info())
        for i, h in enumerate(direct_space_result.indices()):
            amp_d, phi_d = complex_math.abs_arg(direct_space_result.data()[i],
                                                deg=True)
            amp_r, phi_r = complex_math.abs_arg(
                reciprocal_space_result.data()[i], deg=True)
            phase_err = scitbx.math.phase_error(phi_d, phi_r, deg=True)
            # Tiny map values carry no reliable phase information.
            assert phase_err < 1.0 or abs(from_map_data[i]) < 1.e-6
    exercise_truncate(q_large)
def exercise_bins():
    """Exercise miller.binning / miller.ext.binner: constructor
    overloads, bin d-ranges, per-bin selections, bin centers,
    interpolation, and pickling."""
    uc = uctbx.unit_cell((11,11,13,90,90,120))
    sg_type = sgtbx.space_group_type("P 3 2 1")
    anomalous_flag = False
    d_min = 1
    m = miller.index_generator(uc, sg_type, anomalous_flag, d_min).to_array()
    f = flex.double()
    for i in range(m.size()):
        f.append(random.random())
    n_bins = 10
    # All constructor overloads must be accepted; only the last 'b' is used.
    b = miller.binning(uc, n_bins, 0, d_min)
    b = miller.binning(uc, n_bins, 0, d_min, 1.e-6)
    b = miller.binning(uc, n_bins, m)
    b = miller.binning(uc, n_bins, m, 0)
    b = miller.binning(uc, n_bins, m, 0, d_min)
    b = miller.binning(uc, n_bins, m, 0, d_min, 1.e-6)
    # -1 encodes an open (unbounded) end of a bin range.
    assert b.d_max() == -1
    assert approx_equal(b.d_min(), d_min)
    assert b.bin_d_range(0) == (-1,-1)
    assert approx_equal(b.bin_d_range(1), (-1,2.1544336))
    assert approx_equal(b.bin_d_range(b.n_bins_all()-1), (1,-1))
    # get_i_bin accepts both d*^2 values and Miller indices.
    d_star_sq = 0.5
    r = b.bin_d_range(b.get_i_bin(d_star_sq))
    d = 1/math.sqrt(d_star_sq)
    assert r[1] <= d <= r[0]
    h = (3,4,5)
    r = b.bin_d_range(b.get_i_bin(h))
    assert r[1] <= uc.d(h) <= r[0]
    # a quick test to excercise d-spacings on fractional Miller indices:
    assert approx_equal(uc.d((3,4,5)), uc.d_frac((3.001,4,5)), eps=0.001)
    binning1 = miller.binning(uc, n_bins, m)
    assert binning1.unit_cell().is_similar_to(uc)
    assert binning1.n_bins_used() == n_bins
    assert binning1.limits().size() == n_bins + 1
    # Two extra bins: d-too-large and d-too-small overflow bins.
    assert binning1.n_bins_all() == n_bins + 2
    # binning objects must survive a pickle round trip.
    s = pickle.dumps(binning1)
    l = pickle.loads(s)
    assert str(l.unit_cell()) == "(11, 11, 13, 90, 90, 120)"
    assert approx_equal(l.limits(), binning1.limits())
    #
    binner1 = miller.ext.binner(binning1, m)
    assert binner1.miller_indices().id() == m.id()
    assert binner1.count(binner1.i_bin_d_too_large()) == 0
    assert binner1.count(binner1.i_bin_d_too_small()) == 0
    counts = binner1.counts()
    for i_bin in binner1.range_all():
        assert binner1.count(i_bin) == counts[i_bin]
        assert binner1.selection(i_bin).count(True) == counts[i_bin]
    assert list(binner1.range_all()) == list(range(binner1.n_bins_all()))
    assert list(binner1.range_used()) == list(
        range(1, binner1.n_bins_used()+1))
    # A binning over the interior d-range must reproduce the inner counts.
    binning2 = miller.binning(uc, n_bins - 2,
                              binning1.bin_d_min(2),
                              binning1.bin_d_min(n_bins))
    binner2 = miller.ext.binner(binning2, m)
    assert tuple(binner1.counts())[1:-1] == tuple(binner2.counts())
    # selection() and array_indices() must agree bin by bin.
    array_indices = flex.size_t(range(m.size()))
    perm_array_indices1 = flex.size_t()
    perm_array_indices2 = flex.size_t()
    for i_bin in binner1.range_all():
        perm_array_indices1.extend(
            array_indices.select(binner1.selection(i_bin)))
        perm_array_indices2.extend(binner1.array_indices(i_bin))
    assert perm_array_indices1.size() == m.size()
    assert perm_array_indices2.size() == m.size()
    assert tuple(perm_array_indices1) == tuple(perm_array_indices2)
    # Bin centers for d* powers 1, 2, 3.
    b = miller.ext.binner(miller.binning(uc, n_bins, m, 0, d_min), m)
    assert approx_equal(b.bin_centers(1),
                        (0.23207956, 0.52448148, 0.62711856, 0.70311998,
                         0.7652538, 0.818567, 0.86566877, 0.90811134,
                         0.94690405, 0.98274518))
    assert approx_equal(b.bin_centers(2),
                        (0.10772184, 0.27871961, 0.39506823, 0.49551249,
                         0.58642261, 0.67067026, 0.74987684, 0.82507452,
                         0.89697271, 0.96608584))
    assert approx_equal(b.bin_centers(3),
                        (0.050000075, 0.15000023, 0.25000038, 0.35000053,
                         0.45000068, 0.55000083, 0.65000098, 0.75000113,
                         0.85000128, 0.95000143))
    # Interpolation with d_star_power=0 is piecewise constant per bin.
    v = flex.double(range(b.n_bins_used()))
    i = b.interpolate(v, 0)
    for i_bin in b.range_used():
        assert i.select(b.selection(i_bin)).all_eq(v[i_bin-1])
    # Higher powers correlate with the corresponding power of d*.
    dss = uc.d_star_sq(m)
    for d_star_power in (1,2,3):
        j = b.interpolate(v, d_star_power)
        x = flex.pow(dss, (d_star_power/2.))
        r = flex.linear_correlation(x, j)
        assert r.is_well_defined()
        assert approx_equal(
            r.coefficient(),
            (0.946401,0.990764,1.0)[d_star_power-1],
            eps=1.e-4, multiplier=None)
    # binner objects must also survive pickling.
    s = pickle.dumps(binner2)
    l = pickle.loads(s)
    assert str(l.unit_cell()) == "(11, 11, 13, 90, 90, 120)"
    assert approx_equal(l.limits(), binner2.limits())
    assert l.miller_indices().all_eq(binner2.miller_indices())
    assert l.bin_indices().all_eq(binner2.bin_indices())
    # Binning directly from explicit limits.
    limits = flex.random_double(size=10)
    bng = miller.binning(uc, limits)
    assert bng.unit_cell().is_similar_to(uc)
    assert approx_equal(bng.limits(), limits)
def filter(self, rg2, lni, score, free_score, stop_q, start_q, rat_lim=1.3):
    """Pass a Guinier fit result through only if it is usable.

    A result is kept when rg2 > 0, Rg*stop_q exceeds rat_lim, and a free
    score is available; otherwise six Nones are returned. Note the
    returned tuple swaps start_q/stop_q relative to the argument order:
    (rg2, lni, score, free_score, start_q, stop_q).
    """
    keep = (rg2 > 0
            and math.sqrt(rg2) * stop_q > rat_lim
            and free_score is not None)
    if keep:
        return rg2, lni, score, free_score, start_q, stop_q
    return None, None, None, None, None, None
def exercise_merge_equivalents():
  """Exercise the miller.ext.merge_equivalents_* mergers.

  Checks merging of real, sigma-weighted, complex, Hendrickson-Lattman
  and exact-bool data over duplicate Miller indices, including the merged
  values, redundancies and R-factors, and verifies that incompatible
  bool flags raise RuntimeError.
  """
  # Six observations over three unique indices: (1,2,3) x2, (3,0,3) x3, (1,1,2) x1.
  i = flex.miller_index(((1,2,3), (1,2,3), (3,0,3), (3,0,3), (3,0,3), (1,1,2)))
  d = flex.double((1,2,3,4,5,6))
  # Unweighted merge of real data: plain means per unique index.
  m = miller.ext.merge_equivalents_real(i, d)
  assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
  assert approx_equal(m.data, (3/2., 4, 6))
  assert tuple(m.redundancies) == (2,3,1)
  assert approx_equal(m.r_linear, (1/3., 1/6., 0))
  assert approx_equal(m.r_square, (0.1, 0.04, 0))
  assert approx_equal(m.r_int, (1.+2.)/(3.+12.))
  #
  # Sigma-weighted merge: weights are 1/sigma^2.
  s = flex.double((1/3.,1/2.,1/4.,1/6.,1/3.,1/5.))
  m = miller.ext.merge_equivalents_obs(i, d, s, sigma_dynamic_range=2e-6)
  assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
  assert approx_equal(m.data, (17/13., (16*3+36*4+9*5)/(16+36+9.), 6))
  assert approx_equal(m.sigmas, (math.sqrt(1/2./2),0.84077140277/3**0.5,1/5.))
  assert m.sigma_dynamic_range == 2e-6
  assert tuple(m.redundancies) == (2,3,1)
  assert approx_equal(m.r_linear, (1/3., 0.1762295, 0))
  assert approx_equal(m.r_square, (0.1147929, 0.0407901, 0))
  assert approx_equal(m.r_int,
    (abs(1-17/13.)+abs(2-17/13.)
     + abs(3-237/61.)+abs(4-237/61.)+abs(5-237/61.)
    ) / (1 + 2 + 3 + 4 + 5))
  #
  # Complex data: component-wise means.
  d = flex.complex_double([
    complex(-1.706478,  0.248638),
    complex( 1.097872, -0.983523),
    complex( 0.147183,  2.625064),
    complex(-0.933310,  2.496886),
    complex( 1.745500, -0.686761),
    complex(-0.620066,  2.097776)])
  m = miller.ext.merge_equivalents_complex(i, d)
  assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
  assert approx_equal(m.data, [
    complex(-0.304303, -0.367443),
    complex( 0.319791,  1.478396),
    complex(-0.620066,  2.097776)])
  assert tuple(m.redundancies) == (2,3,1)
  #
  # Hendrickson-Lattman coefficients: coefficient-wise means.
  d = flex.hendrickson_lattman([
    (-1.706478,  0.248638,  1.653352, -2.411313),
    ( 1.097872, -0.983523, -2.756402,  0.294464),
    ( 0.147183,  2.625064,  1.003636,  2.563517),
    (-0.933310,  2.496886,  2.040418,  0.371885),
    ( 1.745500, -0.686761, -2.291345, -2.386650),
    (-0.620066,  2.097776,  0.099784,  0.268107)])
  m = miller.ext.merge_equivalents_hl(i, d)
  assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
  assert approx_equal(m.data, [
    (-0.3043030, -0.3674425, -0.5515250, -1.0584245),
    ( 0.3197910,  1.4783963,  0.2509030,  0.1829173),
    (-0.6200660,  2.0977760,  0.0997840,  0.2681070)])
  assert tuple(m.redundancies) == (2,3,1)
  #
  # Exact bool merge: all equivalents must agree.
  d = flex.bool((True,True,False,False,False,True))
  m = miller.ext.merge_equivalents_exact_bool(i, d)
  assert tuple(m.indices) == ((1,2,3), (3,0,3), (1,1,2))
  assert list(m.data) == [True, False, True]
  assert tuple(m.redundancies) == (2,3,1)
  # Conflicting flags for (3,0,3) must raise.
  d = flex.bool((True,True,False,True,False,True))
  try:
    m = miller.ext.merge_equivalents_exact_bool(i, d)
  except RuntimeError as e:
    # NOTE: "as e" (PEP 3110) replaces the Python-2-only "except X, e" form.
    assert str(e) == "cctbx Error: merge_equivalents_exact:"\
      " incompatible flags for hkl = (3, 0, 3)"
  else:
    # Previously a missing exception passed silently; now it is a failure.
    raise AssertionError("RuntimeError expected")
def exercise_bins():
  """Exercise miller.binning / miller.ext.binner: construction overloads,
  d-range queries, pickling, per-bin selections/counts, bin centers and
  interpolation, and construction from explicit limits."""
  # Hexagonal P321 cell; generate all indices to d_min = 1 A.
  uc = uctbx.unit_cell((11,11,13,90,90,120))
  sg_type = sgtbx.space_group_type("P 3 2 1")
  anomalous_flag = False
  d_min = 1
  m = miller.index_generator(uc, sg_type, anomalous_flag, d_min).to_array()
  # Random data array of matching length (values themselves are unused below).
  f = flex.double()
  for i in xrange(m.size()):
    f.append(random.random())
  n_bins = 10
  # Exercise every binning constructor overload; only the last binding of b
  # is used for the assertions that follow.
  b = miller.binning(uc, n_bins, 0, d_min)
  b = miller.binning(uc, n_bins, 0, d_min, 1.e-6)
  b = miller.binning(uc, n_bins, m)
  b = miller.binning(uc, n_bins, m, 0)
  b = miller.binning(uc, n_bins, m, 0, d_min)
  b = miller.binning(uc, n_bins, m, 0, d_min, 1.e-6)
  # -1 encodes "unbounded" in d ranges (bins 0 and n_bins_all()-1 are the
  # d-too-large / d-too-small overflow bins).
  assert b.d_max() == -1
  assert approx_equal(b.d_min(), d_min)
  assert b.bin_d_range(0) == (-1,-1)
  assert approx_equal(b.bin_d_range(1), (-1,2.1544336))
  assert approx_equal(b.bin_d_range(b.n_bins_all()-1), (1,-1))
  # get_i_bin accepts either d* squared or a Miller index; in both cases the
  # corresponding d-spacing must fall inside the returned bin range.
  d_star_sq = 0.5
  r = b.bin_d_range(b.get_i_bin(d_star_sq))
  d = 1/math.sqrt(d_star_sq)
  assert r[1] <= d <= r[0]
  h = (3,4,5)
  r = b.bin_d_range(b.get_i_bin(h))
  assert r[1] <= uc.d(h) <= r[0]
  # a quick test to excercise d-spacings on fractional Miller indices:
  assert approx_equal(
    uc.d((3,4,5)), uc.d_frac((3.001,4,5)), eps=0.001)
  # Basic size invariants: n_bins used bins + 2 overflow bins, n_bins+1 limits.
  binning1 = miller.binning(uc, n_bins, m)
  assert binning1.unit_cell().is_similar_to(uc)
  assert binning1.n_bins_used() == n_bins
  assert binning1.limits().size() == n_bins + 1
  assert binning1.n_bins_all() == n_bins + 2
  # binning objects must round-trip through pickle.
  s = pickle.dumps(binning1)
  l = pickle.loads(s)
  assert str(l.unit_cell()) == "(11, 11, 13, 90, 90, 120)"
  assert approx_equal(l.limits(), binning1.limits())
  #
  # binner = binning + assignment of each index in m to a bin.
  binner1 = miller.ext.binner(binning1, m)
  assert binner1.miller_indices().id() == m.id()
  # Limits were derived from m itself, so the overflow bins are empty.
  assert binner1.count(binner1.i_bin_d_too_large()) == 0
  assert binner1.count(binner1.i_bin_d_too_small()) == 0
  # count(), counts() and selection() must agree bin by bin.
  counts = binner1.counts()
  for i_bin in binner1.range_all():
    assert binner1.count(i_bin) == counts[i_bin]
    assert binner1.selection(i_bin).count(True) == counts[i_bin]
  assert list(binner1.range_all()) == range(binner1.n_bins_all())
  assert list(binner1.range_used()) == range(1, binner1.n_bins_used()+1)
  # A narrower binning over the interior d-range reproduces the interior
  # counts of binner1.
  binning2 = miller.binning(uc, n_bins
    - 2, binning1.bin_d_min(2), binning1.bin_d_min(n_bins))
  binner2 = miller.ext.binner(binning2, m)
  assert tuple(binner1.counts())[1:-1] == tuple(binner2.counts())
  # array_indices(i_bin) must enumerate exactly the positions picked out by
  # selection(i_bin), in the same order.
  array_indices = flex.size_t(range(m.size()))
  perm_array_indices1 = flex.size_t()
  perm_array_indices2 = flex.size_t()
  for i_bin in binner1.range_all():
    perm_array_indices1.extend(array_indices.select(binner1.selection(i_bin)))
    perm_array_indices2.extend(binner1.array_indices(i_bin))
  assert perm_array_indices1.size() == m.size()
  assert perm_array_indices2.size() == m.size()
  assert tuple(perm_array_indices1) == tuple(perm_array_indices2)
  # Regression values for bin centers at d* powers 1, 2, 3.
  b = miller.ext.binner(miller.binning(uc, n_bins, m, 0, d_min), m)
  assert approx_equal(b.bin_centers(1),
    (0.23207956, 0.52448148, 0.62711856, 0.70311998, 0.7652538,
     0.818567, 0.86566877, 0.90811134, 0.94690405, 0.98274518))
  assert approx_equal(b.bin_centers(2),
    (0.10772184, 0.27871961, 0.39506823, 0.49551249, 0.58642261,
     0.67067026, 0.74987684, 0.82507452, 0.89697271, 0.96608584))
  assert approx_equal(b.bin_centers(3),
    (0.050000075, 0.15000023, 0.25000038, 0.35000053, 0.45000068,
     0.55000083, 0.65000098, 0.75000113, 0.85000128, 0.95000143))
  # interpolate with d_star_power=0 is piecewise constant per bin.
  v = flex.double(xrange(b.n_bins_used()))
  i = b.interpolate(v, 0)
  for i_bin in b.range_used():
    assert i.select(b.selection(i_bin)).all_eq(v[i_bin-1])
  # For powers 1-3 the interpolated values should correlate with
  # (d*)^power; power 3 gives perfect correlation since bin centers at
  # power 3 are equispaced in (d*)^3 (regression coefficients below).
  dss = uc.d_star_sq(m)
  for d_star_power in (1,2,3):
    j = b.interpolate(v, d_star_power)
    x = flex.pow(dss, (d_star_power/2.))
    r = flex.linear_correlation(x, j)
    assert r.is_well_defined()
    assert approx_equal(
      r.coefficient(), (0.946401,0.990764,1.0)[d_star_power-1],
      eps=1.e-4, multiplier=None)
  #
  # binner objects must also round-trip through pickle, including the
  # per-reflection bin assignments.
  s = pickle.dumps(binner2)
  l = pickle.loads(s)
  assert str(l.unit_cell()) == "(11, 11, 13, 90, 90, 120)"
  assert approx_equal(l.limits(), binner2.limits())
  assert l.miller_indices().all_eq(binner2.miller_indices())
  assert l.bin_indices().all_eq(binner2.bin_indices())
  #
  # Construction from an explicit array of limits.
  limits = flex.random_double(size=10)
  bng = miller.binning(uc, limits)
  assert bng.unit_cell().is_similar_to(uc)
  assert approx_equal(bng.limits(), limits)