def compute_mu_bar(b1, b2, b3, b4, b5, b6):
    # R and epsilon are expected to be defined at module scope by the surrounding
    # test script; they are not defined in this snippet.
    model = SimpleMosaicityParameterisation((b1, b2, b3, b4, b5, b6))
    sigma = model.sigma()
    sigmap = R * sigma * R.transpose()
    sigma11 = matrix.sqr((sigmap[0], sigmap[1], sigmap[3], sigmap[4]))
    sigma12 = matrix.col((sigmap[2], sigmap[5]))
    sigma21 = matrix.col((sigmap[6], sigmap[7])).transpose()
    sigma22 = sigmap[8]
    return sigma12 * (1 / sigma22) * epsilon
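
# Illustrative sketch (not part of the original test): sigmap is a flat, row-major
# 3x3 matrix, so the block partition used in compute_mu_bar corresponds to the
# index layout checked below. The helper name is hypothetical and only numpy is
# assumed.
def _check_partition_indices():
    import numpy as np

    idx = np.arange(9).reshape(3, 3)  # row-major layout of sigmap[0]..sigmap[8]
    assert idx[:2, :2].ravel().tolist() == [0, 1, 3, 4]  # sigma11
    assert idx[:2, 2].tolist() == [2, 5]  # sigma12
    assert idx[2, :2].tolist() == [6, 7]  # sigma21
    assert idx[2, 2] == 8  # sigma22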

def tst_ml_target_class_2():
    numpy.random.seed(100)

    # The beam vector
    s0 = matrix.col((0, 0, 1))

    # The covariance matrix
    sigma = matrix.sqr((1e-6, 0, 0, 0, 2e-6, 0, 0, 0, 3e-6))

    # The number of reflections
    N = 100

    # Generate a load of reflections
    s2_list, ctot_list, xbar_list, Sobs_list = generate_simple(s0, sigma, N=N)

    Sobs_list = flex.double(Sobs_list)
    parameterisation = SimpleMosaicityParameterisation((1, 0, 1, 0, 0, 1))
    data = ProfileRefinerData(s0, s2_list, ctot_list, xbar_list, Sobs_list)
    refiner = ProfileRefiner(parameterisation, data)
    ml = refiner.refine()
    params = refiner.parameters

    # Create the covariance matrix from the refined parameters; M is lower
    # triangular, so sigma = M * M^T is symmetric and positive semi-definite.
    M = matrix.sqr(
        (params[0], 0, 0, params[1], params[2], 0, params[3], params[4], params[5])
    )
    sigma = M * M.transpose()

    expected = matrix.sqr((
        9.91048657253e-07, -1.9828296735e-09, 2.25787032072e-09,
        -1.9828296735e-09, 1.98334108426e-06, 1.88097904832e-08,
        2.25787032072e-09, 1.88097904832e-08, 2.99884748097e-06,
    ))

    assert all(1e6 * abs(a - b) < 1e-7 for a, b in zip(sigma, expected))

    print("OK")

def generate_data(experiments, reflections):
    from random import randint, seed, uniform

    seed(0)

    # Pick a reflection at random (randint is inclusive at both ends)
    index = randint(0, len(reflections) - 1)
    h = reflections[index]["miller_index"]

    s0 = matrix.col(experiments[0].beam.get_s0())

    U_param = CrystalOrientationParameterisation(experiments[0].crystal)
    B_param = CrystalUnitCellParameterisation(experiments[0].crystal)

    U = matrix.sqr(experiments[0].crystal.get_U())
    B = matrix.sqr(experiments[0].crystal.get_B())
    r = U * B * matrix.col(h)
    s2 = s0 + r
    mobs = (
        s2 + matrix.col((uniform(0, 1e-3), uniform(0, 1e-3), uniform(0, 1e-3)))
    ).normalize() * s0.length()

    b1, b2, b3, b4, b5, b6 = (
        uniform(1e-3, 3e-3),
        uniform(0.0, 1e-3),
        uniform(1e-3, 3e-3),
        uniform(0.0, 1e-3),
        uniform(0.0, 1e-3),
        uniform(1e-3, 3e-3),
    )

    params = (b1, b2, b3, b4, b5, b6)

    S_param = SimpleMosaicityParameterisation(params)
    L_param = (uniform(1e-3, 2e-3),)
    W_param = (uniform(1e-3, 2e-3), uniform(0, 1e-3), uniform(1e-3, 2e-3))
    ctot = randint(100, 1000)

    T = matrix.sqr(
        (uniform(1e-3, 2e-3), 0, uniform(1e-6, 2e-6), uniform(1e-3, 2e-3))
    )
    Sobs = T * T.transpose()

    params = [S_param, U_param, B_param, L_param, W_param]

    return params, s0, h, ctot, mobs, Sobs

def target(self, params):
    parameterisation = SimpleMosaicityParameterisation(params)
    t = MaximumLikelihoodTarget(
        parameterisation,
        self.s0,
        self.s2_list,
        self.ctot_list,
        self.xbar_list,
        self.Sobs_list,
    )
    lnL = t.log_likelihood()
    # Return the negative log likelihood so it can be minimised
    return -lnL

def test():
    for i in range(10):
        (b1, b2, b3, b4, b5, b6), s0, s2, ctot, mobs, Sobs = generate_data()

        parameterisation = SimpleMosaicityParameterisation((b1, b2, b3, b4, b5, b6))

        reflection_model = ReflectionData(
            parameterisation, s0, s2, ctot, mobs, Sobs, second_derivatives=True
        )
        test_first_derivatives(reflection_model)
        test_second_derivatives(reflection_model)

    print("OK")

def compute_L(b1, b2, b3, b4, b5, b6):
    # R, s0, mu2, r, mobs, ctot and Sobs are expected to be defined at module
    # scope by the surrounding test script; they are not defined in this snippet.
    model = SimpleMosaicityParameterisation((b1, b2, b3, b4, b5, b6))
    sigma = model.sigma()
    sigmap = R * sigma * R.transpose()
    sigma11 = matrix.sqr((sigmap[0], sigmap[1], sigmap[3], sigmap[4]))
    sigma12 = matrix.col((sigmap[2], sigmap[5]))
    sigma21 = matrix.col((sigmap[6], sigmap[7])).transpose()
    sigma22 = sigmap[8]

    # Conditional mean and covariance of the in-plane components given the
    # component along the rotated third axis
    z = s0.length()
    mubar = sigma12 * (1 / sigma22) * (z - mu2)
    sigma_bar = sigma11 - sigma12 * (1 / sigma22) * sigma21

    d = r - mu2
    c_d = mubar - mobs

    # Marginal (A, B) and conditional (C, D, E) contributions to -2 log L
    A = log(sigma22)
    B = (1 / sigma22) * d ** 2
    C = log(sigma_bar.determinant()) * ctot
    D = (sigma_bar.inverse() * ctot * Sobs).trace()
    E = (sigma_bar.inverse() * ctot * c_d * c_d.transpose()).trace()
    return -0.5 * (A + B + C + D + E)
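
# Illustrative sketch (not part of the original test): the A-E terms in compute_L
# follow the standard conditional-Gaussian decomposition, with conditional mean
# mubar = sigma12 * (1 / sigma22) * (z - mu2) and conditional covariance
# sigma_bar = sigma11 - sigma12 * (1 / sigma22) * sigma21 (the Schur complement of
# sigma22). The check below verifies the related determinant identity
# det(sigmap) = det(sigma_bar) * sigma22 with plain numpy on an arbitrary matrix;
# the helper name is hypothetical.
def _check_conditional_covariance():
    import numpy as np

    sigmap = np.array([[2.0, 0.3, 0.1], [0.3, 1.5, 0.2], [0.1, 0.2, 1.0]])
    sigma11 = sigmap[:2, :2]
    sigma12 = sigmap[:2, 2:]
    sigma21 = sigmap[2:, :2]
    sigma22 = sigmap[2, 2]
    sigma_bar = sigma11 - sigma12 @ sigma21 / sigma22
    assert abs(np.linalg.det(sigmap) - np.linalg.det(sigma_bar) * sigma22) < 1e-12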

def run():
    # np.random.seed(1)
    # seed(1)

    # The number of times to run each experiment
    m = 10

    # The results
    results = defaultdict(list)

    # Generate a covariance matrix
    print("Generating covariance matrix")
    sigma = generate_sigma()

    # Repeat the experiment m times
    for i in range(m):

        # Set the beam vector
        s0 = matrix.col((0, 0, 1))

        # sigma = matrix.sqr((2.7e-05, -2.33e-05, -1.26e-06,
        #                     -2.33e-05, 6.08e-05, -1.01e-05,
        #                     -1.26e-06, -1.01e-05, 4.41e-05))

        print("Known Sigma")
        print("( %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, )"
              % tuple(sigma))

        # Generate some samples
        print("Generating samples")
        samples = list(zip(*generate_simple(s0, sigma, 1000)))

        # Iterate over the number of samples to use (an earlier logarithmic
        # spacing between 10 and 1000 is kept commented out below).
        # A = log(10)
        # B = log(1001)
        # N = (B - A) / 10.0
        # for t in np.arange(A, log(1001), N):
        #     n = int(floor(exp(t)))
        # N_list = [900]
        N_list = (
            list(range(3, 20))
            + list(range(20, 200, 10))
            + list(range(200, 1000, 100))
        )
        for n in N_list:

            # Select a sample
            print("Selecting %d samples" % n)
            subsample = sample(samples, n)

            # Estimate the parameters
            print("Estimating parameters")
            s2_list, ctot_list, mobs_list, Sobs_list = zip(*subsample)
            Sobs_list = flex.double(Sobs_list)
            refiner = ProfileRefiner(
                SimpleMosaicityParameterisation((1, 0, 1, 0, 0, 1)),
                ProfileRefinerData(s0, s2_list, ctot_list, mobs_list, Sobs_list),
            )
            refiner.refine()
            params = refiner.parameters

            # Compute sigma from the parameters
            M = matrix.sqr((
                params[0], 0, 0,
                params[1], params[2], 0,
                params[3], params[4], params[5],
            ))
            sigma_cal = M * M.transpose()

            # Compute the KL divergence
            kl = kl_divergence(sigma, sigma_cal)

            print("Calculated Sigma")
            print("( %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, )"
                  % tuple(sigma_cal))
            print(n, kl)
            print("")
            results[n].append(kl)

    print("Known Sigma")
    print("( %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, %.3g, )" % tuple(sigma))

    with open("result.txt", "w") as outfile:
        for n in sorted(results.keys()):
            print("%d %.6f" % (n, sum(results[n]) / len(results[n])), file=outfile)
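
# The kl_divergence() helper used in run() is defined elsewhere in the module. As a
# point of reference, the sketch below gives the closed form for the KL divergence
# between two zero-mean 3D Gaussians, which is presumably what that helper
# evaluates (an assumption, not a copy of it); the name _kl_divergence_gaussian is
# hypothetical.
def _kl_divergence_gaussian(sigma_a, sigma_b):
    import numpy as np

    a = np.array(list(sigma_a), dtype=float).reshape(3, 3)
    b = np.array(list(sigma_b), dtype=float).reshape(3, 3)
    # KL(N(0, a) || N(0, b)) for k = 3 dimensions
    return 0.5 * (
        np.trace(np.linalg.inv(b) @ a)
        + np.log(np.linalg.det(b) / np.linalg.det(a))
        - 3
    )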

def tst_binned():
    numpy.random.seed(100)

    # Ensure we have a data block
    experiments = ExperimentListFactory.from_json_file("experiments.json")
    experiments[0].scan.set_oscillation((0, 1.0), deg=True)
    experiments[0].beam.set_s0((0, 0, -1))

    s0 = matrix.col(experiments[0].beam.get_s0())

    # The predicted reflections
    reflections = flex.reflection_table.from_predictions_multi(experiments, padding=4)
    print(len(reflections))

    sigma = matrix.sqr((1e-6, 0, 0, 0, 2e-6, 0, 0, 0, 3e-6))

    reflections = generate_observations2(experiments, reflections, sigma)

    s2_list, ctot_list, xbar_list, Sobs_list = generate_from_reflections_binned(
        s0, sigma, reflections
    )

    index = sample(range(len(s2_list)), 200)

    def select_sample(d, index):
        return [d[i] for i in index]

    s2_list = select_sample(s2_list, index)
    ctot_list = select_sample(ctot_list, index)
    xbar_list = select_sample(xbar_list, index)
    Sobs_list = select_sample(Sobs_list, index)

    print("Using %d reflections: " % len(s2_list))

    values = flex.double((sqrt(1e-6), 0, sqrt(2e-6), 0, 0, sqrt(3e-6)))
    offset = flex.double([sqrt(1e-7) for v in values])

    parameterisation = SimpleMosaicityParameterisation((1, 0, 1, 0, 0, 1))
    Sobs_list = flex.double(Sobs_list)
    data = ProfileRefinerData(s0, s2_list, ctot_list, xbar_list, Sobs_list)
    refiner = ProfileRefiner(parameterisation, data)
    ml = refiner.refine()
    params = refiner.parameters

    # optimizer = SimpleSimplex(
    #     values,
    #     offset,
    #     Target(s0, s2_list, xbar_list, ctot_list, Sobs_list, test=0),
    #     2000,
    # )
    # params = optimizer.get_solution()

    M = matrix.sqr(
        (params[0], 0, 0, params[1], params[2], 0, params[3], params[4], params[5])
    )
    sigma = M * M.transpose()
    print(sigma)

    expected = matrix.sqr((
        1.07025484551e-06, 1.30518861783e-09, -1.72635922351e-09,
        1.30518861783e-09, 2.10252906788e-06, -1.64646310672e-08,
        -1.72635922351e-09, -1.64646310672e-08, 3.12149393966e-06,
    ))

    assert all(1e6 * abs(a - b) < 1e-7 for a, b in zip(sigma, expected))

    print("OK")

def compute_sigma22(b1, b2, b3, b4, b5, b6):
    model = SimpleMosaicityParameterisation((b1, b2, b3, b4, b5, b6))
    sigma = model.sigma()
    sigmap = R * sigma * R.transpose()
    sigma22 = sigmap[8]
    return sigma22
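
# compute_sigma22, compute_mu_bar and compute_L each take the six parameters as
# separate scalars, which is the shape a finite-difference derivative check
# expects. A minimal central-difference sketch is given below under that
# assumption; _central_difference is a hypothetical helper, not part of the
# original test.
def _central_difference(f, params, i, h=1e-8):
    # Perturb only the i-th parameter by +/- h and take the symmetric difference,
    # e.g. _central_difference(compute_sigma22, (b1, b2, b3, b4, b5, b6), 0).
    p_plus = list(params)
    p_minus = list(params)
    p_plus[i] += h
    p_minus[i] -= h
    return (f(*p_plus) - f(*p_minus)) / (2 * h)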