def prepare_observations_for_scaling(work_params, obs, reference_intensities=None,
                                     half_data_flag=0, files=None):
  """Package raw observations into an intensity_data() container for scaling.

  Copies frame/miller lookups and original HKLs out of the *obs* dict,
  optionally restricts to half of the data (for half-dataset statistics),
  rescales intensities so the mean signal is ~10, and pre-computes
  (sin theta / lambda)**2 for each observation when a reference set is given.

  Parameters:
    work_params            phil-style parameter object; reads
                           .filename_extension and .levmar.sdfac_value
    obs                    dict of flex arrays keyed by "frame_lookup",
                           "miller_lookup", "original_H/K/L",
                           "observed_intensity", "observed_sigI"
    reference_intensities  miller array supplying unit cell and indices
                           for the stol_sq computation (optional)
    half_data_flag         0 = use all data; 1 or 2 = keep odd / even half
    files                  optional list of filenames; when given, the
                           half-data split keys on the last digit of the
                           file basename instead of the frame number

  Returns:
    intensity_data() with .frame, .miller, .origHKL, .raw_obs, .exp_var
    and (if reference_intensities given) .stol_sq populated.
  """
  result = intensity_data()
  result.frame = obs["frame_lookup"]
  result.miller = obs['miller_lookup']
  result.origHKL = flex.miller_index(obs["original_H"], obs["original_K"], obs["original_L"])
  raw_obs = obs["observed_intensity"]
  sigma_obs = obs["observed_sigI"]
  if half_data_flag in [1, 2]:  # apply selection after random numbers have been applied
    if files is None:  # was "== None"; identity test is the correct idiom
      half_data_selection = (obs["frame_lookup"] % 2) == (half_data_flag % 2)
    else:
      # if file names are available, base half data selection on the last digit in filename.
      extension = work_params.filename_extension
      frame_selection = flex.bool([
        (half_data_flag == 1 and (int(item.split("." + extension)[0][-1]) % 2 == 1)) or
        (half_data_flag == 2 and (int(item.split("." + extension)[0][-1]) % 2 == 0))
        for item in files])
      half_data_selection = frame_selection.select(obs["frame_lookup"])
    result.frame = obs["frame_lookup"].select(half_data_selection)
    result.miller = obs['miller_lookup'].select(half_data_selection)
    result.origHKL = result.origHKL.select(half_data_selection)
    raw_obs = raw_obs.select(half_data_selection)
    sigma_obs = sigma_obs.select(half_data_selection)
  mean_signal = flex.mean(raw_obs)
  mean_sigma = flex.mean(sigma_obs)
  print("<I> / <sigma>", (mean_signal / mean_sigma))
  scale_factor = mean_signal / 10.
  print("Mean signal is", mean_signal, "Applying a constant scale factor of ", scale_factor)
  SDFAC_FROM_CHISQ = work_params.levmar.sdfac_value
  # most important line; puts input data on a numerically reasonable scale # XXX
  result.raw_obs = raw_obs / scale_factor
  scaled_sigma = SDFAC_FROM_CHISQ * sigma_obs / scale_factor
  result.exp_var = scaled_sigma * scaled_sigma
  # reference intensities gets us the unit cell & miller indices to
  # gain a static array of (sin theta over lambda)**2
  if reference_intensities is not None:
    uc = reference_intensities.unit_cell()
    stol_sq = flex.double()
    for i in range(len(result.miller)):
      this_hkl = reference_intensities.indices()[result.miller[i]]
      stol_sq_item = uc.stol_sq(this_hkl)
      stol_sq.append(stol_sq_item)
    result.stol_sq = stol_sq
  return result
def prepare_simulation_with_noise(sim, transmittance, apply_noise, ordered_intensities=None, half_data_flag=0):
  """Build an intensity_data() container from simulated observations.

  Scales the simulated intensities by *transmittance*, optionally adds
  Gaussian noise (fixed seed 321, sigma = sqrt(signal)), optionally keeps
  only the odd or even frames (half_data_flag 1 or 2), then normalizes so
  the mean signal is ~10.  When *ordered_intensities* is supplied, a static
  (sin theta / lambda)**2 array is attached for later use.
  """
  result = intensity_data()
  result.frame = sim["frame_lookup"]
  result.miller = sim['miller_lookup']
  noiseless = transmittance * sim['observed_intensity']
  if apply_noise:
    import scitbx.random
    from scitbx.random import variate, normal_distribution
    # bernoulli_distribution, gamma_distribution, poisson_distribution
    scitbx.random.set_random_seed(321)  # deterministic noise for reproducibility
    gauss = variate(normal_distribution())
    # adds in Gauss noise to signal
    noise = flex.sqrt(noiseless) * gauss(len(noiseless))
  else:
    noise = flex.double(len(noiseless), 0.)
  raw_obs = noiseless + noise
  # apply selection after random numbers have been applied
  if half_data_flag in (1, 2):
    keep = (sim["frame_lookup"] % 2) == (half_data_flag % 2)
    result.frame = sim["frame_lookup"].select(keep)
    result.miller = sim['miller_lookup'].select(keep)
    raw_obs = raw_obs.select(keep)
  mean_signal = flex.mean(raw_obs)
  sigma_obs = flex.sqrt(flex.abs(raw_obs))
  mean_sigma = flex.mean(sigma_obs)
  print("<I> / <sigma>", (mean_signal / mean_sigma))
  scale_factor = mean_signal / 10.
  print("Mean signal is", mean_signal, "Applying a constant scale factor of ", scale_factor)
  # most important line; puts input data on a numerically reasonable scale
  result.raw_obs = raw_obs / scale_factor
  scaled_sigma = sigma_obs / scale_factor
  result.exp_var = scaled_sigma * scaled_sigma
  # ordered intensities gets us the unit cell & miller indices to
  # gain a static array of (sin theta over lambda)**2
  if ordered_intensities is not None:
    cell = ordered_intensities.unit_cell()
    hkls = ordered_intensities.indices()
    stol_sq = flex.double()
    for midx in result.miller:
      stol_sq.append(cell.stol_sq(hkls[midx]))
    result.stol_sq = stol_sq
  return result
def prepare_simulation_with_noise(sim, transmittance, apply_noise, ordered_intensities=None, half_data_flag=0):
  """Build an intensity_data() container from simulated observations.

  Py3 fix: this variant used Python-2-only `print` statements and `xrange`,
  which are syntax/name errors under Python 3 (used elsewhere in this file).

  Scales simulated intensities by *transmittance*, optionally adds Gaussian
  noise (fixed seed 321, sigma = sqrt(signal)), optionally keeps only the
  odd or even frames (half_data_flag 1 or 2), then rescales so the mean
  signal is ~10.  When *ordered_intensities* is given, attaches a static
  (sin theta / lambda)**2 array.
  """
  result = intensity_data()
  result.frame = sim["frame_lookup"]
  result.miller = sim['miller_lookup']
  raw_obs_no_noise = transmittance * sim['observed_intensity']
  if apply_noise:
    import scitbx.random
    from scitbx.random import variate, normal_distribution
    # bernoulli_distribution, gamma_distribution, poisson_distribution
    scitbx.random.set_random_seed(321)  # fixed seed keeps simulations reproducible
    g = variate(normal_distribution())
    noise = flex.sqrt(raw_obs_no_noise) * g(len(raw_obs_no_noise))
    # adds in Gauss noise to signal
  else:
    noise = flex.double(len(raw_obs_no_noise), 0.)
  raw_obs = raw_obs_no_noise + noise
  if half_data_flag in [1, 2]:  # apply selection after random numbers have been applied
    half_data_selection = (sim["frame_lookup"] % 2) == (half_data_flag % 2)
    result.frame = sim["frame_lookup"].select(half_data_selection)
    result.miller = sim['miller_lookup'].select(half_data_selection)
    raw_obs = raw_obs.select(half_data_selection)
  mean_signal = flex.mean(raw_obs)
  sigma_obs = flex.sqrt(flex.abs(raw_obs))
  mean_sigma = flex.mean(sigma_obs)
  print("<I> / <sigma>", (mean_signal / mean_sigma))
  scale_factor = mean_signal / 10.
  print("Mean signal is", mean_signal, "Applying a constant scale factor of ", scale_factor)
  # most important line; puts input data on a numerically reasonable scale
  result.raw_obs = raw_obs / scale_factor
  scaled_sigma = sigma_obs / scale_factor
  result.exp_var = scaled_sigma * scaled_sigma
  # ordered intensities gets us the unit cell & miller indices to
  # gain a static array of (sin theta over lambda)**2
  if ordered_intensities is not None:
    uc = ordered_intensities.unit_cell()
    stol_sq = flex.double()
    for i in range(len(result.miller)):
      this_hkl = ordered_intensities.indices()[result.miller[i]]
      stol_sq_item = uc.stol_sq(this_hkl)
      stol_sq.append(stol_sq_item)
    result.stol_sq = stol_sq
  return result
def __init__(self, Ibase, Gbase, Bbase, I_visited, G_visited, FSIM, **kwargs):
  """Fit only the visited parameters, then expand results back to full size.

  Py3 fix: replaced Python-2-only `xrange` and the `print` statement with
  `range` and `print()`.

  Builds forward (old index -> compact index) and backward (compact -> old)
  maps over the frame parameters (G/B) and the intensity parameters (I),
  remaps FSIM.frame / FSIM.miller into the compact numbering, runs the base
  class fit on the subset, and finally scatters the fitted values back into
  full-length arrays self.expanded_I / expanded_G / expanded_B (unvisited
  slots remain 0).
  """
  # --- compact the frame (G/B) parameter space to visited entries only ---
  g_counter = 0
  forward_map_G = flex.size_t(len(G_visited))
  backward_map_G = flex.size_t()
  for s in range(len(G_visited)):
    #print s, G_visited[s], c[len_I + s], c[len_I + len(Gbase) + s]
    if G_visited[s]:
      forward_map_G[s] = g_counter
      backward_map_G.append(s)
      g_counter += 1
  subsetGbase = Gbase.select(backward_map_G)
  subsetBbase = Bbase.select(backward_map_G)
  remapped_frame = forward_map_G.select(FSIM.frame)
  # --- compact the intensity (I) parameter space to visited entries only ---
  i_counter = 0
  forward_map_I = flex.size_t(len(I_visited))
  backward_map_I = flex.size_t()
  for s in range(len(I_visited)):
    #print s,I_visited[s], c[s]
    if I_visited[s]:
      forward_map_I[s] = i_counter
      backward_map_I.append(s)
      i_counter += 1
  subsetIbase = Ibase.select(backward_map_I)
  remapped_miller = forward_map_I.select(FSIM.miller)
  # Rebuild the observation container with remapped frame/miller lookups.
  from cctbx.examples.merging import intensity_data
  remapped_FSIM = intensity_data()
  remapped_FSIM.raw_obs = FSIM.raw_obs
  remapped_FSIM.exp_var = FSIM.exp_var
  remapped_FSIM.stol_sq = FSIM.stol_sq
  remapped_FSIM.frame = remapped_frame
  remapped_FSIM.miller = remapped_miller
  base_class.__init__(self, subsetIbase, subsetGbase, subsetBbase, remapped_FSIM, **kwargs)
  fitted_I, fitted_G, fitted_B = self.unpack()
  # --- expand fitted subset values back onto the original index space ---
  self.expanded_G = flex.double(len(Gbase))
  self.expanded_B = flex.double(len(Gbase))
  for s in range(len(G_visited)):
    if G_visited[s]:
      self.expanded_G[s] = fitted_G[forward_map_G[s]]
      self.expanded_B[s] = fitted_B[forward_map_G[s]]
  self.expanded_I = flex.double(len(Ibase))
  for s in range(len(I_visited)):
    if I_visited[s]:
      self.expanded_I[s] = fitted_I[forward_map_I[s]]
  print("DONE UNMAPPING HERE")
def prepare_observations_for_scaling(work_params, obs, reference_intensities=None,
                                     half_data_flag=0, files=None):
  """Package raw observations into an intensity_data() container for scaling.

  Py3 fix: this variant used Python-2-only `print` statements and `xrange`
  (syntax/name errors under Python 3) and tested `files == None` instead of
  the identity comparison `files is None`.

  Copies frame/miller lookups and original HKLs out of the *obs* dict,
  optionally restricts to half of the data (half_data_flag 1 or 2, split by
  frame parity or, when *files* is given, by the last digit of each file
  basename), rescales intensities so the mean signal is ~10 with sigmas
  multiplied by work_params.levmar.sdfac_value, and pre-computes
  (sin theta / lambda)**2 per observation when *reference_intensities*
  is supplied.
  """
  result = intensity_data()
  result.frame = obs["frame_lookup"]
  result.miller = obs['miller_lookup']
  result.origHKL = flex.miller_index(obs["original_H"], obs["original_K"], obs["original_L"])
  raw_obs = obs["observed_intensity"]
  sigma_obs = obs["observed_sigI"]
  if half_data_flag in [1, 2]:  # apply selection after random numbers have been applied
    if files is None:
      half_data_selection = (obs["frame_lookup"] % 2) == (half_data_flag % 2)
    else:
      # if file names are available, base half data selection on the last digit in filename.
      extension = work_params.filename_extension
      frame_selection = flex.bool([
        (half_data_flag == 1 and (int(item.split("." + extension)[0][-1]) % 2 == 1)) or
        (half_data_flag == 2 and (int(item.split("." + extension)[0][-1]) % 2 == 0))
        for item in files])
      half_data_selection = frame_selection.select(obs["frame_lookup"])
    result.frame = obs["frame_lookup"].select(half_data_selection)
    result.miller = obs['miller_lookup'].select(half_data_selection)
    result.origHKL = result.origHKL.select(half_data_selection)
    raw_obs = raw_obs.select(half_data_selection)
    sigma_obs = sigma_obs.select(half_data_selection)
  mean_signal = flex.mean(raw_obs)
  mean_sigma = flex.mean(sigma_obs)
  print("<I> / <sigma>", (mean_signal / mean_sigma))
  scale_factor = mean_signal / 10.
  print("Mean signal is", mean_signal, "Applying a constant scale factor of ", scale_factor)
  SDFAC_FROM_CHISQ = work_params.levmar.sdfac_value
  # most important line; puts input data on a numerically reasonable scale # XXX
  result.raw_obs = raw_obs / scale_factor
  scaled_sigma = SDFAC_FROM_CHISQ * sigma_obs / scale_factor
  result.exp_var = scaled_sigma * scaled_sigma
  # reference intensities gets us the unit cell & miller indices to
  # gain a static array of (sin theta over lambda)**2
  if reference_intensities is not None:
    uc = reference_intensities.unit_cell()
    stol_sq = flex.double()
    for i in range(len(result.miller)):
      this_hkl = reference_intensities.indices()[result.miller[i]]
      stol_sq_item = uc.stol_sq(this_hkl)
      stol_sq.append(stol_sq_item)
    result.stol_sq = stol_sq
  return result
def __init__(self, Ibase, Gbase, Bbase, I_visited, G_visited, FSIM, **kwargs):
  """Fit only the visited parameters, then expand results back to full size.

  Py3 fix: replaced Python-2-only `xrange` and the `print` statement with
  `range` and `print()`; unstacked the semicolon-joined statements.

  Compacts the frame (G/B) and intensity (I) parameter spaces to their
  visited entries, remaps FSIM.frame / FSIM.miller accordingly, runs the
  base-class fit on the subset, then scatters fitted values back into
  full-length self.expanded_I / expanded_G / expanded_B (unvisited slots
  remain 0).
  """
  # Forward map: original frame index -> compact index; backward: inverse.
  g_counter = 0
  forward_map_G = flex.size_t(len(G_visited))
  backward_map_G = flex.size_t()
  for s in range(len(G_visited)):
    #print s, G_visited[s], c[len_I + s], c[len_I + len(Gbase) + s]
    if G_visited[s]:
      forward_map_G[s] = g_counter
      backward_map_G.append(s)
      g_counter += 1
  subsetGbase = Gbase.select(backward_map_G)
  subsetBbase = Bbase.select(backward_map_G)
  remapped_frame = forward_map_G.select(FSIM.frame)
  # Same compaction for the intensity parameters.
  i_counter = 0
  forward_map_I = flex.size_t(len(I_visited))
  backward_map_I = flex.size_t()
  for s in range(len(I_visited)):
    #print s,I_visited[s], c[s]
    if I_visited[s]:
      forward_map_I[s] = i_counter
      backward_map_I.append(s)
      i_counter += 1
  subsetIbase = Ibase.select(backward_map_I)
  remapped_miller = forward_map_I.select(FSIM.miller)
  from cctbx.examples.merging import intensity_data
  remapped_FSIM = intensity_data()
  remapped_FSIM.raw_obs = FSIM.raw_obs
  remapped_FSIM.exp_var = FSIM.exp_var
  remapped_FSIM.stol_sq = FSIM.stol_sq
  remapped_FSIM.frame = remapped_frame
  remapped_FSIM.miller = remapped_miller
  base_class.__init__(self, subsetIbase, subsetGbase, subsetBbase, remapped_FSIM, **kwargs)
  fitted_I, fitted_G, fitted_B = self.unpack()
  # Scatter the compact fit results back to the original index space.
  self.expanded_G = flex.double(len(Gbase))
  self.expanded_B = flex.double(len(Gbase))
  for s in range(len(G_visited)):
    if G_visited[s]:
      self.expanded_G[s] = fitted_G[forward_map_G[s]]
      self.expanded_B[s] = fitted_B[forward_map_G[s]]
  self.expanded_I = flex.double(len(Ibase))
  for s in range(len(I_visited)):
    if I_visited[s]:
      self.expanded_I[s] = fitted_I[forward_map_I[s]]
  print("DONE UNMAPPING HERE")
def __init__(self, Ibase, Gbase, I_visited, G_visited, FSIM, **kwargs):
  """Fit only the visited parameters, then expand results back to full size.

  Py3 fix: `dict.has_key()` was removed in Python 3 (use `in`), and
  Python-2-only `xrange` / `print` statements were replaced with `range`
  and `print()`.

  Compacts the frame (G) and intensity (I) parameter spaces to their
  visited entries, remaps FSIM.frame / FSIM.miller accordingly, filters an
  optional 'experiments' kwarg down to the visited frames, runs the
  base-class fit on the subset, and expands the fitted values and their
  standard deviations back to full-length arrays in self.expanded /
  self.expanded_stddev (unvisited slots remain 0).
  """
  # Forward map: original frame index -> compact index; backward: inverse.
  g_counter = 0
  forward_map_G = flex.size_t(len(G_visited))
  backward_map_G = flex.size_t()
  for s in range(len(G_visited)):
    #print s, G_visited[s], c[len_I + s], c[len_I + len(Gbase) + s]
    if G_visited[s]:
      forward_map_G[s] = g_counter
      backward_map_G.append(s)
      g_counter += 1
  subsetGbase = Gbase.select(backward_map_G)
  remapped_frame = forward_map_G.select(FSIM.frame)
  # Same compaction for the intensity parameters.
  i_counter = 0
  forward_map_I = flex.size_t(len(I_visited))
  backward_map_I = flex.size_t()
  for s in range(len(I_visited)):
    #print s,I_visited[s], c[s]
    if I_visited[s]:
      forward_map_I[s] = i_counter
      backward_map_I.append(s)
      i_counter += 1
  subsetIbase = Ibase.select(backward_map_I)
  remapped_miller = forward_map_I.select(FSIM.miller)
  from cctbx.examples.merging import intensity_data
  remapped_FSIM = intensity_data()
  remapped_FSIM.raw_obs = FSIM.raw_obs
  remapped_FSIM.exp_var = FSIM.exp_var
  remapped_FSIM.stol_sq = FSIM.stol_sq
  remapped_FSIM.origHKL = FSIM.origHKL
  remapped_FSIM.frame = remapped_frame
  remapped_FSIM.miller = remapped_miller
  if 'experiments' in kwargs:
    # XXX seems like we need to implement a proper select statement for ExperimentList
    # kwargs["experiments"] = kwargs["experiments"].select(G_visited==1)
    from dxtbx.model import ExperimentList
    new_experiments = ExperimentList()
    for idx in range(len(G_visited)):
      if G_visited[idx] == 1:
        new_experiments.append(kwargs["experiments"][idx])
    kwargs["experiments"] = new_experiments
  base_class.__init__(self, subsetIbase, subsetGbase, remapped_FSIM, **kwargs)
  fitted = self.unpack()
  fitted_stddev = self.unpack_stddev()

  def help_expand_data(data):
    # Scatter each compact result array back onto the original index space.
    result = {}
    for key in data.keys():
      if key == "I":
        ex = flex.double(len(Ibase))
        for s in range(len(I_visited)):
          if I_visited[s]:
            ex[s] = data[key][forward_map_I[s]]
        result[key] = ex
      elif key in ["G", "B", "D", "Ax", "Ay"]:
        ex = flex.double(len(Gbase))
        for s in range(len(G_visited)):
          if G_visited[s]:
            ex[s] = data[key][forward_map_G[s]]
        result[key] = ex
    return result

  self.expanded = help_expand_data(fitted)
  self.expanded_stddev = help_expand_data(fitted_stddev)
  print("DONE UNMAPPING HERE")
def __init__(self, Ibase, Gbase, I_visited, G_visited, FSIM, **kwargs):
  """Fit only the visited parameters, then expand results back to full size.

  Py3 fix: `dict.has_key()` was removed in Python 3 (use `in`), and
  Python-2-only `xrange` / `print` statements were replaced with `range`
  and `print()`.

  Compacts the frame (G) and intensity (I) parameter spaces to their
  visited entries, remaps FSIM.frame / FSIM.miller accordingly, filters an
  optional 'experiments' kwarg down to the visited frames, runs the
  base-class fit on the subset, and expands fitted values and standard
  deviations back to full-length arrays (unvisited slots remain 0).
  """
  # Forward map: original frame index -> compact index; backward: inverse.
  g_counter = 0
  forward_map_G = flex.size_t(len(G_visited))
  backward_map_G = flex.size_t()
  for s in range(len(G_visited)):
    #print s, G_visited[s], c[len_I + s], c[len_I + len(Gbase) + s]
    if G_visited[s]:
      forward_map_G[s] = g_counter
      backward_map_G.append(s)
      g_counter += 1
  subsetGbase = Gbase.select(backward_map_G)
  remapped_frame = forward_map_G.select(FSIM.frame)
  # Same compaction for the intensity parameters.
  i_counter = 0
  forward_map_I = flex.size_t(len(I_visited))
  backward_map_I = flex.size_t()
  for s in range(len(I_visited)):
    #print s,I_visited[s], c[s]
    if I_visited[s]:
      forward_map_I[s] = i_counter
      backward_map_I.append(s)
      i_counter += 1
  subsetIbase = Ibase.select(backward_map_I)
  remapped_miller = forward_map_I.select(FSIM.miller)
  from cctbx.examples.merging import intensity_data
  remapped_FSIM = intensity_data()
  remapped_FSIM.raw_obs = FSIM.raw_obs
  remapped_FSIM.exp_var = FSIM.exp_var
  remapped_FSIM.stol_sq = FSIM.stol_sq
  remapped_FSIM.origHKL = FSIM.origHKL
  remapped_FSIM.frame = remapped_frame
  remapped_FSIM.miller = remapped_miller
  if 'experiments' in kwargs:
    # XXX seems like we need to implement a proper select statement for ExperimentList
    # kwargs["experiments"] = kwargs["experiments"].select(G_visited==1)
    # NOTE(review): this import path is the pre-2017 dxtbx layout; newer
    # dxtbx exposes `from dxtbx.model import ExperimentList` — confirm which
    # dxtbx version this code targets.
    from dxtbx.model.experiment.experiment_list import ExperimentList
    new_experiments = ExperimentList()
    for idx in range(len(G_visited)):
      if G_visited[idx] == 1:
        new_experiments.append(kwargs["experiments"][idx])
    kwargs["experiments"] = new_experiments
  base_class.__init__(self, subsetIbase, subsetGbase, remapped_FSIM, **kwargs)
  fitted = self.unpack()
  fitted_stddev = self.unpack_stddev()

  def help_expand_data(data):
    # Scatter each compact result array back onto the original index space.
    result = {}
    for key in data.keys():
      if key == "I":
        ex = flex.double(len(Ibase))
        for s in range(len(I_visited)):
          if I_visited[s]:
            ex[s] = data[key][forward_map_I[s]]
        result[key] = ex
      elif key in ["G", "B", "D", "Ax", "Ay"]:
        ex = flex.double(len(Gbase))
        for s in range(len(G_visited)):
          if G_visited[s]:
            ex[s] = data[key][forward_map_G[s]]
        result[key] = ex
    return result

  self.expanded = help_expand_data(fitted)
  self.expanded_stddev = help_expand_data(fitted_stddev)
  print("DONE UNMAPPING HERE")