def assign_operators(self, reidx_ops=None):
    """Assign a reindexing operator to every array in self.arrays.

    Concatenates all arrays into one miller array tagged with a per-array
    lattice id, runs the Brehm-Diederichs procedure on the combined data,
    and stores one sgtbx.change_of_basis_op per input array in
    self.best_operators (entries stay None for arrays the algorithm did
    not assign to any operator group).

    reidx_ops: candidate reindexing operators; computed via
      self.find_reindex_ops() when omitted.  NOTE(review): the list is
      sorted in place, so a caller-supplied list is mutated.
    """
    arrays = self.arrays
    self.best_operators = None
    if reidx_ops is None: reidx_ops = self.find_reindex_ops()
    print >> self.log_out, "Reindex operators:", [str(x.as_hkl()) for x in reidx_ops]
    print >> self.log_out, ""
    reidx_ops.sort(key=lambda x: not x.is_identity_op()) # identity op to first
    # Concatenate every array; latt_id records, per reflection, the index
    # of the input array it came from.
    data = None
    latt_id = flex.int([])
    for i, a in enumerate(arrays):
        if data is None:
            data = a
        else:
            # Symmetry may legitimately differ between lattices here.
            data = data.concatenate(a, assert_is_similar_symmetry=False)
        latt_id.extend(flex.int(a.size(), i))
    latt_id = data.customized_copy(data=latt_id.as_double())
    result = brehm_diederichs.run(L=[data, latt_id], nproc=self.nproc, verbose=True)
    # One slot per input array; [None]*n replaces the original
    # map(lambda x: None, xrange(n)) idiom (identical Python 2 result).
    self.best_operators = [None] * len(arrays)
    for op in result:
        idxes = map(int, result[op])
        print >> self.log_out, " %s num=%3d idxes= %s" % (op, len(result[op]), idxes)
        for idx in idxes:
            self.best_operators[idx] = sgtbx.change_of_basis_op(op)
def assign_operators(self, reidx_ops=None):
    """Decide the best change-of-basis operator for each input array.

    All arrays are merged into a single data set labelled by lattice
    number, the Brehm-Diederichs sorter groups the lattices by reindexing
    operator (with plotting enabled), and the chosen operator for each
    array is recorded in self.best_operators; unassigned slots stay None.
    """
    arrays = self.arrays
    self.best_operators = None
    if reidx_ops is None:
        reidx_ops = self.find_reindex_ops()
    print >>self.log_out, "Reindex operators:", [str(op.as_hkl()) for op in reidx_ops]
    print >>self.log_out, ""
    reidx_ops.sort(key=lambda x: not x.is_identity_op()) # identity op to first
    # Merge every array into one, tracking each reflection's source array.
    merged = None
    lattice_ids = flex.int([])
    for array_no, array in enumerate(arrays):
        if merged is None:
            merged = array
        else:
            merged = merged.concatenate(array, assert_is_similar_symmetry=False)
        lattice_ids.extend(flex.int(array.size(), array_no))
    lattice_ids = merged.customized_copy(data=lattice_ids.as_double())
    result = brehm_diederichs.run(L=[merged, lattice_ids], nproc=self.nproc, plot=True, verbose=True)
    self.best_operators = [None for _ in xrange(len(arrays))]
    for op in result:
        idxes = [int(v) for v in result[op]]
        print >>self.log_out, "  %s num=%3d idxes= %s" %(op, len(result[op]), idxes)
        for idx in idxes:
            self.best_operators[idx] = sgtbx.change_of_basis_op(op)
def run(args):
  """Command-line driver: resolve indexing ambiguity across XFEL lattices.

  Reads observations from the xscaling MySQL tables, bundles them into
  (intensities, lattice-id) miller arrays, pickles the presorted
  intensities, runs the Brehm-Diederichs lattice sorter, and pickles both
  the operator->frame grouping and a file-name->operator reverse lookup.

  args: raw command-line argument strings, parsed against master_phil.
  """
  phil = iotbx.phil.process_command_line(args=args, master_string=master_phil).show()
  work_params = phil.work.extract()
  # Log file name encodes the output prefix and the scaling algorithm.
  log = open("%s_%s_merging.log" % (work_params.output.prefix, work_params.scaling.algorithm), "w")
  out = multi_out()
  out.register("log", log, atexit_send_to=None)
  out.register("stdout", sys.stdout)
  print >> out, "Target unit cell and space group:"
  print >> out, "  ", work_params.target_unit_cell
  print >> out, "  ", work_params.target_space_group
  miller_set = symmetry(
      unit_cell=work_params.target_unit_cell,
      space_group_info=work_params.target_space_group).build_miller_set(
          anomalous_flag=not work_params.merge_anomalous,
          d_min=work_params.d_min)
  from xfel.merging.general_fcalc import random_structure
  i_model = random_structure(work_params)

  # ---- Augment this code with any special procedures for x scaling
  scaler = xscaling_manager(miller_set=miller_set, i_model=i_model, params=work_params, log=out)
  scaler.read_all_mysql()
  print "finished reading the database"
  # NOTE(review): sg and origH/origK/origL are fetched but never used in
  # this function -- confirm before removing.
  sg = miller_set.space_group()
  hkl_asu = scaler.observations["hkl_id"]
  imageno = scaler.observations["frame_id"]
  intensi = scaler.observations["i"]
  lookup = scaler.millers["merged_asu_hkl"]
  origH = scaler.observations["H"]
  origK = scaler.observations["K"]
  origL = scaler.observations["L"]
  from cctbx.array_family import flex
  print "# observations from the database", len(scaler.observations["hkl_id"])
  # Translate each observation's ASU id back into a miller index.
  hkl = flex.miller_index(flex.select(lookup, hkl_asu))
  from cctbx import miller
  hkl_list = miller_set.customized_copy(indices=hkl)
  ARRAY = miller.array(miller_set=hkl_list, data=intensi)
  LATTICES = miller.array(miller_set=hkl_list, data=imageno)
  # NOTE: this import shadows the enclosing run() for the rest of the body.
  from cctbx.merging.brehm_diederichs import run_multiprocess, run
  L = (ARRAY, LATTICES) # tuple(data,lattice_id)
  from libtbx import easy_pickle
  presort_file = work_params.output.prefix + "_intensities_presort.pickle"
  print "pickling these intensities to", presort_file
  easy_pickle.dump(presort_file, L)
  ###### INPUTS #######
  # data = miller array: ASU miller index + intensity (sigmas not implemented yet)
  # lattice_id = flex double: assignment of each miller index to a lattice number
  ######################
  if work_params.nproc < 5:
    print "Sorting the lattices with 1 processor"
    result = run(L, nproc=1, verbose=True)
  else:
    print "Sorting the lattices with %d processors" % work_params.nproc
    result = run_multiprocess(L, nproc=work_params.nproc, verbose=False)
  for key in result.keys():
    print key, len(result[key])
  # 2) pickle the postsort (reindexed) ARRAY, LATTICES XXX not done yet; not clear if needed
  # Build the file-name -> reindexing-operator map via each frame's position
  # in the frames table.
  reverse_lookup = {}
  frame_id_list = list(scaler.frames_mysql["frame_id"])
  for key in result.keys():
    for frame in result[key]:
      frame_idx = frame_id_list.index(frame)
      reverse_lookup[scaler.frames_mysql["unique_file_name"][frame_idx]] = key
  lookup_file = work_params.output.prefix + "_lookup.pickle"
  reverse_lookup_file = work_params.output.prefix + "_reverse_lookup.pickle"
  easy_pickle.dump(lookup_file, result)
  easy_pickle.dump(reverse_lookup_file, reverse_lookup)
# lattice_id = flex double: assignment of each miller index to a lattice number ###################### if work_params.nproc < 5: print "Sorting the lattices with 1 processor" result = run(L, nproc=1, verbose=True) else: print "Sorting the lattices with %d processors" % work_params.nproc result = run_multiprocess(L, nproc=work_params.nproc, verbose=False) for key in result.keys(): print key, len(result[key]) # 2) pickle the postsort (reindexed) ARRAY, LATTICES XXX not done yet; not clear if needed reverse_lookup = {} frame_id_list = list(scaler.frames_mysql["frame_id"]) for key in result.keys(): for frame in result[key]: frame_idx = frame_id_list.index(frame) reverse_lookup[scaler.frames_mysql["unique_file_name"] [frame_idx]] = key lookup_file = work_params.output.prefix + "_lookup.pickle" reverse_lookup_file = work_params.output.prefix + "_reverse_lookup.pickle" easy_pickle.dump(lookup_file, result) easy_pickle.dump(reverse_lookup_file, reverse_lookup) if (__name__ == "__main__"): import sys result = run(args=sys.argv[1:])
def run(args):
  """Driver for sorting XFEL lattices by indexing ambiguity.

  NOTE(review): this is a near-verbatim duplicate of the run() defined
  earlier in this file (whitespace-only differences) -- likely a
  concatenation artifact.

  Reads observations from the xscaling MySQL tables, assembles
  (intensities, lattice-id) miller arrays, pickles the presorted
  intensities, runs the Brehm-Diederichs sorter, and pickles the
  operator->frame grouping plus a file-name->operator reverse lookup.
  """
  phil = iotbx.phil.process_command_line(args=args, master_string=master_phil).show()
  work_params = phil.work.extract()
  # Log file name combines the output prefix with the scaling algorithm.
  log = open("%s_%s_merging.log" % (work_params.output.prefix, work_params.scaling.algorithm), "w")
  out = multi_out()
  out.register("log", log, atexit_send_to=None)
  out.register("stdout", sys.stdout)
  print >> out, "Target unit cell and space group:"
  print >> out, "  ", work_params.target_unit_cell
  print >> out, "  ", work_params.target_space_group
  miller_set = symmetry(
      unit_cell=work_params.target_unit_cell,
      space_group_info=work_params.target_space_group).build_miller_set(
          anomalous_flag=not work_params.merge_anomalous,
          d_min=work_params.d_min)
  from xfel.merging.general_fcalc import random_structure
  i_model = random_structure(work_params)

  # ---- Augment this code with any special procedures for x scaling
  scaler = xscaling_manager(miller_set=miller_set, i_model=i_model, params=work_params, log=out)
  scaler.read_all_mysql()
  print "finished reading the database"
  # NOTE(review): sg and origH/origK/origL are unused below -- confirm
  # before removing.
  sg = miller_set.space_group()
  hkl_asu = scaler.observations["hkl_id"]
  imageno = scaler.observations["frame_id"]
  intensi = scaler.observations["i"]
  lookup = scaler.millers["merged_asu_hkl"]
  origH = scaler.observations["H"]
  origK = scaler.observations["K"]
  origL = scaler.observations["L"]
  from cctbx.array_family import flex
  print "# observations from the database", len(scaler.observations["hkl_id"])
  # Recover the miller index of every observation from its ASU id.
  hkl = flex.miller_index(flex.select(lookup, hkl_asu))
  from cctbx import miller
  hkl_list = miller_set.customized_copy(indices=hkl)
  ARRAY = miller.array(miller_set=hkl_list, data=intensi)
  LATTICES = miller.array(miller_set=hkl_list, data=imageno)
  # NOTE: shadows the enclosing run() for the remainder of this body.
  from cctbx.merging.brehm_diederichs import run_multiprocess, run
  L = (ARRAY, LATTICES) # tuple(data,lattice_id)
  from libtbx import easy_pickle
  presort_file = work_params.output.prefix + "_intensities_presort.pickle"
  print "pickling these intensities to", presort_file
  easy_pickle.dump(presort_file, L)
  ###### INPUTS #######
  # data = miller array: ASU miller index + intensity (sigmas not implemented yet)
  # lattice_id = flex double: assignment of each miller index to a lattice number
  ######################
  if work_params.nproc < 5:
    print "Sorting the lattices with 1 processor"
    result = run(L, nproc=1, verbose=True)
  else:
    print "Sorting the lattices with %d processors" % work_params.nproc
    result = run_multiprocess(L, nproc=work_params.nproc, verbose=False)
  for key in result.keys():
    print key, len(result[key])
  # 2) pickle the postsort (reindexed) ARRAY, LATTICES XXX not done yet; not clear if needed
  # Associate each frame's unique file name with its reindexing operator.
  reverse_lookup = {}
  frame_id_list = list(scaler.frames_mysql["frame_id"])
  for key in result.keys():
    for frame in result[key]:
      frame_idx = frame_id_list.index(frame)
      reverse_lookup[scaler.frames_mysql["unique_file_name"][frame_idx]] = key
  lookup_file = work_params.output.prefix + "_lookup.pickle"
  reverse_lookup_file = work_params.output.prefix + "_reverse_lookup.pickle"
  easy_pickle.dump(lookup_file, result)
  easy_pickle.dump(reverse_lookup_file, reverse_lookup)
# data = miller array: ASU miller index + intensity (sigmas not implemented yet) # lattice_id = flex double: assignment of each miller index to a lattice number ###################### if work_params.nproc < 5: print "Sorting the lattices with 1 processor" result = run(L,nproc=1,verbose=True) else: print "Sorting the lattices with %d processors"%work_params.nproc result = run_multiprocess(L,nproc=work_params.nproc, verbose=False) for key in result.keys(): print key,len(result[key]) # 2) pickle the postsort (reindexed) ARRAY, LATTICES XXX not done yet; not clear if needed reverse_lookup = {} frame_id_list = list(scaler.frames_mysql["frame_id"]) for key in result.keys(): for frame in result[key]: frame_idx = frame_id_list.index(frame) reverse_lookup[scaler.frames_mysql["unique_file_name"][frame_idx]] = key lookup_file = work_params.output.prefix+"_lookup.pickle" reverse_lookup_file = work_params.output.prefix+"_reverse_lookup.pickle" easy_pickle.dump(lookup_file, result) easy_pickle.dump(reverse_lookup_file, reverse_lookup) if (__name__ == "__main__"): import sys result = run(args = sys.argv[1:])
def run(args): cmd_line = command_line.argument_interpreter(master_params=master_phil_scope) working_phil, files = cmd_line.process_and_fetch( args=args, custom_processor="collect_remaining") working_phil.show() params = working_phil.extract() miller_array_all = None lattice_ids = None space_group = None file_name_dict = {} lattice_id = -1 for file_name in files: lattice_id += 1 #print "lattice_id: %i" %(lattice_id) reader = any_reflection_file(file_name) as_miller_arrays = reader.as_miller_arrays(merge_equivalents=False) #for ma in as_miller_arrays: print ma.info().labels intensities = [ma for ma in as_miller_arrays if ma.info().labels == ['I', 'SIGI']][0] intensities = intensities.customized_copy(anomalous_flag=True).set_info( intensities.info()) intensities.set_observation_type_xray_intensity() #intensities.crystal_symmetry().show_summary() #print intensities.info().labels if space_group is None: space_group = intensities.space_group() else: assert intensities.space_group() == space_group assert reader.file_type() == 'ccp4_mtz' file_name_dict[lattice_id] = file_name ids = intensities.customized_copy( data=flex.double(intensities.size(), lattice_id), sigmas=None) assert ids.size() == intensities.size() if miller_array_all is None: miller_array_all = intensities lattice_ids = ids else: miller_array_all = miller_array_all.customized_copy( indices=miller_array_all.indices().concatenate(intensities.indices()), data=miller_array_all.data().concatenate(intensities.data()), sigmas=miller_array_all.sigmas().concatenate(intensities.sigmas())) lattice_ids = lattice_ids.customized_copy( indices=lattice_ids.indices().concatenate(ids.indices()), data=lattice_ids.data().concatenate(ids.data())) assert miller_array_all.size() == lattice_ids.size() intensities = intensities.map_to_asu() intensities = intensities.customized_copy(anomalous_flag=True) intensities_p1 = intensities.expand_to_p1().merge_equivalents().array() intensities = intensities_p1.customized_copy( 
crystal_symmetry=intensities.crystal_symmetry()) L = (miller_array_all, lattice_ids) L[0].crystal_symmetry().show_summary() from cctbx.merging import brehm_diederichs if params.nproc == 1: result_sets = brehm_diederichs.run( L, asymmetric=params.asymmetric, nproc=1, show_plot=params.show_plot, save_plot=params.save_plot) else: result_sets = brehm_diederichs.run_multiprocess( L, asymmetric=params.asymmetric, nproc=params.nproc, show_plot=params.show_plot, save_plot=params.save_plot) out_file = open('reindex.txt', 'wb') for reindexing_op, wedges in result_sets.iteritems(): cb_op = sgtbx.change_of_basis_op(reindexing_op) for wedge in wedges: file_name = file_name_dict[wedge] if out_file is not None: print >> out_file, file_name, cb_op.as_hkl() basename = os.path.basename(file_name) out_name = os.path.splitext(basename)[0] + params.suffix + ".mtz" reader = any_reflection_file(file_name) assert reader.file_type() == 'ccp4_mtz' mtz_object = reader.file_content() if not cb_op.is_identity_op(): print "reindexing %s" %file_name mtz_object.change_basis_in_place(cb_op) mtz_object.write(out_name)