Example #1
 def run(self, args):
   #read inputs
   import pickle
   from subprocess import call
   from prime.postrefine.mod_input import process_input, read_pickles
   iparams, txt_out_input = process_input(args)
   runh = run_handler()
   print txt_out_input
   #read all integration pickles
   frame_files = read_pickles(iparams.data)
   n_frames = len(frame_files)
   if n_frames == 0:
     print "No integration pickle found. Exit program."
     exit()
   frames = [(i, frame_files[i], iparams) for i in range(n_frames)]
   #run command to calculate mean_I
   mm_I = 0
   if not iparams.flag_apply_b_by_frame:
     inp_pickle = {'iparams':iparams, 'frames':frames}
     pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/0.inp',"wb"))
     call(["prime._genref_determine_mean_I", iparams.run_no+'/inputs/0.inp'])
     runh.check_done(iparams, n_frames)
     mm_I = calc_mean_of_mean_I(iparams)
   #run command for scaling
   frames = [(i, frame_files[i], iparams, mm_I) for i in range(n_frames)]
   inp_pickle = {'iparams':iparams, 'frames':frames}
   pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/0.inp',"wb"))
   call(["prime._genref_scale_frame_by_mean_I", iparams.run_no+'/inputs/0.inp'])
   runh.check_done(iparams, n_frames)
   #write output to logfile
   txt_out = 'Scaling complete. Run prime.merge your_input_phil.phil to merge for a reference set.\n'
   print txt_out
   f = open(iparams.run_no+'/log.txt', 'w')
   f.write(txt_out_input+txt_out)
   f.close()
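Example #1 drives its workers indirectly: inputs are serialized to an .inp pickle, a prime dispatcher is launched on it, and run_handler.check_done blocks until all n_frames are processed. A minimal sketch of that completion-polling idea, assuming a hypothetical done/ directory in which each worker drops a marker file (the real check_done may track progress differently):

import os
import time

def wait_for_workers(run_no, n_frames, poll_interval=5):
    # Hypothetical polling loop: block until every frame has left a
    # marker file in <run_no>/done/.
    done_dir = os.path.join(run_no, 'done')
    while True:
        n_done = len(os.listdir(done_dir)) if os.path.isdir(done_dir) else 0
        if n_done >= n_frames:
            return
        time.sleep(poll_interval)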
Example #2
 def run(self, args):
   #generate reference set, prepare scaled pickles
   print "Scaling integration pickles"
   grh = genref_handler()
   grh.run(args)
   #merge for the first reference set
   print "Merging for a reference set."
   mh = merge_handler()
   mh.run(args)
   #start post-refinement loops
   iparams, txt_out_input = process_input(argv=args, flag_check_exist=False)
   prh = postrefine_handler()
   for i_cycle in range(iparams.n_postref_cycle):
     print "Post-refinement cycle ", i_cycle+1
     prh.run(args)
     print "Merging cycle ", i_cycle+1
     if i_cycle < iparams.n_postref_cycle - 1:
       mh.run(args)
     else:
       #final run
       mh.run(args, avg_mode='final')
       #copy the max_no.mtz to postref_final.mtz
       DIR = iparams.run_no+'/mtz/'
       file_no_list = [int(fname.split('.')[0]) for fname in os.listdir(DIR)]
       if len(file_no_list) > 0:
         mtz_final_fname = DIR + str(max(file_no_list)) + '.mtz'
         shutil.copy(mtz_final_fname, iparams.run_no+'/postref_final.mtz')
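The final merge above selects the highest-numbered n.mtz in run_no/mtz/, a lookup that recurs in several examples below. A small helper sketch (latest_mtz is our name, not PRIME's) that makes the intent explicit and, unlike the inline version, skips filenames whose stem is not numeric:

import os

def latest_mtz(mtz_dir):
    # Return the path of the highest-numbered <n>.mtz, or None if none exist.
    nos = [int(fname.split('.')[0]) for fname in os.listdir(mtz_dir)
           if fname.split('.')[0].isdigit()]
    return os.path.join(mtz_dir, '%d.mtz' % max(nos)) if nos else None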
Example #3
def run(argv):
    comm.Barrier()
    start_time = MPI.Wtime()
    # broadcast parameters
    if rank == 0:
        iparams, txt_out_input = process_input(argv)
        iparams.flag_volume_correction = False
        iparams.flag_hush = True
        print(txt_out_input)
        frame_files = read_pickles(iparams.data)
    else:
        iparams = None
        frame_files = None
        txt_out_input = None
    comm.Barrier()
    # assign scaling task
    if rank == 0:
        master(frame_files, iparams, "scale")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    _, mdh = merge(result, iparams, txt_out_input, "mean_scaled", "average")
    # postrefine task
    if rank == 0:
        n_postref_cycle = iparams.n_postref_cycle
        results = [None] * len(frame_files)
    else:
        n_postref_cycle = None
        results = None
    n_postref_cycle = comm.bcast(n_postref_cycle, root=0)
    avg_mode = "weighted"
    for i_iter in range(n_postref_cycle):
        comm.Barrier()
        if i_iter == n_postref_cycle - 1:
            avg_mode = "final"
        if rank == 0:
            print("Start post-refinement cycle %d" % (i_iter + 1))
            master(
                (frame_files, mdh.miller_array_merge, results, avg_mode),
                iparams,
                "postref",
            )
            result = []
        else:
            result = client()
        result = comm.gather(result, root=0)
        comm.Barrier()
        results, mdh = merge(result, iparams, "",
                             "postref_cycle_%d" % (i_iter + 1), avg_mode)
    # collect time profile
    comm.Barrier()
    end_time = MPI.Wtime()
    if rank == 0:
        print("Elapsed Time (s):%10.2f" % (end_time - start_time))
    MPI.Finalize()
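Example #3 follows a master/client pattern: rank 0 hands out tasks, every other rank runs client() and returns its share, and comm.gather reassembles the pieces on root. A self-contained mpi4py sketch of the same scatter-compute-gather shape (the squaring work function is a stand-in, not PRIME's scaling code):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

# Rank 0 prepares the task list; the broadcast gives every rank a copy.
tasks = list(range(100)) if rank == 0 else None
tasks = comm.bcast(tasks, root=0)

# Each rank works on a strided slice, then root flattens the gathered parts.
partial = [t * t for t in tasks[rank::size]]
gathered = comm.gather(partial, root=0)
if rank == 0:
    results = sum(gathered, [])
    print("collected %d results on %d ranks" % (len(results), size))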
Example #4
 def run(self, args):
   #read inputs
   import os
   import pickle
   from subprocess import call
   runh = run_handler()
   from prime.postrefine.mod_input import process_input
   iparams, txt_out_input = process_input(argv=args, flag_check_exist=False)
   iparams.flag_volume_correction = False
   if iparams.partiality_model == "Lognormal":
     iparams.voigt_nu = 0.008 #use voigt_nu as lognpdf zero parameter
   #read all result pickles
   try:
     DIR = iparams.run_no+'/pickles/'
     pickle_results = [pickle.load(open(DIR+fname, "rb")) for fname in os.listdir(DIR)]
     n_results = len(pickle_results)
   except Exception:
     print "Error reading input pickles."
     print "*VERSION UPGRADE NOTE* use prime.run instead of prime.postrefine to run all processes together."
     exit()
   #get reference file - look for n.mtz with n as maximum number.
   hklrefin = None
   if iparams.hklrefin is None:
     DIR = iparams.run_no+'/mtz/'
     file_no_list = [int(fname.split('.')[0]) for fname in os.listdir(DIR)]
     if len(file_no_list) > 0:
       hklrefin = DIR + str(max(file_no_list)) + '.mtz'
   else:
     hklrefin = iparams.hklrefin
   if hklrefin is None:
     print "No reference set found. Exit program."
     exit()
   print "Reference set:", hklrefin
   mxh = mx_handler()
   flag_hklrefin_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(hklrefin)
   #post-refinement
   avg_mode = 'weighted'
   #run command for post-refinement
   frames = [(i, pickle_results[i], iparams, miller_array_ref, avg_mode) for i in range(n_results)]
   inp_pickle = {'iparams':iparams, 'frames':frames}
   pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/0.inp',"wb"))
   call(["prime._postrefine_frame", iparams.run_no+'/inputs/0.inp'])
   runh.check_done(iparams, n_results)
   print "Post-refinement completed. Run prime.merge for the merged reflection file."
Example #5
def read_input(args):
    from prime.postrefine.mod_input import process_input
    iparams, txt_out_input = process_input(args)
    return iparams, txt_out_input
Example #6
from __future__ import division
# LIBTBX_SET_DISPATCHER_NAME prime.run
""" handle prime run """
__author__ = 'Monarin Uervirojnangkoorn, [email protected]'

from subprocess import call
from prime.postrefine.mod_run import run_handler
from prime.postrefine.mod_input import process_input
import sys

if __name__ == "__main__":
    iparams, txt_out = process_input(
        sys.argv[1:] if len(sys.argv) > 1 else None, flag_mkdir=False)
    extra_args = sys.argv[1:] if len(sys.argv) > 1 else []
    if iparams.queue.mode:
        # extract the conditional into extra_args: in the original, the
        # ternary bound to the whole concatenation and could silently
        # discard the command list when no extra arguments were given
        args = [
            "bsub", "-q", iparams.queue.qname, "-n",
            str(iparams.queue.n_nodes), "prime.postrefine"
        ] + extra_args
        call(args)
        print "Submitting prime job to", iparams.queue.qname
        runh = run_handler()
        runh.check_done(iparams)
    else:
        args = ["prime.postrefine"] + extra_args
        call(args)
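A pitfall worth noting in dispatchers like this one: a conditional expression binds looser than +, so base + extra if cond else [] parses as (base + extra) if cond else [] and can silently discard the whole command list. A two-line illustration:

base = ["prime.postrefine"]
extra = []  # e.g. sys.argv[1:] when the script receives no arguments

wrong = base + extra if extra else []    # -> [] : command lost
right = base + (extra if extra else [])  # -> ['prime.postrefine']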
Example #7
 def run(self, args):
   #read inputs
   from prime.postrefine.mod_input import process_input, read_pickles
   iparams, txt_out_input = process_input(args)
   print txt_out_input
   f = open(iparams.run_no+'/log.txt', 'w')
   f.write(txt_out_input)
   f.close()
   #if solution pickle is given, return the file name
   if iparams.indexing_ambiguity.index_basis_in is not None:
     if iparams.indexing_ambiguity.index_basis_in.endswith('.pickle'):
       return iparams.indexing_ambiguity.index_basis_in, iparams
   #read all integration pickles
   frame_files = read_pickles(iparams.data)
   n_frames = len(frame_files)
   if n_frames == 0:
     print "No integration pickle found. Exit program."
     return None, iparams
   #exit if no problem
   if self.should_terminate(iparams, frame_files[0]):
     print "No indexing ambiguity problem. Set index_ambiguity.mode = Forced and assigned_basis = list of basis formats to solve pseudo-twinning problem."
     return None, iparams
   #continue with (Auto - alt>1, find solution), (Auto - alt>1, mtz)
   #(Forced - assigned_basis, mtz), (Forced - assigned_basis, find solution)
   #*************************************************
   #if mtz file is given, use it to solve the problem
   sol_fname = iparams.run_no+'/index_ambiguity/solution_pickle.pickle'
   if iparams.indexing_ambiguity.index_basis_in is not None:
     if iparams.indexing_ambiguity.index_basis_in.endswith('.mtz'):
       mxh = mx_handler()
       flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(iparams.indexing_ambiguity.index_basis_in)
       if not flag_ref_found:
         print "Reference mtz file not found. Set indexing_ambiguity.index_basis_in = None to let the program generate the solutions automatically."
         return None, iparams
       else:
         frames = [(i, frame_files[i], iparams, miller_array_ref) for i in range(n_frames)]
         cc_results = pool_map(
           iterable=frames,
           func=solve_with_mtz_mproc,
           processes=iparams.n_processors)
         sol_pickle = {}
         for result in cc_results:
           pickle_filename, index_basis = result
           sol_pickle[pickle_filename] = index_basis
         pickle.dump(sol_pickle, open(sol_fname,"wb"))
         return sol_fname, iparams
   #*************************************************
   #solve with Brehm & Diederichs - sample size n_sample_frames then bootstrap the rest
   frames = [(i, frame_files[i], iparams) for i in random.sample(range(n_frames), iparams.indexing_ambiguity.n_sample_frames)]
   #get observations list
   print "Reading observations"
   alt_dict_results = pool_map(
         iterable=frames,
         func=get_obs_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   obs_list = []
   for result in alt_dict_results:
     alt_dict, pickle_filename = result
     if alt_dict is not None:
       for key in alt_dict.keys():
         frame_dup_files.append(pickle_filename)
         frame_keys.append(key)
         obs_list.append(alt_dict[key])
   frames = [(i, frame_dup_files[i], frame_keys[i], obs_list[i], obs_list) for i in range(len(frame_dup_files))]
   #calculate r
   print "Calculating R"
   calc_r_results = pool_map(
         iterable=frames,
         func=calculate_r_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   r_matrix = []
   for result in calc_r_results:
     if result is not None:
       pickle_filename, index_basis, r_set = result
       frame_dup_files.append(pickle_filename)
       frame_keys.append(index_basis)
       if len(r_matrix) == 0:
         r_matrix = r_set
       else:
         r_matrix = np.append(r_matrix, r_set, axis=0)
   #choose groups with best R
   print "Selecting frames with best R"
   i_mean_r = np.argsort(np.mean(r_matrix, axis=1))[::-1]
   r_matrix_sorted = r_matrix[i_mean_r]
   frame_dup_files_sorted = np.array(frame_dup_files)[i_mean_r]
   frame_keys_sorted = np.array(frame_keys)[i_mean_r]
   frame_dup_files_sel = []
   for frame_file, frame_key, r_set in zip(frame_dup_files_sorted, frame_keys_sorted, r_matrix_sorted):
     if frame_file not in frame_dup_files_sel:
       frame_dup_files_sel.append(frame_file)
       print frame_file, frame_key, np.mean(r_set)
       if len(frame_dup_files_sel) >= iparams.indexing_ambiguity.n_selected_frames:
         print 'Found all %d good frames' % (len(frame_dup_files_sel))
         break
   ##
   #rebuild observations and r_matrix
   frames = [(i, frame_dup_files_sel[i], iparams) for i in range(len(frame_dup_files_sel))]
   #get observations list
   print "Re-reading observations"
   alt_dict_results = pool_map(
         iterable=frames,
         func=get_obs_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   obs_list = []
   for result in alt_dict_results:
     alt_dict, pickle_filename = result
     if alt_dict is not None:
       for key in alt_dict.keys():
         frame_dup_files.append(pickle_filename)
         frame_keys.append(key)
         obs_list.append(alt_dict[key])
   frames = [(i, frame_dup_files[i], frame_keys[i], obs_list[i], obs_list) for i in range(len(frame_dup_files))]
   #calculate r
   print "Re-calculating R"
   calc_r_results = pool_map(
         iterable=frames,
         func=calculate_r_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   r_matrix = []
   for result in calc_r_results:
     if result is not None:
       pickle_filename, index_basis, r_set = result
       frame_dup_files.append(pickle_filename)
       frame_keys.append(index_basis)
       if len(r_matrix) == 0:
         r_matrix = r_set
       else:
         r_matrix = np.append(r_matrix, r_set, axis=0)
   print "Minimizing frame distance"
   idah = indamb_handler()
   x_set = idah.optimize(r_matrix, flag_plot=iparams.flag_plot)
   x_pickle = {'frame_dup_files':frame_dup_files, 'frame_keys':frame_keys, \
     'r_matrix':r_matrix, 'x_set':x_set}
   pickle.dump(x_pickle, open(iparams.run_no+'/index_ambiguity/x.out',"wb"))
   print "Clustering results"
   kmh = kmeans_handler()
   k = 2**(len(idah.get_observations(frame_dup_files[0], iparams))-1)
   centroids, labels = kmh.run(x_set, k, flag_plot=iparams.flag_plot)
   print "Get solution pickle"
   sample_fname = iparams.run_no+'/index_ambiguity/sample.lst'
   sol_pickle = idah.assign_basis(frame_dup_files, frame_keys, labels, k, sample_fname)
   pickle.dump(sol_pickle, open(sol_fname,"wb"))
   #if more frames found, merge the sample frames to get a reference set
   #that can be used for breaking the ambiguity.
   if n_frames > iparams.indexing_ambiguity.n_selected_frames:
     print "Breaking the indexing ambiguity for the remaining images."
     old_iparams_data = iparams.data[:]
     iparams.data = [sample_fname]
     iparams.indexing_ambiguity.index_basis_in = sol_fname
     grh = genref_handler()
     grh.run_by_params(iparams)
     mh = merge_handler()
     mh.run_by_params(iparams)
     DIR = iparams.run_no+'/mtz/'
     file_no_list = [int(fname.split('.')[0]) for fname in os.listdir(DIR)]
     if len(file_no_list) > 0:
       hklref_indamb = DIR + str(max(file_no_list)) + '.mtz'
       print "Bootstrap reference reflection set:", hklref_indamb
       #setup a list of remaining frames
       frame_files_remain = []
       for frame in frame_files:
         if frame not in sol_pickle:
           frame_files_remain.append(frame)
       #determine index basis
       mxh = mx_handler()
       flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(hklref_indamb)
       frames = [(i, frame_files_remain[i], iparams, miller_array_ref) for i in range(len(frame_files_remain))]
       cc_results = pool_map(
         iterable=frames,
         func=solve_with_mtz_mproc,
         processes=iparams.n_processors)
       for result in cc_results:
         pickle_filename, index_basis = result
         sol_pickle[pickle_filename] = index_basis
     iparams.data = old_iparams_data[:]
   #write out solution pickle
   pickle.dump(sol_pickle, open(sol_fname,"wb"))
   #write out text output
   txt_out = "Solving indexing ambiguity complete. Solution file saved to "+sol_fname+"\n"
   f = open(iparams.run_no+'/log.txt', 'a')
   f.write(txt_out)
   f.close()
   return sol_fname, iparams
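The Brehm & Diederichs step above embeds each frame/basis pair into a coordinate set x_set and then clusters it with k-means into k = 2**(n_alt - 1) groups, one per indexing convention. kmeans_handler is PRIME's own; a minimal numpy sketch of the algorithm it names, assuming x_set is an (n_points, n_dims) array:

import numpy as np

def kmeans(x_set, k, n_iter=100, seed=0):
    # Plain Lloyd's algorithm: assign points to the nearest centroid, update,
    # and stop when the centroids no longer move.
    rng = np.random.RandomState(seed)
    centroids = x_set[rng.choice(len(x_set), k, replace=False)]
    for _ in range(n_iter):
        d = np.linalg.norm(x_set[:, None, :] - centroids[None, :, :], axis=2)
        labels = d.argmin(axis=1)
        new_centroids = np.array([
            x_set[labels == j].mean(axis=0) if np.any(labels == j)
            else centroids[j] for j in range(k)])
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return centroids, labels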
Example #8
 def run(self, args):
     #read inputs
     from prime.postrefine.mod_input import process_input
     iparams, txt_out_input = process_input(args)
     print txt_out_input
     self.run_by_params(iparams)
Example #9
def run(argv):
    comm.Barrier()
    start_time = MPI.Wtime()
    # broadcast parameters
    if rank == 0:
        iparams, txt_out_input = process_input(argv)
        iparams.flag_volume_correction = False
        iparams.flag_hush = True
        print(txt_out_input)
        frame_files = read_pickles(iparams.data)
    else:
        iparams = None
        frame_files = None
    comm.Barrier()
    # assign scaling task
    if rank == 0:
        master(frame_files, iparams, "scale")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    # pre-merge task
    if rank == 0:
        results = sum(result, [])
        print("Scaling is done on %d cores for %d frames" %
              (size, len(results)))
        master(results, iparams, "pre_merge")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    # merge task
    if rank == 0:
        print("Pre-merge is done on %d cores" % (len(result)))
        master(result, iparams, "merge")
        result = []
    else:
        result = client()
    # finalize merge
    result = comm.gather(result, root=0)
    comm.Barrier()
    if rank == 0:
        print("Merge completed on %d cores" % (len(result)))
        results = sum(result, [])
        mdh = merge_data_handler()
        txt_out_rejection = ""
        for _mdh, _txt_out_rejection in results:
            mdh.extend(_mdh)
            txt_out_rejection += _txt_out_rejection
        # select only indices with non-Inf, non-NaN stats
        selections = flex.bool([
            not (math.isnan(r0) or math.isinf(r0) or
                 math.isnan(r1) or math.isinf(r1))
            for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)
        ])
        mdh.reduce_by_selection(selections)
        its = intensities_scaler()
        mdh, txt_merge_mean_table = its.write_output(mdh, iparams, "test",
                                                     "average")
        print(txt_merge_mean_table)
    # collect time profile
    comm.Barrier()
    end_time = MPI.Wtime()
    txt_time = "Elapsed Time (s):%10.2f\n" % (end_time - start_time)
    # write log output
    if rank == 0:
        print(txt_time)
        with open(os.path.join(iparams.run_no, "log.txt"), "w") as f:
            f.write(txt_out_input + txt_merge_mean_table + txt_time)
        with open(os.path.join(iparams.run_no, "rejections.txt"), "w") as f:
            f.write(txt_out_rejection)
    MPI.Finalize()
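The finalization step keeps only merged indices whose R_meas terms are finite. The same selection in plain numpy, for readers without cctbx's flex arrays (the array values here are illustrative):

import numpy as np

r_meas_div = np.array([0.2, np.nan, 0.4, np.inf])
r_meas_divisor = np.array([1.0, 2.0, np.nan, 4.0])

# np.isfinite is False for NaN and +/-Inf alike, so one call per array suffices.
keep = np.isfinite(r_meas_div) & np.isfinite(r_meas_divisor)
print(keep)  # [ True False False False]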
Example #11
 def run(self, args):
     #read inputs
     from prime.postrefine.mod_input import process_input
     iparams, txt_out_input = process_input(argv=args,
                                            flag_check_exist=False)
     self.run_by_params(iparams)
Example #12
    def run(self, args):
        #read inputs
        from prime.postrefine.mod_input import process_input, read_pickles
        iparams, txt_out_input = process_input(args)
        print txt_out_input
        with open(os.path.join(iparams.run_no, self.module_name, 'log.txt'),
                  'w') as f:
            f.write(txt_out_input)
        #read all integration pickles
        frame_files = read_pickles(iparams.data)
        n_frames = len(frame_files)
        if n_frames == 0:
            print "No integration pickle found. Exit program."
            return None, iparams
        #start
        if iparams.isoform_cluster.isorefin:
            #get collection of iso. ref. reflection set.
            mxh = mx_handler()
            miller_array_ref_set = []
            for isorefin in iparams.isoform_cluster.isorefin:
                flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(
                    isorefin)
                if flag_ref_found:
                    miller_array_ref_set.append(miller_array_ref)
            #get observation list
            frame_files_sel, obs_list = self.get_observation_set(
                iparams, frame_files, n_frames)
            if miller_array_ref_set:
                frames = [(i, frame_files_sel[i], obs_list[i], iparams,
                           miller_array_ref_set) for i in range(len(obs_list))]
                cc_results = pool_map(iterable=frames,
                                      func=solve_with_mtz_mproc,
                                      processes=iparams.n_processors)
                sol_pickle = {}
                for result in cc_results:
                    pickle_filename, cluster_id = result
                    sol_pickle[pickle_filename] = cluster_id
                write_out_solutions(iparams, sol_pickle)
                txt_out = "Cluster images with given " + str(
                    len(miller_array_ref_set)
                ) + " mtz files completed. Use cluster_0.lst - cluster_k.lst (for k clusters) for merging.\n"
                print txt_out
                with open(
                        os.path.join(iparams.run_no, self.module_name,
                                     'log.txt'), 'a') as f:
                    f.write(txt_out)
            return

        #*************************************************
        #solve with Brehm & Diederichs - sample size n_sample_frames then bootstrap the rest
        txt_out = "Cluster images with B&D algorithms.\n"
        frame_files_sel, obs_list = self.get_observation_set(
            iparams, frame_files, iparams.isoform_cluster.n_sample_frames)
        frames = [(i, frame_files_sel[i], obs_list[i], obs_list)
                  for i in range(len(frame_files_sel))]
        #calculate r
        print "Calculating R"
        calc_r_results = pool_map(iterable=frames,
                                  func=calculate_r_mproc,
                                  processes=iparams.n_processors)
        frame_files_sel = []
        r_matrix = []
        obs_list = []
        for result in calc_r_results:
            if result:
                pickle_filename, r_set, obs = result
                frame_files_sel.append(pickle_filename)
                obs_list.append(obs)
                if len(r_matrix) == 0:
                    r_matrix = r_set
                else:
                    r_matrix = np.append(r_matrix, r_set, axis=0)
        #choose groups with best R
        print "Selecting frames with best R"
        i_mean_r = np.argsort(np.mean(r_matrix, axis=1))[::-1]
        r_matrix_sorted = r_matrix[i_mean_r]
        frame_files_sorted = np.array(frame_files_sel)[i_mean_r]
        obs_list_sorted = np.array(obs_list)[i_mean_r]
        frame_files_sel = []
        obs_sel = []
        for frame_file, r_set, obs in zip(frame_files_sorted, r_matrix_sorted,
                                          obs_list_sorted):
            if frame_file not in frame_files_sel:
                frame_files_sel.append(frame_file)
                obs_sel.append(obs)
                print frame_file, np.mean(r_set)
                if len(frame_files_sel) >= iparams.isoform_cluster.n_selected_frames:
                    print 'Found all %d good frames' % len(frame_files_sel)
                    break
        #Recalculate r for the new selected list
        frames = [(i, frame_files_sel[i], obs_sel[i], obs_sel)
                  for i in range(len(frame_files_sel))]
        print "Re-calculating R"
        calc_r_results = pool_map(iterable=frames,
                                  func=calculate_r_mproc,
                                  processes=iparams.n_processors)
        frame_files_sel = []
        r_matrix = []
        obs_list = []
        for result in calc_r_results:
            if result:
                pickle_filename, r_set, obs = result
                frame_files_sel.append(pickle_filename)
                obs_list.append(obs)
                if len(r_matrix) == 0:
                    r_matrix = r_set
                else:
                    r_matrix = np.append(r_matrix, r_set, axis=0)
        print "Minimizing frame distance"
        isoch = isoform_cluster_handler()
        x_set = isoch.optimize(r_matrix, flag_plot=iparams.flag_plot)
        print "Clustering results"
        kmh = kmeans_handler()
        k = iparams.isoform_cluster.n_clusters
        centroids, labels = kmh.run(x_set, k, flag_plot=iparams.flag_plot)
        print "Get solution pickle and cluster files list"
        sol_pickle, cluster_files = isoch.assign_cluster(frame_files_sel, labels, k, \
            os.path.join(iparams.run_no,self.module_name))
        #if more frames found, merge the sample frames to get a reference set
        #that can be used for breaking the ambiguity.
        if n_frames > iparams.isoform_cluster.n_selected_frames:
            print "Assign cluster_id for the remaining images."
            old_iparams_data = iparams.data[:]
            miller_array_ref_set = []
            from prime.command_line.postrefine import scale_frames, merge_frames
            for i in range(k):
                #generate a reference set from solved frames
                with open(cluster_files[i]) as f:
                    frame_files_processed = f.read().split('\n')[:-1]
                scaled_pres_set = scale_frames(
                    range(len(frame_files_processed)), frame_files_processed,
                    iparams)
                mdh, txt_merge_out = merge_frames(scaled_pres_set, iparams, \
                    mtz_out_prefix=os.path.join(self.module_name,'cluster_'+str(i)))
                miller_array_ref_set.append(mdh.miller_array_merge)
                txt_out += txt_merge_out
            #setup a list of remaining frames
            frame_files_remain = [
                frame for frame in frame_files if frame not in sol_pickle
            ]
            frame_files_remain_sel, obs_remain_sel_list = self.get_observation_set(iparams, \
                frame_files_remain, len(frame_files_remain))
            frames = [(i, frame_files_remain_sel[i], obs_remain_sel_list[i],
                       iparams, miller_array_ref_set)
                      for i in range(len(obs_remain_sel_list))]
            cc_results = pool_map(iterable=frames,
                                  func=solve_with_mtz_mproc,
                                  processes=iparams.n_processors)
            for result in cc_results:
                if result:
                    pickle_filename, cluster_id = result
                    sol_pickle[pickle_filename] = cluster_id
            iparams.data = old_iparams_data[:]
        #write out solution pickle
        write_out_solutions(iparams, sol_pickle)
        #write out text output
        txt = "Cluster images completed. Use cluster_0.lst - cluster_k.lst (for k clusters) for merging.\n"
        txt_out += txt
        print txt
        with open(os.path.join(iparams.run_no, self.module_name, 'log.txt'),
                  'a') as f:
            f.write(txt_out)
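Examples #7 and #12 share the same frame-selection idiom: rank candidates by the row mean of r_matrix, best first, then keep the first n unique files. The same logic condensed into one helper (a sketch; the handlers' real code differs in bookkeeping):

import numpy as np

def select_best_frames(frame_files, r_matrix, n_selected):
    # Sort rows by mean R (descending) and keep the first n unique files.
    order = np.argsort(np.mean(r_matrix, axis=1))[::-1]
    selected = []
    for i in order:
        if frame_files[i] not in selected:
            selected.append(frame_files[i])
        if len(selected) >= n_selected:
            break
    return selected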
Example #13
 def run(self, args):
   #read inputs
   from prime.postrefine.mod_input import process_input
   iparams, txt_out_input = process_input(args)
   print txt_out_input
   self.run_by_params(iparams)
Example #14
 def run(self, args, avg_mode='average'):
   #read inputs
   from prime.postrefine.mod_input import process_input
   iparams, txt_out_input = process_input(argv=args, flag_check_exist=False)
   self.run_by_params(iparams, avg_mode=avg_mode)
Example #15
def read_input(args):
  from prime.postrefine.mod_input import process_input
  iparams, txt_out_input = process_input(args)
  return iparams, txt_out_input