Esempio n. 1
0
 def run(self, args):
   #read inputs
   from prime.postrefine.mod_input import process_input, read_pickles
   iparams, txt_out_input = process_input(args)
   runh = run_handler()
   print txt_out_input
   #read all integration pickles
   frame_files = read_pickles(iparams.data)
   n_frames = len(frame_files)
   if n_frames == 0:
     print "No integration pickle found. Exit program."
     exit()
   frames = [(i, frame_files[i], iparams) for i in range(n_frames)]
   #run command to calculate mean_I
   mm_I = 0
   if iparams.flag_apply_b_by_frame == False:
     inp_pickle = {'iparams':iparams, 'frames':frames}
     pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/0.inp',"wb"))
     call(["prime._genref_determine_mean_I", iparams.run_no+'/inputs/0.inp'])
     runh.check_done(iparams, n_frames)
     mm_I = calc_mean_of_mean_I(iparams)
   #run command for scaling
   frames = [(i, frame_files[i], iparams, mm_I) for i in range(n_frames)]
   inp_pickle = {'iparams':iparams, 'frames':frames}
   pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/0.inp',"wb"))
   call(["prime._genref_scale_frame_by_mean_I", iparams.run_no+'/inputs/0.inp'])
   runh.check_done(iparams, n_frames)
   #write output to logfile
   txt_out = 'Scaling complete. Run prime.merge your_input_phil.phil to merge for a reference set.\n'
   print txt_out
   f = open(iparams.run_no+'/log.txt', 'w')
   f.write(txt_out_input+txt_out)
   f.close()
Esempio n. 2
0
def main(data, only_merohedral):
    indambh = indamb_handler()
    intFileList = read_pickles([data])
    if intFileList:
        obsList = {}
        for intFileName in intFileList:
            intPickle = read_frame(intFileName)
            try:
                obs = intPickle['observations'][0]
                obsList[intFileName] = obs
            except Exception as e:
                print "Warning:", e
                pass
        for key, value in obsList.iteritems():
            if only_merohedral:
                flag_all = False
            else:
                flag_all = True
            ops = indambh.generate_twin_operators(value, flag_all=flag_all)
            if ops:
                print os.path.basename(
                    key
                ), '%6.1f,%6.1f,%6.1f,%6.1f,%6.1f,%6.1f' % value.unit_cell(
                ).parameters(), ' '.join(
                    [op.operator.r().as_hkl() for op in ops])
            else:
                print os.path.basename(
                    key
                ), '%6.1f,%6.1f,%6.1f,%6.1f,%6.1f,%6.1f' % value.unit_cell(
                ).parameters(), 'Twining operators not found'
Esempio n. 3
0
def run(argv):
    """MPI entry point: scale all frames, merge a mean-scaled reference,
    then run n_postref_cycle rounds of post-refinement + merging.

    Rank 0 parses the input and drives master(); all other ranks run
    client() workers.  Every collective call (Barrier/gather/bcast) is
    executed by all ranks in the same order -- do not reorder.
    """
    comm.Barrier()
    start_time = MPI.Wtime()
    # broadcast parameters
    if rank == 0:
        iparams, txt_out_input = process_input(argv)
        iparams.flag_volume_correction = False
        iparams.flag_hush = True
        print(txt_out_input)
        frame_files = read_pickles(iparams.data)
    else:
        # NOTE(review): iparams/frame_files/txt_out_input stay None on
        # worker ranks and are never bcast here -- presumably master()/
        # client() exchange what workers need; confirm against their code.
        iparams = None
        frame_files = None
        txt_out_input = None
    comm.Barrier()
    # assign scaling task
    if rank == 0:
        master(frame_files, iparams, "scale")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    # merge the scaled results into the mean-scaled reference set;
    # called on all ranks (merge presumably no-ops off rank 0 -- verify)
    _, mdh = merge(result, iparams, txt_out_input, "mean_scaled", "average")
    # postref task
    if rank == 0:
        n_postref_cycle = iparams.n_postref_cycle
        results = [None] * len(frame_files)
    else:
        n_postref_cycle = None
        results = None
    n_postref_cycle = comm.bcast(n_postref_cycle, root=0)
    avg_mode = "weighted"
    for i_iter in range(n_postref_cycle):
        comm.Barrier()
        # last cycle switches to the final averaging mode
        if i_iter == n_postref_cycle - 1:
            avg_mode = "final"
        if rank == 0:
            print("Start post-refinement cycle %d" % (i_iter + 1))
            master(
                (frame_files, mdh.miller_array_merge, results, avg_mode),
                iparams,
                "postref",
            )
            result = []
        else:
            result = client()
        result = comm.gather(result, root=0)
        comm.Barrier()
        # carry the per-frame results and merged reference into the next cycle
        results, mdh = merge(result, iparams, "",
                             "postref_cycle_%d" % (i_iter + 1), avg_mode)
    # collect time profile
    comm.Barrier()
    end_time = MPI.Wtime()
    # NOTE(review): txt_time is computed but never printed or written here
    txt_time = "Elapsed Time (s):%10.2f\n" % (end_time - start_time)
    MPI.Finalize()
Esempio n. 4
0
 def run_by_params(self, iparams):
   runh = run_handler()
   #read all integration pickles
   from prime.postrefine.mod_input import read_pickles
   frame_files = read_pickles(iparams.data)
   n_frames = len(frame_files)
   if n_frames == 0:
     print "No integration pickle found. Exit program."
     exit()
   frames = [(i, frame_files[i], iparams) for i in range(n_frames)]
   mm_I = 0
   #run command to calculate mean_I
   if iparams.flag_apply_b_by_frame == False:
     inp_pickle = {'iparams':iparams, 'frames':frames}
     pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/0.inp',"wb"))
     call(["prime._genref_determine_mean_I", iparams.run_no+'/inputs/0.inp'])
     runh.check_done(iparams, n_frames)
     mm_I = calc_mean_of_mean_I(iparams)
   #run command for scaling
   if iparams.queue.mode is None:
     #run single node
     frames = [(i, frame_files[i], iparams, mm_I) for i in range(n_frames)]
     inp_pickle = {'iparams':iparams, 'frames':frames}
     pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/0.inp',"wb"))
     call(["prime._genref_scale_frame_by_mean_I", iparams.run_no+'/inputs/0.inp'])
   else:
     #run on n_nodes
     n_imgs_per_node = int(round(n_frames/iparams.queue.n_nodes))
     for i_node in range(iparams.queue.n_nodes):
       start_frame = i_node*n_imgs_per_node
       if i_node < iparams.queue.n_nodes - 1:
         end_frame = start_frame + n_imgs_per_node
       else:
         end_frame = n_frames
       frames = [(i, frame_files[i], iparams, mm_I) for i in range(start_frame, end_frame)]
       inp_pickle = {'iparams':iparams, 'frames':frames}
       pickle.dump(inp_pickle, open(iparams.run_no+'/inputs/'+str(i_node)+'.inp',"wb"))
       call(["bsub","-q",iparams.queue.qname,"-o",iparams.run_no+"/qout/qout_gr.txt","prime._genref_scale_frame_by_mean_I", iparams.run_no+"/inputs/"+str(i_node)+".inp"])
   runh.check_done(iparams, n_frames)
   #write output to logfile
   txt_out = 'Scaling complete. Run prime.merge your_input_phil.phil to merge for a reference set.\n'
   print txt_out
   f = open(iparams.run_no+'/log.txt', 'a')
   f.write(txt_out)
   f.close()
 def run(self, args):
   """Resolve the indexing ambiguity for a set of integration pickles.

   Returns (solution_filename, iparams).  solution_filename is None when
   no frames are found or no ambiguity exists; otherwise it names a
   pickle mapping each frame file to its chosen index basis.  Strategy:
   use a user-supplied solution pickle or reference mtz when given,
   otherwise solve with the Brehm & Diederichs method on a random sample
   of frames and bootstrap the rest against a merged reference.
   """
   #read inputs
   from prime.postrefine.mod_input import process_input, read_pickles
   iparams, txt_out_input = process_input(args)
   print txt_out_input
   f = open(iparams.run_no+'/log.txt', 'w')
   f.write(txt_out_input)
   f.close()
   #if solution pickle is given, return the file name
   if iparams.indexing_ambiguity.index_basis_in is not None:
     if iparams.indexing_ambiguity.index_basis_in.endswith('.pickle'):
       return iparams.indexing_ambiguity.index_basis_in, iparams
   #read all integration pickles
   frame_files = read_pickles(iparams.data)
   n_frames = len(frame_files)
   if n_frames == 0:
     print "No integration pickle found. Exit program."
     return None, iparams
   #exit if no problem
   if self.should_terminate(iparams, frame_files[0]):
     print "No indexing ambiguity problem. Set index_ambiguity.mode = Forced and assigned_basis = list of basis formats to solve pseudo-twinning problem."
     return None, iparams
   #continue with (Auto - alt>1, find solution), (Auto - alt>1, mtz)
   #(Forced - assigned_basis, mtz), (Forced - assigned_basis, find solution)
   #*************************************************
   #if mtz file is given, use it to solve the problem
   sol_fname = iparams.run_no+'/index_ambiguity/solution_pickle.pickle'
   if iparams.indexing_ambiguity.index_basis_in is not None:
     if iparams.indexing_ambiguity.index_basis_in.endswith('.mtz'):
       mxh = mx_handler()
       flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(iparams.indexing_ambiguity.index_basis_in)
       if flag_ref_found == False:
         print "Reference mtz file not found. Set indexing_ambiguity.index_basis_in = None to enable auto generate the solutions."
         return None, iparams
       else:
         #correlate each frame against the reference to pick its basis
         frames = [(i, frame_files[i], iparams, miller_array_ref) for i in range(n_frames)]
         cc_results = pool_map(
           iterable=frames,
           func=solve_with_mtz_mproc,
           processes=iparams.n_processors)
         sol_pickle = {}
         for result in cc_results:
           pickle_filename, index_basis = result
           sol_pickle[pickle_filename] = index_basis
         pickle.dump(sol_pickle, open(sol_fname,"wb"))
         return sol_fname, iparams
   #*************************************************
   #solve with Brehm & Diederichs - sample size n_sample_frames then bootstrap the rest
   frames = [(i, frame_files[i], iparams) for i in random.sample(range(n_frames), iparams.indexing_ambiguity.n_sample_frames)]
   #get observations list
   print "Reading observations"
   alt_dict_results = pool_map(
         iterable=frames,
         func=get_obs_mproc,
         processes=iparams.n_processors)
   #flatten: one entry per (frame, alternative basis) pair
   frame_dup_files = []
   frame_keys = []
   obs_list = []
   for result in alt_dict_results:
     alt_dict, pickle_filename = result
     if alt_dict is not None:
       for key in alt_dict.keys():
         frame_dup_files.append(pickle_filename)
         frame_keys.append(key)
         obs_list.append(alt_dict[key])
   frames = [(i, frame_dup_files[i], frame_keys[i], obs_list[i], obs_list) for i in range(len(frame_dup_files))]
   #calculate r (pairwise correlation rows, one per frame/basis entry)
   print "Calculating R"
   calc_r_results = pool_map(
         iterable=frames,
         func=calculate_r_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   r_matrix = []
   for result in calc_r_results:
     if result is not None:
       pickle_filename, index_basis, r_set = result
       frame_dup_files.append(pickle_filename)
       frame_keys.append(index_basis)
       if len(r_matrix) == 0:
         r_matrix = r_set
       else:
         r_matrix = np.append(r_matrix, r_set, axis=0)
   #choose groups with best CC
   print "Selecting frames with best R"
   #sort rows by descending mean correlation, keep first occurrence per file
   i_mean_r = np.argsort(np.mean(r_matrix, axis=1))[::-1]
   r_matrix_sorted = r_matrix[i_mean_r]
   frame_dup_files_sorted = np.array(frame_dup_files)[i_mean_r]
   frame_keys_sorted = np.array(frame_keys)[i_mean_r]
   frame_dup_files_sel = []
   for frame_file, frame_key, r_set in zip(frame_dup_files_sorted, frame_keys_sorted, r_matrix_sorted):
     if frame_file not in frame_dup_files_sel:
       frame_dup_files_sel.append(frame_file)
       print frame_file, frame_key, np.mean(r_set)
       if len(frame_dup_files_sel) >= iparams.indexing_ambiguity.n_selected_frames:
         print 'Found all %6.0f good frames'%(len(frame_dup_files_sel))
         break
   ##
   #rebuild observations and r_matrix for only the selected frames
   frames = [(i, frame_dup_files_sel[i], iparams) for i in range(len(frame_dup_files_sel))]
   #get observations list
   print "Re-reading observations"
   alt_dict_results = pool_map(
         iterable=frames,
         func=get_obs_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   obs_list = []
   for result in alt_dict_results:
     alt_dict, pickle_filename = result
     if alt_dict is not None:
       for key in alt_dict.keys():
         frame_dup_files.append(pickle_filename)
         frame_keys.append(key)
         obs_list.append(alt_dict[key])
   frames = [(i, frame_dup_files[i], frame_keys[i], obs_list[i], obs_list) for i in range(len(frame_dup_files))]
   #calculate r
   print "Re-calculating R"
   calc_r_results = pool_map(
         iterable=frames,
         func=calculate_r_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   r_matrix = []
   for result in calc_r_results:
     if result is not None:
       pickle_filename, index_basis, r_set = result
       frame_dup_files.append(pickle_filename)
       frame_keys.append(index_basis)
       if len(r_matrix) == 0:
         r_matrix = r_set
       else:
         r_matrix = np.append(r_matrix, r_set, axis=0)
   #embed frames into a low-dimensional space, then cluster into k groups
   print "Minimizing frame distance"
   idah = indamb_handler()
   x_set = idah.optimize(r_matrix, flag_plot=iparams.flag_plot)
   x_pickle = {'frame_dup_files':frame_dup_files, 'frame_keys':frame_keys, \
     'r_matrix':r_matrix, 'x_set':x_set}
   pickle.dump(x_pickle, open(iparams.run_no+'/index_ambiguity/x.out',"wb"))
   print "Clustering results"
   kmh = kmeans_handler()
   #k = 2^(n_alternatives-1): one cluster per possible basis assignment
   k = 2**(len(idah.get_observations(frame_dup_files[0], iparams))-1)
   centroids, labels = kmh.run(x_set, k, flag_plot=iparams.flag_plot)
   print "Get solution pickle"
   sample_fname = iparams.run_no+'/index_ambiguity/sample.lst'
   sol_pickle = idah.assign_basis(frame_dup_files, frame_keys, labels, k, sample_fname)
   pickle.dump(sol_pickle, open(sol_fname,"wb"))
   #if more frames found, merge the sample frames to get a reference set
   #that can be used for breaking the ambiguity.
   if n_frames > iparams.indexing_ambiguity.n_selected_frames:
     print "Breaking the indexing ambiguity for the remaining images."
     #temporarily point iparams.data at the sample list; restored below
     old_iparams_data = iparams.data[:]
     iparams.data = [sample_fname]
     iparams.indexing_ambiguity.index_basis_in = sol_fname
     grh = genref_handler()
     grh.run_by_params(iparams)
     mh = merge_handler()
     mh.run_by_params(iparams)
     #use the highest-numbered mtz in run_no/mtz/ as the bootstrap reference
     DIR = iparams.run_no+'/mtz/'
     file_no_list = [int(fname.split('.')[0]) for fname in os.listdir(DIR)]
     if len(file_no_list) > 0:
       hklref_indamb = DIR + str(max(file_no_list)) + '.mtz'
       print "Bootstrap reference reflection set:", hklref_indamb
       #setup a list of remaining frames
       frame_files_remain = []
       for frame in frame_files:
         if frame not in sol_pickle:
           frame_files_remain.append(frame)
       #determine index basis
       mxh = mx_handler()
       flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(hklref_indamb)
       frames = [(i, frame_files_remain[i], iparams, miller_array_ref) for i in range(len(frame_files_remain))]
       cc_results = pool_map(
         iterable=frames,
         func=solve_with_mtz_mproc,
         processes=iparams.n_processors)
       for result in cc_results:
         pickle_filename, index_basis = result
         sol_pickle[pickle_filename] = index_basis
     iparams.data = old_iparams_data[:]
   #write out solution pickle
   pickle.dump(sol_pickle, open(sol_fname,"wb"))
   #write out text output
   txt_out = "Solving indexing ambiguity complete. Solution file saved to "+sol_fname+"\n"
   f = open(iparams.run_no+'/log.txt', 'a')
   f.write(txt_out)
   f.close()
   return sol_fname, iparams
Esempio n. 6
0
 def run_by_params(self, iparams):
     """Scale all integration pickles using an already-parsed iparams.

     Dispatches scaling either on the local node or, when
     iparams.queue.mode is set, split across iparams.queue.n_nodes LSF
     jobs submitted with bsub.  A completion message is appended to
     <run_no>/log.txt.  Exits the process when no integration pickle is
     found.
     NOTE(review): the file handles opened inline in pickle.dump(...,
     open(...)) are never explicitly closed -- relies on CPython refcount
     finalization; confirm this is acceptable before the subprocess reads
     the file.
     """
     runh = run_handler()
     #read all integration pickles
     from prime.postrefine.mod_input import read_pickles
     frame_files = read_pickles(iparams.data)
     n_frames = len(frame_files)
     if n_frames == 0:
         print "No integration pickle found. Exit program."
         exit()
     frames = [(i, frame_files[i], iparams) for i in range(n_frames)]
     mm_I = 0
     #run command to calculate mean_I (skipped for per-frame B scaling)
     if iparams.flag_apply_b_by_frame == False:
         inp_pickle = {'iparams': iparams, 'frames': frames}
         pickle.dump(inp_pickle, open(iparams.run_no + '/inputs/0.inp',
                                      "wb"))
         call([
             "prime._genref_determine_mean_I",
             iparams.run_no + '/inputs/0.inp'
         ])
         runh.check_done(iparams, n_frames)
         mm_I = calc_mean_of_mean_I(iparams)
     #run command for scaling
     if iparams.queue.mode is None:
         #run single node
         frames = [(i, frame_files[i], iparams, mm_I)
                   for i in range(n_frames)]
         inp_pickle = {'iparams': iparams, 'frames': frames}
         pickle.dump(inp_pickle, open(iparams.run_no + '/inputs/0.inp',
                                      "wb"))
         call([
             "prime._genref_scale_frame_by_mean_I",
             iparams.run_no + '/inputs/0.inp'
         ])
     else:
         #run on n_nodes: contiguous chunks, last node takes the remainder
         n_imgs_per_node = int(round(n_frames / iparams.queue.n_nodes))
         for i_node in range(iparams.queue.n_nodes):
             start_frame = i_node * n_imgs_per_node
             if i_node < iparams.queue.n_nodes - 1:
                 end_frame = start_frame + n_imgs_per_node
             else:
                 end_frame = n_frames
             frames = [(i, frame_files[i], iparams, mm_I)
                       for i in range(start_frame, end_frame)]
             inp_pickle = {'iparams': iparams, 'frames': frames}
             pickle.dump(
                 inp_pickle,
                 open(iparams.run_no + '/inputs/' + str(i_node) + '.inp',
                      "wb"))
             call([
                 "bsub", "-q", iparams.queue.qname, "-o",
                 iparams.run_no + "/qout/qout_gr.txt",
                 "prime._genref_scale_frame_by_mean_I",
                 iparams.run_no + "/inputs/" + str(i_node) + ".inp"
             ])
     runh.check_done(iparams, n_frames)
     #write output to logfile
     txt_out = 'Scaling complete. Run prime.merge your_input_phil.phil to merge for a reference set.\n'
     print txt_out
     f = open(iparams.run_no + '/log.txt', 'a')
     f.write(txt_out)
     f.close()
Esempio n. 7
0
def run(argv):
    """MPI entry point: scale, pre-merge and merge all frames, then write
    the merged statistics, log and rejection report on rank 0.

    Rank 0 parses input and drives master(); other ranks run client()
    workers.  Every collective call (Barrier/gather) is executed by all
    ranks in the same order -- do not reorder.
    """
    comm.Barrier()
    start_time = MPI.Wtime()
    # broadcast parameters
    if rank == 0:
        iparams, txt_out_input = process_input(argv)
        iparams.flag_volume_correction = False
        iparams.flag_hush = True
        print(txt_out_input)
        frame_files = read_pickles(iparams.data)
    else:
        # NOTE(review): iparams/frame_files stay None on worker ranks and
        # are never bcast here -- presumably master()/client() exchange
        # what workers need; confirm against their implementation.
        iparams = None
        frame_files = None
    comm.Barrier()
    # assign scaling task
    if rank == 0:
        master(frame_files, iparams, "scale")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    # pre-merge task
    if rank == 0:
        # flatten the per-rank result lists into one list of frames
        results = sum(result, [])
        print("Scaling is done on %d cores for %d frames" %
              (size, len(results)))
        master(results, iparams, "pre_merge")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    # merge task
    if rank == 0:
        print("Pre-merge is done on %d cores" % (len(result)))
        master(result, iparams, "merge")
        result = []
    else:
        result = client()
    # finalize merge
    result = comm.gather(result, root=0)
    comm.Barrier()
    if rank == 0:
        print("Merge completed on %d cores" % (len(result)))
        results = sum(result, [])
        # combine per-rank merge handlers and rejection reports
        mdh = merge_data_handler()
        txt_out_rejection = ""
        for _mdh, _txt_out_rejection in results:
            mdh.extend(_mdh)
            txt_out_rejection += _txt_out_rejection
        # select only indices with non-Inf non-Nan stats
        selections = flex.bool([
            False if (math.isnan(r0) or math.isinf(r0) or math.isnan(r1)
                      or math.isinf(r1)) else True
            for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)
        ])
        mdh.reduce_by_selection(selections)
        its = intensities_scaler()
        mdh, txt_merge_mean_table = its.write_output(mdh, iparams, "test",
                                                     "average")
        print(txt_merge_mean_table)
    # collect time profile
    comm.Barrier()
    end_time = MPI.Wtime()
    txt_time = "Elapsed Time (s):%10.2f\n" % (end_time - start_time)
    # write log output
    if rank == 0:
        print(txt_time)
        with open(os.path.join(iparams.run_no, "log.txt"), "w") as f:
            f.write(txt_out_input + txt_merge_mean_table + txt_time)
        with open(os.path.join(iparams.run_no, "rejections.txt"), "w") as f:
            f.write(txt_out_rejection)
    MPI.Finalize()
 def run(self, args):
   """Resolve the indexing ambiguity for a set of integration pickles.

   Returns (solution_filename, iparams).  solution_filename is None when
   no frames are found or no ambiguity exists; otherwise it names a
   pickle mapping each frame file to its chosen index basis.  Uses a
   user-supplied solution pickle or reference mtz when given; otherwise
   solves with the Brehm & Diederichs method on a random frame sample and
   bootstraps the remaining frames against a merged reference.
   """
   #read inputs
   from prime.postrefine.mod_input import process_input, read_pickles
   iparams, txt_out_input = process_input(args)
   print txt_out_input
   f = open(iparams.run_no+'/log.txt', 'w')
   f.write(txt_out_input)
   f.close()
   #if solution pickle is given, return the file name
   if iparams.indexing_ambiguity.index_basis_in is not None:
     if iparams.indexing_ambiguity.index_basis_in.endswith('.pickle'):
       return iparams.indexing_ambiguity.index_basis_in, iparams
   #read all integration pickles
   frame_files = read_pickles(iparams.data)
   n_frames = len(frame_files)
   if n_frames == 0:
     print "No integration pickle found. Exit program."
     return None, iparams
   #exit if no problem
   if self.should_terminate(iparams, frame_files[0]):
     print "No indexing ambiguity problem. Set index_ambiguity.mode = Forced and assigned_basis = list of basis formats to solve pseudo-twinning problem."
     return None, iparams
   #continue with (Auto - alt>1, find solution), (Auto - alt>1, mtz)
   #(Forced - assigned_basis, mtz), (Forced - assigned_basis, find solution)
   #*************************************************
   #if mtz file is given, use it to solve the problem
   sol_fname = iparams.run_no+'/index_ambiguity/solution_pickle.pickle'
   if iparams.indexing_ambiguity.index_basis_in is not None:
     if iparams.indexing_ambiguity.index_basis_in.endswith('.mtz'):
       mxh = mx_handler()
       flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(iparams.indexing_ambiguity.index_basis_in)
       if flag_ref_found == False:
         print "Reference mtz file not found. Set indexing_ambiguity.index_basis_in = None to enable auto generate the solutions."
         return None, iparams
       else:
         #correlate each frame against the reference to pick its basis
         frames = [(i, frame_files[i], iparams, miller_array_ref) for i in range(n_frames)]
         cc_results = pool_map(
           iterable=frames,
           func=solve_with_mtz_mproc,
           processes=iparams.n_processors)
         sol_pickle = {}
         for result in cc_results:
           pickle_filename, index_basis = result
           sol_pickle[pickle_filename] = index_basis
         pickle.dump(sol_pickle, open(sol_fname,"wb"))
         return sol_fname, iparams
   #*************************************************
   #solve with Brehm & Diederichs - sample size n_sample_frames then bootstrap the rest
   frames = [(i, frame_files[i], iparams) for i in random.sample(range(n_frames), iparams.indexing_ambiguity.n_sample_frames)]
   #get observations list
   print "Reading observations"
   alt_dict_results = pool_map(
         iterable=frames,
         func=get_obs_mproc,
         processes=iparams.n_processors)
   #flatten: one entry per (frame, alternative basis) pair
   frame_dup_files = []
   frame_keys = []
   obs_list = []
   for result in alt_dict_results:
     alt_dict, pickle_filename = result
     if alt_dict is not None:
       for key in alt_dict.keys():
         frame_dup_files.append(pickle_filename)
         frame_keys.append(key)
         obs_list.append(alt_dict[key])
   frames = [(i, frame_dup_files[i], frame_keys[i], obs_list[i], obs_list) for i in range(len(frame_dup_files))]
   #calculate r (pairwise correlation rows, one per frame/basis entry)
   print "Calculating R"
   calc_r_results = pool_map(
         iterable=frames,
         func=calculate_r_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   r_matrix = []
   for result in calc_r_results:
     if result is not None:
       pickle_filename, index_basis, r_set = result
       frame_dup_files.append(pickle_filename)
       frame_keys.append(index_basis)
       if len(r_matrix) == 0:
         r_matrix = r_set
       else:
         r_matrix = np.append(r_matrix, r_set, axis=0)
   #choose groups with best CC
   print "Selecting frames with best R"
   #sort rows by descending mean correlation, keep first occurrence per file
   i_mean_r = np.argsort(np.mean(r_matrix, axis=1))[::-1]
   r_matrix_sorted = r_matrix[i_mean_r]
   frame_dup_files_sorted = np.array(frame_dup_files)[i_mean_r]
   frame_keys_sorted = np.array(frame_keys)[i_mean_r]
   frame_dup_files_sel = []
   for frame_file, frame_key, r_set in zip(frame_dup_files_sorted, frame_keys_sorted, r_matrix_sorted):
     if frame_file not in frame_dup_files_sel:
       frame_dup_files_sel.append(frame_file)
       print frame_file, frame_key, np.mean(r_set)
       if len(frame_dup_files_sel) >= iparams.indexing_ambiguity.n_selected_frames:
         print 'Found all %6.0f good frames'%(len(frame_dup_files_sel))
         break
   ##
   #rebuild observations and r_matrix for only the selected frames
   frames = [(i, frame_dup_files_sel[i], iparams) for i in range(len(frame_dup_files_sel))]
   #get observations list
   print "Re-reading observations"
   alt_dict_results = pool_map(
         iterable=frames,
         func=get_obs_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   obs_list = []
   for result in alt_dict_results:
     alt_dict, pickle_filename = result
     if alt_dict is not None:
       for key in alt_dict.keys():
         frame_dup_files.append(pickle_filename)
         frame_keys.append(key)
         obs_list.append(alt_dict[key])
   frames = [(i, frame_dup_files[i], frame_keys[i], obs_list[i], obs_list) for i in range(len(frame_dup_files))]
   #calculate r
   print "Re-calculating R"
   calc_r_results = pool_map(
         iterable=frames,
         func=calculate_r_mproc,
         processes=iparams.n_processors)
   frame_dup_files = []
   frame_keys = []
   r_matrix = []
   for result in calc_r_results:
     if result is not None:
       pickle_filename, index_basis, r_set = result
       frame_dup_files.append(pickle_filename)
       frame_keys.append(index_basis)
       if len(r_matrix) == 0:
         r_matrix = r_set
       else:
         r_matrix = np.append(r_matrix, r_set, axis=0)
   #embed frames into a low-dimensional space, then cluster into k groups
   print "Minimizing frame distance"
   idah = indamb_handler()
   x_set = idah.optimize(r_matrix, flag_plot=iparams.flag_plot)
   x_pickle = {'frame_dup_files':frame_dup_files, 'frame_keys':frame_keys, \
     'r_matrix':r_matrix, 'x_set':x_set}
   pickle.dump(x_pickle, open(iparams.run_no+'/index_ambiguity/x.out',"wb"))
   print "Clustering results"
   kmh = kmeans_handler()
   #k = 2^(n_alternatives-1): one cluster per possible basis assignment
   k = 2**(len(idah.get_observations(frame_dup_files[0], iparams))-1)
   centroids, labels = kmh.run(x_set, k, flag_plot=iparams.flag_plot)
   print "Get solution pickle"
   sample_fname = iparams.run_no+'/index_ambiguity/sample.lst'
   sol_pickle = idah.assign_basis(frame_dup_files, frame_keys, labels, k, sample_fname)
   pickle.dump(sol_pickle, open(sol_fname,"wb"))
   #if more frames found, merge the sample frames to get a reference set
   #that can be used for breaking the ambiguity.
   if n_frames > iparams.indexing_ambiguity.n_selected_frames:
     print "Breaking the indexing ambiguity for the remaining images."
     #temporarily point iparams.data at the sample list; restored below
     old_iparams_data = iparams.data[:]
     iparams.data = [sample_fname]
     iparams.indexing_ambiguity.index_basis_in = sol_fname
     grh = genref_handler()
     grh.run_by_params(iparams)
     mh = merge_handler()
     mh.run_by_params(iparams)
     #use the highest-numbered mtz in run_no/mtz/ as the bootstrap reference
     DIR = iparams.run_no+'/mtz/'
     file_no_list = [int(fname.split('.')[0]) for fname in os.listdir(DIR)]
     if len(file_no_list) > 0:
       hklref_indamb = DIR + str(max(file_no_list)) + '.mtz'
       print "Bootstrap reference reflection set:", hklref_indamb
       #setup a list of remaining frames
       frame_files_remain = []
       for frame in frame_files:
         if frame not in sol_pickle:
           frame_files_remain.append(frame)
       #determine index basis
       mxh = mx_handler()
       flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(hklref_indamb)
       frames = [(i, frame_files_remain[i], iparams, miller_array_ref) for i in range(len(frame_files_remain))]
       cc_results = pool_map(
         iterable=frames,
         func=solve_with_mtz_mproc,
         processes=iparams.n_processors)
       for result in cc_results:
         pickle_filename, index_basis = result
         sol_pickle[pickle_filename] = index_basis
     iparams.data = old_iparams_data[:]
   #write out solution pickle
   pickle.dump(sol_pickle, open(sol_fname,"wb"))
   #write out text output
   txt_out = "Solving indexing ambiguity complete. Solution file saved to "+sol_fname+"\n"
   f = open(iparams.run_no+'/log.txt', 'a')
   f.write(txt_out)
   f.close()
   return sol_fname, iparams
Esempio n. 9
0
def run(argv):
    """Run the full PRIME post-refinement pipeline.

    Steps: (0) resolve indexing ambiguity and set up parameters, (1) build a
    mean-intensity scaled reference set by scaling and merging all frames,
    (2) run ``n_postref_cycle`` rounds of post-refinement/merging against the
    evolving reference, (3) write timing and accumulated logs.

    :param argv: raw command-line arguments, forwarded to the
        indexing-ambiguity handler (which parses the input phil).
    :return: the merge-data handler (mdh) from the final merge cycle.
    :raises Usage: when zero post-refinement cycles are requested, when the
        reference set cannot be obtained, or when a cycle refines no images.
    """
    #capture starting time
    time_global_start = datetime.now()
    #route warnings through logging; the console handler shows errors only,
    #while basicConfig keeps full DEBUG output on the root logger
    import logging
    logging.captureWarnings(True)
    formatter = logging.Formatter('%(asctime)s\t%(levelname)s\t%(message)s')
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.ERROR)
    console_handler.setFormatter(formatter)
    logging.getLogger().addHandler(console_handler)
    logging.getLogger('py.warnings').addHandler(console_handler)
    logging.basicConfig(format='%(asctime)s\t%(levelname)s\t%(message)s',
                        level=logging.DEBUG)
    #0.1 determine indexing ambiguity and setup iparams
    txt_indexing_ambiguity = "Determine if there is an indexing ambiguity on the dataset"
    print txt_indexing_ambiguity
    idah = indexing_ambiguity_handler()
    sol_fname, iparams = idah.run(argv)
    if sol_fname is None:
        print "No ambiguity."
        txt_indexing_ambiguity += "\nNo ambiguity."
    else:
        print "Ambiguity is solved. Solution file was saved to :" + str(
            sol_fname)
        txt_indexing_ambiguity += "Ambiguity is solved. Solution file was saved to :" + str(
            sol_fname)
        iparams.indexing_ambiguity.index_basis_in = sol_fname
    #0.2 setup parameters
    iparams.flag_volume_correction = False
    if iparams.partiality_model == "Lognormal":
        iparams.voigt_nu = 0.008  #use voigt_nu as lognpdf zero parameter
    #0.3 read frames
    frame_files = read_pickles(iparams.data)
    frames = range(len(frame_files))
    #1. prepare reference miller array
    txt_merge_mean = 'Generating a reference set (will not be used if hklrefin is set)'
    print txt_merge_mean
    #Always generate the mean-intensity scaled set.
    scaled_pres_set = scale_frames(frames, frame_files, iparams)
    mdh, _txt_merge_mean = merge_frames(scaled_pres_set, iparams)
    miller_array_ref = mdh.miller_array_merge
    txt_merge_mean += '\n' + _txt_merge_mean
    if not iparams.n_postref_cycle:
        #scaling-only run: flush logs, then stop via Usage
        with open(iparams.run_no + '/log.txt', 'a') as f:
            f.write(txt_indexing_ambiguity + txt_merge_mean)
        raise Usage(
            "No. of post-refinement cycle was set to 0. Exit without post-refinement."
        )
    #an external reference (hklrefin) overrides the generated mean set
    if iparams.hklrefin is not None:
        mxh = mx_handler()
        _, miller_array_ref = mxh.get_miller_array_from_reflection_file(
            iparams.hklrefin)
    if miller_array_ref is None:
        raise Usage(
            "Problem with the assigned reference set. Try setting hklrefin=None and rerun the program."
        )
    #2. Post-refinement
    txt_merge_postref = ''
    postref_pres_set = [None] * len(frames)
    avg_mode = 'weighted'
    for i_iter in xrange(iparams.n_postref_cycle):
        #last cycle switches to the final averaging mode
        if i_iter == (iparams.n_postref_cycle - 1): avg_mode = 'final'
        postref_good_pres_set, postref_pres_set, _txt_merge_postref = postrefine_frames(
            i_iter, frames, frame_files, iparams, postref_pres_set,
            miller_array_ref, avg_mode)
        if postref_good_pres_set:
            mdh, _txt_merge_postref = merge_frames(
                postref_good_pres_set,
                iparams,
                avg_mode=avg_mode,
                mtz_out_prefix='postref_cycle_' + str(i_iter + 1))
            #merged set of this cycle becomes the reference for the next one
            miller_array_ref = mdh.miller_array_merge
            txt_merge_postref += _txt_merge_postref
        else:
            raise Usage(
                "Problem with post-refinement. No images refined. Please check your input file."
            )
    #3. collect caculating time
    #NOTE(review): timedelta.seconds ignores the days component — confirm
    #acceptable for runs longer than 24h
    time_global_end = datetime.now()
    time_global_spent = time_global_end - time_global_start
    txt_out_time_spent = 'Total calculation time: '+'{0:.2f}'.format(time_global_spent.seconds)+ \
        ' seconds\nFinished: '+time_global_end.strftime("%A %d. %B %Y %H:%M:%S")+'\n'
    print txt_out_time_spent
    txt_out = txt_indexing_ambiguity + txt_merge_mean + txt_merge_postref + txt_out_time_spent
    with open(os.path.join(iparams.run_no, 'log.txt'), 'a') as f:
        f.write(txt_out)
    #'.done' marker signals completion to external watchers
    with open(os.path.join(iparams.run_no, '.done'), 'w') as f:
        f.write('Done')
    return mdh
Esempio n. 10
0
        exit()
    if pixel_size_mm is None:
        print "Please specify pixel size (eg. pixel_size_mm=0.079346)"
        exit()
    return data, hklrefin, pixel_size_mm, target_unit_cell, d_min, d_max


if (__name__ == "__main__"):
    #Script entry: read integration pickles and check each frame's unit cell
    #against a target cell within uc_tol tolerance.
    uc_tol = 3
    #spot-profile / partiality model constants (ry, rz, re, rotx, roty)
    ry, rz, re, rotx, roty = (0, 0, 0.008, 0, 0)
    flag_beam_divergence = False
    #wavelength offsets from -0.050 to +0.049 in 0.001 steps
    lambda_template = flex.double(range(-50, 50, 1)) / 1000
    #0 .read input parameters and frames (pickle files)
    data, hklrefin, pixel_size_mm, target_unit_cell, \
      d_min, d_max = read_input(args = sys.argv[1:])
    frame_files = read_pickles(data)
    for pickle_filename in frame_files:
        observations_pickle = read_frame(pickle_filename)
        #basename of the pickle path (split on '/')
        pickle_filename_arr = pickle_filename.split('/')
        pickle_filename_only = pickle_filename_arr[len(pickle_filename_arr) -
                                                   1]
        #NOTE(review): hklrefin is loop-invariant; this reflection-file read
        #could be hoisted out of the loop
        mxh = mx_handler()
        flag_hklisoin_found, miller_array_iso = mxh.get_miller_array_from_reflection_file(
            hklrefin)
        observations = observations_pickle["observations"][0]
        #check if the uc is good
        flag_good_unit_cell = good_unit_cell(
            observations.unit_cell().parameters(),
            None,
            uc_tol,
            target_unit_cell=target_unit_cell)
Esempio n. 11
0
    def run(self, args):
        """Cluster integration pickles into isoform groups.

        Two modes:
        1. If ``isoform_cluster.isorefin`` reference mtz files are given,
           assign each frame to the best-matching reference set (via
           ``solve_with_mtz_mproc``) and write the solution out.
        2. Otherwise run Brehm & Diederichs-style clustering: compute an
           R-matrix on a frame sample, select the best frames, minimize
           frame distances, k-means into ``n_clusters`` groups, then
           bootstrap cluster ids for the remaining frames against merged
           per-cluster reference sets.

        :param args: command-line arguments forwarded to process_input.
        :return: ``(None, iparams)`` when no pickles are found; otherwise
            results are written to disk and the method returns None.
        """
        #read inputs
        from prime.postrefine.mod_input import process_input, read_pickles
        iparams, txt_out_input = process_input(args)
        print txt_out_input
        with open(os.path.join(iparams.run_no, self.module_name, 'log.txt'),
                  'w') as f:
            f.write(txt_out_input)
        #read all integration pickles
        frame_files = read_pickles(iparams.data)
        n_frames = len(frame_files)
        if n_frames == 0:
            print "No integration pickle found. Exit program."
            #NOTE(review): returns a tuple here but bare None elsewhere —
            #confirm what callers expect
            return None, iparams
        #start
        if iparams.isoform_cluster.isorefin:
            #get collection of iso. ref. reflection set.
            mxh = mx_handler()
            miller_array_ref_set = []
            for isorefin in iparams.isoform_cluster.isorefin:
                flag_ref_found, miller_array_ref = mxh.get_miller_array_from_reflection_file(
                    isorefin)
                if flag_ref_found:
                    miller_array_ref_set.append(miller_array_ref)
            #get observation list
            frame_files_sel, obs_list = self.get_observation_set(
                iparams, frame_files, n_frames)
            if miller_array_ref_set:
                #assign each frame to the best-correlating reference (parallel)
                frames = [(i, frame_files_sel[i], obs_list[i], iparams,
                           miller_array_ref_set) for i in range(len(obs_list))]
                cc_results = pool_map(iterable=frames,
                                      func=solve_with_mtz_mproc,
                                      processes=iparams.n_processors)
                sol_pickle = {}
                for result in cc_results:
                    pickle_filename, cluster_id = result
                    sol_pickle[pickle_filename] = cluster_id
                write_out_solutions(iparams, sol_pickle)
                txt_out = "Cluster images with given " + str(
                    len(miller_array_ref_set)
                ) + " mtz files completed. Use cluster_0.lst - cluster_k.lst (for k clusters) for merging.\n"
                print txt_out
                with open(
                        os.path.join(iparams.run_no, self.module_name,
                                     'log.txt'), 'a') as f:
                    f.write(txt_out)
            return

        #*************************************************
        #solve with Brehm & Diederichs - sample size n_sample_frames then bootstrap the rest
        txt_out = "Cluster images with B&D algorithms.\n"
        frame_files_sel, obs_list = self.get_observation_set(
            iparams, frame_files, iparams.isoform_cluster.n_sample_frames)
        frames = [(i, frame_files_sel[i], obs_list[i], obs_list)
                  for i in range(len(frame_files_sel))]
        #calculate r
        print "Calculating R"
        calc_r_results = pool_map(iterable=frames,
                                  func=calculate_r_mproc,
                                  processes=iparams.n_processors)
        #collect per-frame R rows into one matrix, skipping failed frames
        frame_files_sel = []
        r_matrix = []
        obs_list = []
        for result in calc_r_results:
            if result:
                pickle_filename, r_set, obs = result
                frame_files_sel.append(pickle_filename)
                obs_list.append(obs)
                if len(r_matrix) == 0:
                    r_matrix = r_set
                else:
                    r_matrix = np.append(r_matrix, r_set, axis=0)
        #choose groups with best R
        print "Selecting frames with best R"
        #sort by mean R, descending (highest mean R first)
        i_mean_r = np.argsort(np.mean(r_matrix, axis=1))[::-1]
        r_matrix_sorted = r_matrix[i_mean_r]
        frame_files_sorted = np.array(frame_files_sel)[i_mean_r]
        obs_list_sorted = np.array(obs_list)[i_mean_r]
        frame_files_sel = []
        obs_sel = []
        #keep unique frames until n_selected_frames are collected
        for frame_file, r_set, obs in zip(frame_files_sorted, r_matrix_sorted,
                                          obs_list_sorted):
            if frame_file not in frame_files_sel:
                frame_files_sel.append(frame_file)
                obs_sel.append(obs)
                print frame_file, np.mean(r_set)
                if len(frame_files_sel
                       ) >= iparams.isoform_cluster.n_selected_frames:
                    print 'Found all %6.0f good frames' % (
                        len(frame_files_sel))
                    break
        #Recalculate r for the new selected list
        frames = [(i, frame_files_sel[i], obs_sel[i], obs_sel)
                  for i in range(len(frame_files_sel))]
        print "Re-calculating R"
        calc_r_results = pool_map(iterable=frames,
                                  func=calculate_r_mproc,
                                  processes=iparams.n_processors)
        frame_files_sel = []
        r_matrix = []
        obs_list = []
        for result in calc_r_results:
            if result:
                pickle_filename, r_set, obs = result
                frame_files_sel.append(pickle_filename)
                obs_list.append(obs)
                if len(r_matrix) == 0:
                    r_matrix = r_set
                else:
                    r_matrix = np.append(r_matrix, r_set, axis=0)
        print "Minimizing frame distance"
        isoch = isoform_cluster_handler()
        x_set = isoch.optimize(r_matrix, flag_plot=iparams.flag_plot)
        print "Clustering results"
        kmh = kmeans_handler()
        k = iparams.isoform_cluster.n_clusters
        centroids, labels = kmh.run(x_set, k, flag_plot=iparams.flag_plot)
        print "Get solution pickle and cluster files list"
        sol_pickle, cluster_files = isoch.assign_cluster(frame_files_sel, labels, k, \
            os.path.join(iparams.run_no,self.module_name))
        #if more frames found, merge the sample frames to get a reference set
        #that can be used for breaking the ambiguity.
        if n_frames > iparams.isoform_cluster.n_selected_frames:
            print "Assign cluster_id for the remaining images."
            #keep a copy so iparams.data can be restored afterwards
            old_iparams_data = iparams.data[:]
            miller_array_ref_set = []
            from prime.command_line.postrefine import scale_frames, merge_frames
            for i in range(k):
                #generate a reference set from solved frames
                with open(cluster_files[i]) as f:
                    frame_files_processed = f.read().split('\n')[:-1]
                scaled_pres_set = scale_frames(
                    range(len(frame_files_processed)), frame_files_processed,
                    iparams)
                mdh, txt_merge_out = merge_frames(scaled_pres_set, iparams, \
                    mtz_out_prefix=os.path.join(self.module_name,'cluster_'+str(i)))
                miller_array_ref_set.append(mdh.miller_array_merge)
                txt_out += txt_merge_out
            #setup a list of remaining frames
            frame_files_remain = [
                frame for frame in frame_files if frame not in sol_pickle
            ]
            frame_files_remain_sel, obs_remain_sel_list = self.get_observation_set(iparams, \
                frame_files_remain, len(frame_files_remain))
            #assign cluster ids to the remaining frames (parallel)
            frames = [(i, frame_files_remain_sel[i], obs_remain_sel_list[i],
                       iparams, miller_array_ref_set)
                      for i in range(len(obs_remain_sel_list))]
            cc_results = pool_map(iterable=frames,
                                  func=solve_with_mtz_mproc,
                                  processes=iparams.n_processors)
            for result in cc_results:
                if result:
                    pickle_filename, cluster_id = result
                    sol_pickle[pickle_filename] = cluster_id
            iparams.data = old_iparams_data[:]
        #write out solution pickle
        write_out_solutions(iparams, sol_pickle)
        #write out text output
        txt = "Cluster images completed. Use cluster_0.lst - cluster_k.lst (for k clusters) for merging.\n"
        txt_out += txt
        print txt
        with open(os.path.join(iparams.run_no, self.module_name, 'log.txt'),
                  'a') as f:
            f.write(txt_out)
Esempio n. 12
0
            if pair[0] == "data":
                data.append(pair[1])
            elif pair[0] == "data_sweep":
                data_sweep = pair[1]
    if len(data) == 0:
        print("Please input all parameters")
        exit()
    return data, data_sweep


if __name__ == "__main__":
    # Read input parameters and frames (pickle files)
    data_fine, data_sweep = read_input(args=sys.argv[1:])
    pixel_size_mm = 0.079346
    # read all the pickle files from fine-slice data
    frames_fine = read_pickles(data_fine)
    # get a sample reflections
    sample_no = 0
    obs_fine_sample = None
    for i in range(2000):
        frame = frames_fine[i]
        pickle_fine = read_frame(frame)
        obs_fine = pickle_fine["observations"][0]
        obs_fine = obs_fine.select(obs_fine.data() > 0)
        if len(obs_fine.data()) > 5:
            print(frame)
            for index, d, I, sigI in zip(
                    obs_fine.indices(),
                    obs_fine.d_spacings().data(),
                    obs_fine.data(),
                    obs_fine.sigmas(),
Esempio n. 13
0
 def __init__(self, iparams):
   """Store run parameters and enumerate the integration pickle files.

   The number of discovered frame files also defines the individual
   range (idv_range) used elsewhere in this handler.
   """
   self.iparams = iparams
   pickle_paths = read_pickles(iparams.data)
   self.frame_files = pickle_paths
   self.idv_range = len(pickle_paths)