Example #1
def client():
    result = []
    while True:
        comm.send(rank, dest=0)
        msg = comm.recv(source=0)
        if str(msg) == "endrun":
            break
        # receive contents for processing
        activity, act_params = msg
        if activity == "scale":
            frame_files, iparams = act_params
            from prime.postrefine import postref_handler

            prh = postref_handler()
            for frame_index, frame_file in enumerate(frame_files):
                pres, _ = prh.scale_frame_by_mean_I(frame_index, frame_file,
                                                    iparams, 0, "average")
                result.append(pres)
        if activity == "pre_merge":
            frame_results, iparams = act_params
            its = intensities_scaler()
            prep_output = its.prepare_output(frame_results, iparams, "average")
            result.append(prep_output)
        if activity == "merge":
            batch_prep, iparams = act_params
            its = intensities_scaler()
            mdh, _, txt_out_rejection = its.calc_avg_I_cpp(
                batch_prep, iparams, "average")
            result.append([mdh, txt_out_rejection])
    return result
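All of these snippets rely on module-level mpi4py globals (comm, rank, size) and on a rank-0 master that hands out work whenever a client reports its rank. Below is a minimal, self-contained sketch of that handshake (requires at least 2 MPI ranks); do_work() is a hypothetical stand-in for the real scale/merge calls, not part of the prime API:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

def do_work(payload):
    # placeholder for scale_frame_by_mean_I / calc_avg_I_cpp etc.
    return payload

if rank == 0:
    for job in range(10):                           # toy work items
        rankreq = comm.recv(source=MPI.ANY_SOURCE)  # a client asks for work
        comm.send(job, dest=rankreq)                # hand it one job
    for _ in range(size - 1):                       # tell every client to stop
        rankreq = comm.recv(source=MPI.ANY_SOURCE)
        comm.send("endrun", dest=rankreq)
    result = []
else:
    result = []
    while True:
        comm.send(rank, dest=0)                     # announce availability
        msg = comm.recv(source=0)
        if str(msg) == "endrun":
            break
        result.append(do_work(msg))
result = comm.gather(result, root=0)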
Example #2
def client():
    result = []
    prh = postref_handler()
    while True:
        comm.send(rank, dest=0)
        msg = comm.recv(source=0)
        if str(msg) == 'endrun':
            break
        # receive contents for processing
        activity, act_params = msg
        if activity == "scale":
            frame_files, iparams = act_params
            for frame_index, frame_file in enumerate(frame_files):
                pres, _ = prh.scale_frame_by_mean_I(frame_index, frame_file,
                                                    iparams, 0, 'average')
                result.append(pres)
        elif activity == "pre_merge":
            frame_results, iparams, avg_mode = act_params
            its = intensities_scaler()
            prep_output = its.prepare_output(frame_results, iparams, avg_mode)
            result.append(prep_output)
        elif activity == "merge":
            batch_prep, iparams, avg_mode = act_params
            its = intensities_scaler()
            mdh, _, txt_out_rejection = its.calc_avg_I_cpp(
                batch_prep, iparams, avg_mode)
            result.append([mdh, txt_out_rejection])
        elif activity == "postref":
            frame_no, frame_file, iparams, miller_array_ref, pres_in, avg_mode = act_params
            pres, _ = prh.postrefine_by_frame(frame_no, frame_file, iparams,
                                              miller_array_ref, pres_in,
                                              avg_mode)
            result.append(pres)
    return result
Example #3
def merge_frames(pres_set,
                 iparams,
                 avg_mode='average',
                 mtz_out_prefix='mean_scaled'):
    """merge frames using average as the default"""
    mdh, miller_array_ref, txt_out = None, None, ''  # keep mdh defined when pres_set is empty
    intscal = intensities_scaler()
    if pres_set:
        prep_output = intscal.prepare_output(pres_set, iparams, avg_mode)
        if prep_output:
            mdh, _, txt_out_rejection = intscal.calc_avg_I_cpp(
                prep_output, iparams, avg_mode)
            # select only indices with non-Inf, non-NaN stats
            selections = flex.bool([
                False if (math.isnan(r0) or math.isinf(r0) or math.isnan(r1)
                          or math.isinf(r1)) else True
                for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)
            ])
            mdh.reduce_by_selection(selections)
            with open(iparams.run_no + '/rejections.txt', 'a') as f:
                f.write(txt_out_rejection)
            #merge all good indices
            mdh, txt_merge_mean_table = intscal.write_output(
                mdh, iparams, iparams.run_no + '/' + mtz_out_prefix, avg_mode)
            print(txt_merge_mean_table)
            print(prep_output[-1])
            txt_out = txt_merge_mean_table + prep_output[-1]
    return mdh, txt_out
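The False-if/else-True comprehension above keeps only the rows whose two R-meas statistics are finite. Under Python 3 the same selection can be written more directly with math.isfinite; a sketch, assuming the same mdh fields:

selections = flex.bool([
    math.isfinite(r0) and math.isfinite(r1)
    for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)
])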
Example #4
def merge_frames(pres_set, iparams, avg_mode='average', mtz_out_prefix='mean_scaled'):
  """merge frames using average as the default"""
  mdh, miller_array_ref, txt_out = None, None, ''  # keep mdh defined when pres_set is empty
  intscal = intensities_scaler()
  if pres_set:
    prep_output = intscal.prepare_output(pres_set, iparams, avg_mode)
    if prep_output:
      mdh, _, txt_out_rejection = intscal.calc_avg_I_cpp(prep_output, iparams, avg_mode)
      #select only indices with non-Inf, non-NaN stats
      selections = flex.bool([
        False if (math.isnan(r0) or math.isinf(r0) or
                  math.isnan(r1) or math.isinf(r1)) else True
        for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)])
      mdh.reduce_by_selection(selections)

      #handle rejected reflections
      rejections = {}
      for reject in txt_out_rejection.split('\n'):
        data = reject.split()
        if data:
          if data[0] not in rejections:
            rejections[data[0]] = flex.miller_index()
          rejections[data[0]].append(tuple([int(_d) for _d in data[1:4]]))

      if len(rejections) > 0:
        if not iparams.rejections:
          iparams.rejections = {}
        iparams.rejections.update(rejections)

      #merge all good indices
      mdh, txt_merge_mean_table = intscal.write_output(mdh,
          iparams, iparams.run_no+'/'+mtz_out_prefix, avg_mode)
      print(txt_merge_mean_table)
      print(prep_output[-1])
      txt_out = txt_merge_mean_table + prep_output[-1]
  return mdh, txt_out
Example #5
def merge(pres_result, iparams, txt_out_prefix, output_prefix, avg_mode):
    #pre-merge task
    if rank == 0:
        pres_results = sum(pres_result, [])
        print "Scaling/post-refinement is done on %d cores for %d frames" % (
            size, len(pres_results))
        master((pres_results, avg_mode), iparams, "pre_merge")
        premerge_result = []
    else:
        pres_results = None
        premerge_result = client()
    premerge_result = comm.gather(premerge_result, root=0)
    comm.Barrier()
    #merge task
    if rank == 0:
        print "Pre-merge is done on %d cores" % (len(premerge_result))
        master((premerge_result, avg_mode), iparams, "merge")
        merge_result = []
    else:
        merge_result = client()
    #finalize merge
    merge_result = comm.gather(merge_result, root=0)
    comm.Barrier()
    if rank == 0:
        print "Merge completed on %d cores" % (len(merge_result))
        merge_results = sum(merge_result, [])
        mdh = merge_data_handler()
        txt_out_rejection = ""
        for _mdh, _txt_out_rejection in merge_results:
            mdh.extend(_mdh)
            txt_out_rejection += _txt_out_rejection
        # select only indices with non-Inf, non-NaN stats
        selections = flex.bool([
            False if (math.isnan(r0) or math.isinf(r0) or math.isnan(r1)
                      or math.isinf(r1)) else True
            for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)
        ])
        mdh.reduce_by_selection(selections)
        its = intensities_scaler()
        mdh, txt_merge_mean_table = its.write_output(
            mdh, iparams, os.path.join(iparams.run_no, output_prefix),
            avg_mode)
        print(txt_merge_mean_table)
        #write log output
        with open(os.path.join(iparams.run_no, 'log.txt'), 'a') as f:
            f.write(txt_out_prefix + txt_merge_mean_table)
        with open(os.path.join(iparams.run_no, 'rejections.txt'), 'a') as f:
            f.write(txt_out_rejection)
    else:
        merge_results = None
        mdh = None
    return pres_results, mdh
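comm.gather(..., root=0) returns one list per rank, so rank 0 flattens it with sum(result, []) before counting frames. itertools.chain.from_iterable does the same without the quadratic list copying that sum() incurs; an illustrative sketch with made-up values:

import itertools

per_rank = [[1, 2], [], [3]]  # shape of what comm.gather yields on the root
flat = list(itertools.chain.from_iterable(per_rank))  # [1, 2, 3]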
Example #6
def master(frame_objects, iparams, activity):
    if activity == "scale":
        n_batch = 1
        indices = range(0, len(frame_objects), n_batch)
        for i in indices:
            i_end = (i + n_batch if i + n_batch < len(frame_objects) else
                     len(frame_objects))
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send((activity, (frame_objects[i:i_end], iparams)),
                      dest=rankreq)
    if activity == "pre_merge":
        n_batch = int(len(frame_objects) / (size * 3))
        if n_batch < 10:
            n_batch = 10
        indices = range(0, len(frame_objects), n_batch)
        for i in indices:
            i_end = (i + n_batch if i + n_batch < len(frame_objects) else
                     len(frame_objects))
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send((activity, (frame_objects[i:i_end], iparams)),
                      dest=rankreq)
    if activity == "merge":
        its = intensities_scaler()
        cpo = its.combine_pre_merge(frame_objects, iparams)
        # assign at least 100k reflections at a time
        n_batch = int(1e5 / (len(cpo[1]) / cpo[0]))
        if n_batch < 1:
            n_batch = 1
        print("Merging with %d batch size" % (n_batch))
        indices = range(0, cpo[0], n_batch)
        for i in indices:
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            i_end = i + n_batch if i + n_batch < cpo[0] else cpo[0]
            sel = flex.bool([
                sel_l and sel_h
                for sel_l, sel_h in zip(cpo[1] >= i, cpo[1] < i_end)
            ])
            batch_prep = [cpo_elem.select(sel) for cpo_elem in cpo[1:13]]
            batch_prep.insert(0, i_end - i)
            batch_prep[1] -= i
            batch_prep.append(cpo[13])
            batch_prep.append(cpo[14])
            batch_prep.append(cpo[15].select(sel))
            batch_prep.append("")
            comm.send((activity, (tuple(batch_prep), iparams)), dest=rankreq)
    print("Master for %s is completed. Time to stop all %d clients" %
          (activity, size - 1))
    # stop clients
    for rankreq in range(size - 1):
        rankreq = comm.recv(source=MPI.ANY_SOURCE)
        comm.send("endrun", dest=rankreq)
Example #7
def run(argv):
    comm.Barrier()
    start_time = MPI.Wtime()
    # broadcast parameters
    if rank == 0:
        iparams, txt_out_input = process_input(argv)
        iparams.flag_volume_correction = False
        iparams.flag_hush = True
        print(txt_out_input)
        frame_files = read_pickles(iparams.data)
    else:
        iparams = None
        frame_files = None
    comm.Barrier()
    # assign scaling task
    if rank == 0:
        master(frame_files, iparams, "scale")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    # pre-merge task
    if rank == 0:
        results = sum(result, [])
        print("Scaling is done on %d cores for %d frames" %
              (size, len(results)))
        master(results, iparams, "pre_merge")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()
    # merge task
    if rank == 0:
        print("Pre-merge is done on %d cores" % (len(result)))
        master(result, iparams, "merge")
        result = []
    else:
        result = client()
    # finalize merge
    result = comm.gather(result, root=0)
    comm.Barrier()
    if rank == 0:
        print("Merge completed on %d cores" % (len(result)))
        results = sum(result, [])
        mdh = merge_data_handler()
        txt_out_rejection = ""
        for _mdh, _txt_out_rejection in results:
            mdh.extend(_mdh)
            txt_out_rejection += _txt_out_rejection
        # select only indices with non-Inf, non-NaN stats
        selections = flex.bool([
            False if (math.isnan(r0) or math.isinf(r0) or math.isnan(r1)
                      or math.isinf(r1)) else True
            for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)
        ])
        mdh.reduce_by_selection(selections)
        its = intensities_scaler()
        mdh, txt_merge_mean_table = its.write_output(mdh, iparams, "test",
                                                     "average")
        print(txt_merge_mean_table)
    # collect time profile
    comm.Barrier()
    end_time = MPI.Wtime()
    txt_time = "Elapsed Time (s):%10.2f\n" % (end_time - start_time)
    # write log output
    if rank == 0:
        print(txt_time)
        with open(os.path.join(iparams.run_no, "log.txt"), "w") as f:
            f.write(txt_out_input + txt_merge_mean_table + txt_time)
        with open(os.path.join(iparams.run_no, "rejections.txt"), "w") as f:
            f.write(txt_out_rejection)
    MPI.Finalize()
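Every rank executes run(), so the usual entry point is a plain __main__ guard launched under an MPI runner; a sketch (the real module's command line is not shown in these snippets):

# launch as, e.g.:  mpirun -n 32 python this_script.py <args>
import sys

if __name__ == "__main__":
    run(sys.argv[1:])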
Example #8
def master(frame_token, iparams, activity):
    if activity == "scale":
        n_batch = 1
        frame_objects = frame_token
        indices = range(0, len(frame_objects), n_batch)
        for i in indices:
            i_end = i + n_batch if i + n_batch < len(frame_objects) else len(
                frame_objects)
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send((activity, (frame_objects[i:i_end], iparams)),
                      dest=rankreq)
    elif activity == "pre_merge":
        frame_objects, avg_mode = frame_token
        n_batch = int(len(frame_objects) / (size * 3))
        if n_batch < 10: n_batch = 10
        indices = range(0, len(frame_objects), n_batch)
        for i in indices:
            i_end = i + n_batch if i + n_batch < len(frame_objects) else len(
                frame_objects)
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send((activity, (frame_objects[i:i_end], iparams, avg_mode)),
                      dest=rankreq)
    elif activity == "merge":
        frame_objects, avg_mode = frame_token
        its = intensities_scaler()
        cpo = its.combine_pre_merge(frame_objects, iparams)
        #assign at least 100k reflections at a time
        n_batch = int(1e5 / (len(cpo[1]) / cpo[0]))
        if n_batch < 1: n_batch = 1
        print "Merging with %d batch size" % (n_batch)
        indices = range(0, cpo[0], n_batch)
        for i in indices:
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            i_end = i + n_batch if i + n_batch < cpo[0] else cpo[0]
            sel = flex.bool([
                sel_l and sel_h
                for sel_l, sel_h in zip(cpo[1] >= i, cpo[1] < i_end)
            ])
            batch_prep = [cpo_elem.select(sel) for cpo_elem in cpo[1:13]]
            batch_prep.insert(0, i_end - i)
            batch_prep[1] -= i
            batch_prep.append(cpo[13])
            batch_prep.append(cpo[14])
            batch_prep.append(cpo[15].select(sel))
            batch_prep.append("")
            comm.send((activity, (tuple(batch_prep), iparams, avg_mode)),
                      dest=rankreq)
    elif activity == "postref":
        frame_files, miller_array_ref, results, avg_mode = frame_token
        # convert results to a dict keyed by pickle_filename
        pres_dict = {}
        for pres in results:
            if pres: pres_dict[pres.pickle_filename] = pres
        for i in range(len(frame_files)):
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send((activity, (i, frame_files[i], iparams, miller_array_ref,
                                  pres_dict.get(frame_files[i]), avg_mode)),
                      dest=rankreq)
    print "Master for %s is completed. Time to stop all %d clients" % (
        activity, size - 1)
    # stop clients
    for rankreq in range(size - 1):
        rankreq = comm.recv(source=MPI.ANY_SOURCE)
        comm.send('endrun', dest=rankreq)
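Note the shutdown protocol every master variant shares: once the work loop drains, the master answers the next size - 1 rank requests with the 'endrun' sentinel, which is exactly the string each client's while-loop checks before breaking. Isolated as a sketch (stop_clients is an illustrative name, not part of the prime API):

from mpi4py import MPI

SENTINEL = 'endrun'

def stop_clients(comm, n_clients):
    # every idle client sends its rank; answer each request with the
    # sentinel so that client's receive loop breaks
    for _ in range(n_clients):
        rankreq = comm.recv(source=MPI.ANY_SOURCE)
        comm.send(SENTINEL, dest=rankreq)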