Example #1
File: step5_laue.py Project: nksauter/LS49
def tst_all(quick=False, prefix="step5"):
    from LS49.spectra.generate_spectra import spectra_simulation
    SS = spectra_simulation()
    iterator = SS.generate_recast_renormalized_images(20,
                                                      energy=7120.,
                                                      total_flux=1e12)

    #
    C = microcrystal(
        Deff_A=4000, length_um=4.,
        beam_diameter_um=1.0)  # assume smaller than 10 um crystals

    if quick: prefix_root = prefix + "_%06d"
    else: prefix_root = prefix + "laue_%06d"

    Nimages = 1  # 10000
    from LS49 import legacy_random_orientations
    random_orientations = legacy_random_orientations(Nimages)
    for iteration in range(Nimages):
        file_prefix = prefix_root % iteration
        rand_ori = sqr(random_orientations[iteration])
        run_sim2smv(prefix=file_prefix,
                    crystal=C,
                    spectra=iterator,
                    rotation=rand_ori,
                    quick=quick,
                    rank=0)
Example #2
def tst_all(quick=False, prefix="step6"):
    from LS49.spectra.generate_spectra import spectra_simulation
    SS = spectra_simulation()
    iterator = SS.generate_recast_renormalized_images(20,
                                                      energy=7120.,
                                                      total_flux=1e12)

    #
    C = microcrystal(
        Deff_A=4000, length_um=4.,
        beam_diameter_um=1.0)  # assume smaller than 10 um crystals
    mt = flex.mersenne_twister(seed=0)

    if quick: prefix_root = prefix + "_%06d"
    else: prefix_root = prefix + "poly_%06d"

    Nimages = 1  # 10000
    for iteration in range(Nimages):
        file_prefix = prefix_root % iteration
        rand_ori = sqr(mt.random_double_r3_rotation_matrix())
        run_sim2smv(prefix=file_prefix,
                    crystal=C,
                    spectra=iterator,
                    rotation=rand_ori,
                    quick=quick,
                    rank=0)
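Examples #1 and #2 differ mainly in how the random crystal orientation is produced: #1 takes a fixed list from LS49.legacy_random_orientations, while #2 draws matrices from a seeded scitbx Mersenne Twister. A minimal sketch of the seeded-generator variant is below; the import paths are assumptions based on standard cctbx usage, since the snippets rely on module-level imports that are not shown.

# Minimal sketch of the seeded orientation generator used in Example #2
# (import paths are assumed; the original imports them at module scope).
from scitbx.array_family import flex
from scitbx.matrix import sqr

mt = flex.mersenne_twister(seed=0)  # fixed seed gives reproducible orientations
rand_ori = sqr(mt.random_double_r3_rotation_matrix())  # 3x3 rotation matrix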
Example #3
def tst_iterators():
  from LS49.spectra.generate_spectra import spectra_simulation
  SS = spectra_simulation()
  import six
  if six.PY3:
    reference = cPickle.load(
      open(os.path.join(ls49_big_data,"reference","tst_spectrum_iterator_data"),"rb"),encoding="bytes")
  else:
    reference = cPickle.load(open(os.path.join(ls49_big_data,"reference","tst_spectrum_iterator_data"),"rb"))

  # use of single iterator with next()
  iterator = SS.generate_recast_renormalized_images(20,energy=7120.,total_flux=1e12)
  for x in range(20):
    wavlen, flux, wavelength_A = next(iterator) # list of lambdas, list of fluxes, average wavelength
    assert approx_equal(wavlen,reference[x][0]), "wavelength axis"
    assert approx_equal(flux, reference[x][1]), "flux axis"
    assert approx_equal(wavelength_A,reference[x][2],eps=1E-10), "mean wavelength"

  # get a new iterator for every event
  for x in range(10):
    iterator = SS.generate_recast_renormalized_image(image=x,energy=7120.,total_flux=1e12)
    wavlen, flux, wavelength_A = next(iterator) # list of lambdas, list of fluxes, average wavelength
    assert approx_equal(wavlen,reference[x][0]), "iterator 2 wavelength axis"
    assert approx_equal(flux, reference[x][1]), "iterator 2 flux axis"
    assert approx_equal(wavelength_A, reference[x][2],eps=1E-10), "iterator 2 mean wavelength"
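Example #3 exercises both ways of obtaining spectra that the other examples use individually: a single iterator from generate_recast_renormalized_images, advanced with next() for consecutive images, and a fresh per-event iterator from generate_recast_renormalized_image. A condensed sketch of the two call patterns, with the reference-comparison code stripped out:

# Condensed sketch of the two iterator entry points from Example #3.
from LS49.spectra.generate_spectra import spectra_simulation

SS = spectra_simulation()

# (a) one iterator, advanced with next() for 20 consecutive images
it = SS.generate_recast_renormalized_images(20, energy=7120., total_flux=1e12)
for x in range(20):
    wavlen, flux, wavelength_A = next(it)  # list of lambdas, list of fluxes, mean wavelength

# (b) a fresh iterator for one specific image/event
it = SS.generate_recast_renormalized_image(image=5, energy=7120., total_flux=1e12)
wavlen, flux, wavelength_A = next(it)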
Example #4
def create_reference_results():
  from LS49.spectra.generate_spectra import spectra_simulation
  SS = spectra_simulation()
  iterator = SS.generate_recast_renormalized_images(20,energy=7120.,total_flux=1e12)
  results=[]
  #SS.plot_recast_images(20,7120.) # optionally plot these spectra
  for x in range(20):
    wavlen, flux, wavelength_A = next(iterator) # list of lambdas, list of fluxes, average wavelength
    results.append((wavlen, flux, wavelength_A))
  cPickle.dump(results,open(os.path.join(ls49_big_data,"reference","tst_spectrum_iterator_data"),"wb"),cPickle.HIGHEST_PROTOCOL)
Example #5
def channel_wavelength_fmodel(create):
    from LS49.spectra.generate_spectra import spectra_simulation
    SS = spectra_simulation()
    iterator = SS.generate_recast_renormalized_images(20,
                                                      energy=7120.,
                                                      total_flux=1e12)
    wavlen, flux, wavelength_A = next(
        iterator)  # list of lambdas, list of fluxes, average wavelength

    direct_algo_res_limit = 1.7
    from LS49.sim.util_fmodel import gen_fmodel

    GF = gen_fmodel(resolution=direct_algo_res_limit,
                    pdb_text=get_pdb_lines(),
                    algorithm="fft",
                    wavelength=wavelength_A)
    GF.set_k_sol(0.435)
    GF.make_P1_primitive()
    for x in range(10, len(flux), 5):
        print("+++++++++++++++++++++++++++++++++++++++ Wavelength", x)
        GF.reset_wavelength(wavelength_A)
        GF.reset_specific_at_wavelength(label_has="FE1",
                                        tables=Fe_oxidized_model,
                                        newvalue=wavelength_A)
        GF.reset_specific_at_wavelength(label_has="FE2",
                                        tables=Fe_reduced_model,
                                        newvalue=wavelength_A)
        sfall_channel = GF.get_amplitudes()
        filename = "sf_reference_channel_%s" % ("%03d" % x)
        if create:  # write the reference for the first time
            cPickle.dump(
                sfall_channel,
                open(os.path.join(ls49_big_data, "reference", filename), "wb"),
                cPickle.HIGHEST_PROTOCOL)
        else:  # read the reference and assert sameness to sfall_channel
            print(os.path.join(ls49_big_data, "reference", filename))

            if six.PY3:
                sfall_ref = cPickle.load(open(
                    os.path.join(ls49_big_data, "reference", filename), "rb"),
                                         encoding="bytes")
                fix_unpickled_attributes(sfall_ref)
            else:
                sfall_ref = cPickle.load(
                    open(os.path.join(ls49_big_data, "reference", filename),
                         "rb"))

            T = sfall_channel
            S = sfall_ref
            assert S.space_group() == T.space_group()
            assert S.unit_cell().parameters() == T.unit_cell().parameters()
            assert S.indices() == T.indices()
            assert S.data() == T.data()
Example #6
def single_wavelength_fmodel(create):
    from LS49.spectra.generate_spectra import spectra_simulation
    SS = spectra_simulation()
    iterator = SS.generate_recast_renormalized_images(20,
                                                      energy=7120.,
                                                      total_flux=1e12)
    wavlen, flux, wavelength_A = next(
        iterator)  # list of lambdas, list of fluxes, average wavelength

    direct_algo_res_limit = 1.7
    from LS49.sim.util_fmodel import gen_fmodel
    for flag in [True, False]:
        GF = gen_fmodel(resolution=direct_algo_res_limit,
                        pdb_text=get_pdb_lines(),
                        algorithm="fft",
                        wavelength=wavelength_A)
        GF.set_k_sol(0.435)
        if flag: GF.make_P1_primitive()
        sfall_main = GF.get_amplitudes()
        sfall_main.show_summary(prefix="Amplitudes used ")
        if create:  # write the reference for the first time
            cPickle.dump(
                sfall_main,
                open(
                    os.path.join(ls49_big_data, "reference",
                                 "sf_reference_cb_to_P1_%s" % (str(flag))),
                    "wb"), cPickle.HIGHEST_PROTOCOL)
        else:  # read the reference and assert sameness to sfall_main

            if six.PY3:
                sfall_ref = cPickle.load(open(
                    os.path.join(ls49_big_data, "reference",
                                 "sf_reference_cb_to_P1_%s" % (str(flag))),
                    "rb"),
                                         encoding="bytes")
                from LS49.tests.tst_sf_energies import fix_unpickled_attributes
                fix_unpickled_attributes(sfall_ref)
            else:
                sfall_ref = cPickle.load(
                    open(
                        os.path.join(ls49_big_data, "reference",
                                     "sf_reference_cb_to_P1_%s" % (str(flag))),
                        "rb"))

            T = sfall_main
            S = sfall_ref
            assert S.space_group() == T.space_group()
            assert S.unit_cell().parameters() == T.unit_cell().parameters()
            assert S.indices() == T.indices()
            assert S.data() == T.data()
        print()
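Examples #5 and #6 share a create-or-compare reference scheme: when create is true the computed amplitudes are dumped with cPickle.HIGHEST_PROTOCOL, otherwise the stored reference is loaded back (with encoding="bytes" under Python 3 so that Python 2 pickles still load) and compared against the fresh calculation. A stripped-down sketch of that scheme follows; the function name and the plain equality check are placeholders, not part of LS49.

# Stripped-down sketch of the create-or-compare reference scheme
# (create_or_compare and the generic equality test are placeholders).
import six
from six.moves import cPickle

def create_or_compare(obj, path, create):
    if create:  # write the reference for the first time
        with open(path, "wb") as f:
            cPickle.dump(obj, f, cPickle.HIGHEST_PROTOCOL)
    else:  # read the reference back and assert sameness
        with open(path, "rb") as f:
            if six.PY3:
                ref = cPickle.load(f, encoding="bytes")  # accept Python 2 pickles
            else:
                ref = cPickle.load(f)
        assert ref == obj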
Example #7
def test_Gi_factor(G):
  from LS49.spectra.generate_spectra import spectra_simulation
  SS = spectra_simulation()
  x = flex.double()
  y = flex.double()
  for key in G.images_strong:
    print (key,"of",len(G.images_strong),G.images_Gi[key])
    #trying here to plot the Gi against the integrated spectrum for each event.
    iterator = SS.generate_recast_renormalized_image(image=key,energy=7120.,total_flux=1e12)
    wavlen, flux, wavelength_A = next(iterator) # list of lambdas, list of fluxes, average wavelength
    total_flux = flex.sum(flux)
    x.append(total_flux)
    y.append(G.images_Gi[key])
  from matplotlib import pyplot as plt
  plt.plot(x,y,"r.")
  plt.show()
Example #8
def tst_all():
    from LS49.spectra.generate_spectra import spectra_simulation
    SS = spectra_simulation()
    #SS.plot_recast_images(20,energy=7150.)
    iterator = SS.generate_recast_renormalized_images(20,
                                                      energy=7150.,
                                                      total_flux=1e12)

    #
    fileout = "step3noiseimage_001.cbf"
    #
    C = microcrystal(Deff_A=4000, length_um=5.,
                     beam_diameter_um=3.)  # assume smaller than 10 um crystals
    mt = flex.mersenne_twister(seed=0)
    rand_ori = sqr(mt.random_double_r3_rotation_matrix())

    run_sim2smv(
        fileout=fileout,
        crystal=C,
        spectra=iterator,
        rotation=rand_ori,
    )
    import os
    assert os.path.isfile(fileout)
Example #9
    omptbx.omp_set_num_threads(workaround_nt)
    print("## hello from rank %d of %d"%(rank,size),"with omp_threads=",omp_get_num_procs())
    

    ## assign jobs
    rank_in_pdb, size_in_pdb, ranks_in_pdb = jobAssign(size=size, num_pdb=simparams.num_pdbs, num_img=simparams.num_img[0])


    import datetime
    start_elapse = time.time()

    if rank == 0:
        print("Rank 0 time", datetime.datetime.now())
        from LS49.spectra.generate_spectra import spectra_simulation

        SS = spectra_simulation()
        C = microcrystal(Deff_A = simparams.Deff_A, length_um = simparams.length_um, beam_diameter_um = simparams.beam_diameter_um)   
        # assume smaller than 10 um crystals

        mt = flex.mersenne_twister(seed=0)
        random_orientations = []
        for iteration in range( sum(simparams.num_img) ):
            random_orientations.append( mt.random_double_r3_rotation_matrix() )
        
        # for ii in range(10): print("## TOP 10 orientations = ", random_orientations[ii])
        print("## total orientations = ", len(random_orientations))
        transmitted_info = dict(spectra = SS, crystal = C, random_orientations = random_orientations)

        for idx_pdb in range( len(simparams.pdb_files) ):
            save_folder = "./" + simparams.prefix + "_" + str(idx_pdb).zfill(3)
            if not os.path.isdir(save_folder):
Example #10
File: LY99_batch.py Project: dwpaley/LS49
def run_LY99_batch(test_without_mpi=False):
    params, options = parse_input()
    start_elapse = time()  # reference for the "elapsed after srun startup" report near the end
    log_by_rank = bool(int(os.environ.get("LOG_BY_RANK", 0)))
    rank_profile = bool(int(os.environ.get("RANK_PROFILE", 1)))
    if log_by_rank:
        import io, sys
    if rank_profile:
        import cProfile
        pr = cProfile.Profile()
        pr.enable()

    if test_without_mpi:
        from LS49.adse13_196.mock_mpi import mpiEmulator
        MPI = mpiEmulator()
    else:
        from libtbx.mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    import omptbx
    workaround_nt = int(os.environ.get("OMP_NUM_THREADS", 1))
    omptbx.omp_set_num_threads(workaround_nt)
    N_total = int(os.environ["N_SIM"])  # number of items to simulate
    N_stride = size  # total number of worker tasks
    print("hello from rank %d of %d" % (rank, size), "with omp_threads=",
          omp_get_num_procs())
    import datetime
    start_comp = time()

    # now inside the Python imports, begin energy channel calculation

    wavelength_A = 1.74  # general ballpark X-ray wavelength in Angstroms
    wavlen = flex.double([12398.425 / (7070.5 + w) for w in range(100)])
    direct_algo_res_limit = 1.7

    local_data = data()  # later put this through broadcast

    GF = gen_fmodel(resolution=direct_algo_res_limit,
                    pdb_text=local_data.get("pdb_lines"),
                    algorithm="fft",
                    wavelength=wavelength_A)
    GF.set_k_sol(0.435)
    GF.make_P1_primitive()

    # Generating sf for my wavelengths
    sfall_channels = {}
    for x in range(len(wavlen)):
        if rank > len(wavlen): break
        if x % size != rank: continue

        GF.reset_wavelength(wavlen[x])
        GF.reset_specific_at_wavelength(
            label_has="FE1",
            tables=local_data.get("Fe_oxidized_model"),
            newvalue=wavlen[x])
        GF.reset_specific_at_wavelength(
            label_has="FE2",
            tables=local_data.get("Fe_reduced_model"),
            newvalue=wavlen[x])
        sfall_channels[x] = GF.get_amplitudes()

    reports = comm.gather(sfall_channels, root=0)
    if rank == 0:
        sfall_channels = {}
        for report in reports:
            sfall_channels.update(report)
    comm.barrier()

    print(
        rank, time(),
        "finished with the calculation of channels, now construct single broadcast"
    )

    if rank == 0:
        print("Rank 0 time", datetime.datetime.now())
        from LS49.spectra.generate_spectra import spectra_simulation
        from LS49.adse13_196.revapi.LY99_pad import microcrystal
        print("hello2 from rank %d of %d" % (rank, size))
        SS = spectra_simulation()
        C = microcrystal(
            Deff_A=4000, length_um=4.,
            beam_diameter_um=1.0)  # assume smaller than 10 um crystals
        from LS49 import legacy_random_orientations
        random_orientations = legacy_random_orientations(N_total)
        transmitted_info = dict(spectra=SS,
                                crystal=C,
                                sfall_info=sfall_channels,
                                random_orientations=random_orientations)
    else:
        transmitted_info = None
    transmitted_info = comm.bcast(transmitted_info, root=0)
    comm.barrier()
    parcels = list(range(rank, N_total, N_stride))

    print(rank, time(),
          "finished with single broadcast, now set up the rank logger")

    if log_by_rank:
        expand_dir = os.path.expandvars(params.logger.outdir)
        log_path = os.path.join(expand_dir, "rank_%d.log" % rank)
        error_path = os.path.join(expand_dir, "rank_%d.err" % rank)
        #print("Rank %d redirecting stdout/stderr to"%rank, log_path, error_path)
        sys.stdout = io.TextIOWrapper(open(log_path, 'ab', 0),
                                      write_through=True)
        sys.stderr = io.TextIOWrapper(open(error_path, 'ab', 0),
                                      write_through=True)

    print(
        rank, time(),
        "finished with the rank logger, now construct the GPU cache container")

    import random
    gpu_instance = get_exascale("gpu_instance", params.context)
    gpu_energy_channels = get_exascale("gpu_energy_channels", params.context)

    gpu_run = gpu_instance(deviceId=rank %
                           int(os.environ.get("DEVICES_PER_NODE", 1)))
    gpu_channels_singleton = gpu_energy_channels(
        deviceId=gpu_run.get_deviceID())
    # singleton will instantiate, regardless of gpu, device count, or exascale API

    comm.barrier()
    while len(parcels) > 0:
        idx = random.choice(parcels)
        cache_time = time()
        print("idx------start-------->", idx, "rank", rank, time())
        # if rank==0: os.system("nvidia-smi")
        tst_one(
            image=idx,
            spectra=transmitted_info["spectra"],
            crystal=transmitted_info["crystal"],
            random_orientation=transmitted_info["random_orientations"][idx],
            sfall_channels=transmitted_info["sfall_info"],
            gpu_channels_singleton=gpu_channels_singleton,
            rank=rank,
            params=params)
        parcels.remove(idx)
        print("idx------finis-------->", idx, "rank", rank, time(), "elapsed",
              time() - cache_time)
    comm.barrier()
    del gpu_channels_singleton
    # avoid Kokkos allocation "device_Fhkl" being deallocated after Kokkos::finalize was called
    print("Overall rank", rank, "at", datetime.datetime.now(),
          "seconds elapsed after srun startup %.3f" % (time() - start_elapse))
    print("Overall rank", rank, "at", datetime.datetime.now(),
          "seconds elapsed after Python imports %.3f" % (time() - start_comp))
    if rank_profile:
        pr.disable()
        pr.dump_stats("cpu_%d.prof" % rank)
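Example #10 splits the per-channel structure-factor calculation across MPI ranks with a simple modulo rule (x % size != rank skips the channel) and then merges the per-rank dictionaries on rank 0 with comm.gather. Below is a minimal standalone sketch of that divide-and-gather pattern, using the same libtbx.mpi4py wrapper as the example but with a placeholder computation.

# Minimal sketch of the modulo work division and gather/merge used in Example #10
# (the squared value stands in for GF.get_amplitudes()).
from libtbx.mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

my_channels = {}
for x in range(100):  # 100 energy channels, as in the example
    if x % size != rank:
        continue  # another rank owns this channel
    my_channels[x] = x * x  # placeholder for the real amplitude calculation

reports = comm.gather(my_channels, root=0)
if rank == 0:
    merged = {}
    for report in reports:
        merged.update(report)  # rank 0 now holds all 100 channels
comm.barrier()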
Example #11
    def run(self):

        self.parse_input()

        N_total = 100000  # self.params.N_total # number of items to simulate, nominally 100000
        logical_rank = self.mpi_helper.rank
        logical_size = self.mpi_helper.size
        if self.mpi_helper.rank == 0 and self.mpi_helper.size == 1:  # special case of testing it
            try:
                logical_rank = self.params.tester.rank
                logical_size = self.params.tester.size
            except Exception:
                pass
        N_stride = int(math.ceil(
            N_total / logical_size))  # total number of tasks per rank
        print("hello from rank %d of %d with stride %d" %
              (logical_rank, logical_size, N_stride))

        #from scitbx.lbfgs.tst_mpi_split_evaluator import mpi_split_evaluator_run
        #from scitbx.lbfgs.tst_mpi_split_evaluator import run_mpi as simple_tester
        #simple_tester()

        if self.params.starting_model.algorithm == "to_file":
            if self.mpi_helper.rank == 0:
                HKL_lookup, static_fcalcs = get_static_fcalcs_with_HKL_lookup()
                from LS49.work2_for_aca_lsq.remake_range_intensities_with_complex \
                   import get_intensity_structure
                model_intensities = get_intensity_structure(
                    static_fcalcs,
                    FE1_model=Fe_oxidized_model,
                    FE2_model=Fe_reduced_model)
                with (open(self.params.starting_model.filename, "wb")) as out:
                    pickle.dump(HKL_lookup, out, pickle.HIGHEST_PROTOCOL)
                    pickle.dump(static_fcalcs, out, pickle.HIGHEST_PROTOCOL)
                    pickle.dump(model_intensities, out,
                                pickle.HIGHEST_PROTOCOL)
            return
        else:
            if self.mpi_helper.rank == 0:
                with (open(self.params.starting_model.filename, "rb")) as inp:
                    print("the starting model (used for channel weighting) is",
                          self.params.starting_model.filename)
                    HKL_lookup = pickle.load(inp)
                    static_fcalcs = pickle.load(inp)
                    model_intensities = pickle.load(inp)
                    from LS49.spectra.generate_spectra import spectra_simulation
                    from LS49.sim.step5_pad import microcrystal

                shuffA = list(range(N_total))
                import random
                random.shuffle(shuffA)

                transmitted_info = dict(
                    HKL_lookup=HKL_lookup,
                    static_fcalcs=static_fcalcs,
                    model_intensities=model_intensities,
                    spectra_simulation=spectra_simulation(),
                    crystal=microcrystal(Deff_A=4000,
                                         length_um=4.,
                                         beam_diameter_um=1.0),
                    shuffA=shuffA)
            else:
                transmitted_info = None
        print("before braodcast with ", self.mpi_helper.rank,
              self.mpi_helper.size)
        transmitted_info = self.mpi_helper.comm.bcast(transmitted_info, root=0)
        self.mpi_helper.comm.barrier()
        print("after barrier")

        # -----------------------------------------------------------------------
        if self.mpi_helper.rank == 0:
            print("Finding initial G and abc factors")
        per_rank_items = []
        per_rank_keys = []
        per_rank_G = []
        min_spots = 3
        N_input = 0
        import os, omptbx  # cori workaround, which does not get OMP_NUM_THREADS from environment
        workaround_nt = int(os.environ.get("OMP_NUM_THREADS", 1))
        omptbx.omp_set_num_threads(workaround_nt)
        for item, key in get_items(logical_rank, N_total, N_stride,
                                   transmitted_info["shuffA"],
                                   self.params.cohort):
            N_input += 1
            if len(item) >= min_spots:
                try:
                    FOI = fit_one_image_multispot(
                        key=key,
                        list_of_images=item,
                        HKL_lookup=transmitted_info["HKL_lookup"],
                        model_intensities=transmitted_info[
                            "model_intensities"],
                        spectra=transmitted_info["spectra_simulation"],
                        crystal=transmitted_info["crystal"])
                except RuntimeError as e:
                    # no recovery from LBFGS error, skip event
                    continue
                metric_P1, metric_C2 = FOI.DRM.get_current_angular_offsets(
                    FOI.x[-4:-1])
                print(
                    """LLG Image %06d on %d Bragg spots NLL    channels F = %9.1f angular offsets in P1 and C2 (degrees): %8.5f %8.5f"""
                    %
                    (key, len(item), FOI.compute_functional_and_gradients()[0],
                     metric_P1, metric_C2), FOI.x[-4:-1])

                # reporting out results to new abc_coverage pickles.  Use the old one "item" as a template:
                #    item := [<LS49.work2_for_aca_lsq.abc_background.fit_roi_multichannel>,... one for each spot]
                # each fit_roi has the following attributes and we modify them as follows:
                #        'a', the abcG parameters of the original one-spot fit, unused
                #        'asu_idx_C2_setting', unmodified
                #        'image_no', same as key, unmodified
                #        'n', number of parameters for one-spot fit, unused
                #        'orig_idx_C2_setting', unmodified
                #        'sb_data', original shoebox data [integers as floats], passed along unmodified
                #        'simtbx_P1_miller', unmodified
                #        'simtbx_intensity_7122', unmodified
                #        'x', the abcG parameters of the original one-spot fit, unused
                # modify these:
                #        'bkgrd_a', the spot abc parameters output here, to be passed on to global data fit
                for ispot in range(len(item)):
                    item[ispot].bkgrd_a = FOI.x[3 * ispot:3 * (ispot + 1)]
                    #        'channels', need to save the new data that was set during update_roi_model_pixels_with_current_rotation()
                    #                    but only for the Amat, not the derivatives.
                    item[ispot].channels = FOI.new_calc2_dict_last_round[
                        ispot]["channels"]
                    #        'roi', get new region of interest summation that was set during update_roi_model_pixels_with_current_rotation()
                    item[ispot].roi = FOI.roi_model_pixels[ispot]

                # put the newly refined background model back into the item
                per_rank_keys.append(key)
                per_rank_G.append(FOI.a[-1])

                print(
                    "pickling modified abc_coverage file for key %d in rank %d"
                    % (key, logical_rank), )
                with open(abc_glob_pixel_ref % (key), "wb") as F:
                    pickle.dump(item, F, pickle.HIGHEST_PROTOCOL)

        print("rank %d has %d refined images" %
              (logical_rank, len(per_rank_keys)))

        N_ranks = self.mpi_helper.comm.reduce(1, self.mpi_helper.MPI.SUM, 0)
        N_refined_images = self.mpi_helper.comm.reduce(len(per_rank_keys),
                                                       self.mpi_helper.MPI.SUM,
                                                       0)
        N_input_images = self.mpi_helper.comm.reduce(N_input,
                                                     self.mpi_helper.MPI.SUM,
                                                     0)
        self.mpi_helper.comm.barrier()
        if self.mpi_helper.rank == 0:
            print("final report %d ranks, %d input images, %d refined models" %
                  (N_ranks, N_input_images, N_refined_images))
            print("Finished finding initial G and abc factors")

        self.mpi_helper.comm.barrier()
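The final accounting in Example #11 uses MPI reductions: every rank contributes 1 (to count ranks) plus its local tallies, and rank 0 receives the sums. A minimal sketch of that reduction pattern, again assuming the libtbx.mpi4py wrapper stands in for the example's mpi_helper:

# Minimal sketch of the sum-reduction accounting at the end of Example #11.
from libtbx.mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
local_refined = 3  # placeholder for len(per_rank_keys) on this rank

n_ranks = comm.reduce(1, MPI.SUM, 0)               # each rank contributes 1
n_refined = comm.reduce(local_refined, MPI.SUM, 0)  # sum of per-rank tallies
comm.barrier()
if rank == 0:
    print("final report: %d ranks, %d refined models" % (n_ranks, n_refined))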