Code example #1
File: replot.py  Project: monarin/LS49
def plot_em(self, key, values):
    self.x = values  # XXX
    if not self.plot_plt_imported:
        from matplotlib import pyplot as plt
        self.plt = plt
        if self.params.LLG_evaluator.title is None:
            self.plt.ion()  # interactive - on
        self.plot_plt_imported = True
    if self.params.LLG_evaluator.title is None:
        self.plt.cla()  # clear the current axes
    fine = self.params.LLG_evaluator.plot_interpolation  # plot the non-modeled f values
    fig = self.plt.figure()

    # ground truth
    from LS49.sim.step5_pad import full_path
    GS = george_sherrell(full_path("data_sherrell/pf-rd-ox_fftkk.out"))
    GS.plot_them(self.plt, f1="b-", f2="b-")
    GS = george_sherrell(full_path("data_sherrell/pf-rd-red_fftkk.out"))
    GS.plot_them(self.plt, f1="r-", f2="r-")
    GS = george_sherrell(
        full_path("data_sherrell/Fe_fake.dat"))  # with interpolated points
    GS.plot_them(self.plt, f1="m-", f2="m-")

    # starting values
    GS = george_sherrell_star(fp=self.starting_params_FE1[0:100],
                              fdp=self.starting_params_FE1[100:200])
    GS.plot_them(fine, self.plt, f1="bx", f2="bx")
    GS = george_sherrell_star(fp=self.starting_params_FE2[0:100],
                              fdp=self.starting_params_FE2[100:200])
    GS.plot_them(fine, self.plt, f1="rx", f2="rx")

    # current values
    GS = george_sherrell_star(fp=self.x[0:100], fdp=self.x[100:200])
    GS.plot_them(fine, self.plt, f1="b.", f2="b.")
    GS = george_sherrell_star(fp=self.x[200:300], fdp=self.x[300:400])
    GS.plot_them(fine, self.plt, f1="r.", f2="r.")

    self.plt.axes().set_xlim((7102, 7137))  # XXX 7088,7152
    self.plt.axes().set_ylim((-8.6, 4.5))
    self.plt.title("Macrocycle %d Iteration %d" %
                   (self.macrocycle, self.iteration))  # XXX
    if self.params.LLG_evaluator.title is not None:
        macrocycle_tell = "" if self.macrocycle is None else "macrocycle_%02d_" % self.macrocycle
        fig.savefig(
            os.path.join(
                self.params.LLG_evaluator.plot_outdir,
                "replot_%s_%siteration_%02d.png" %
                (self.params.LLG_evaluator.title, macrocycle_tell,
                 self.iteration)))
    else:
        self.plt.draw()
        self.plt.pause(0.2)
    fig.clf()  # clear figure XXX
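
plot_em draws everything through a module-level pyplot handle and either refreshes an interactive window (plt.ion() / draw() / pause()) or writes a numbered PNG, depending on whether LLG_evaluator.title is set. Below is a minimal, self-contained sketch of that same show-or-save pattern using synthetic data; the function name show_or_save, the interactive flag, and the outdir path are illustrative assumptions, not part of LS49.

import os
from matplotlib import pyplot as plt

def show_or_save(x, y, iteration, interactive=True, outdir="."):
    # One figure per call, mirroring plot_em's fig = plt.figure() ... fig.clf()
    fig = plt.figure()
    plt.plot(x, y, "b.")
    plt.title("Iteration %d" % iteration)
    if interactive:
        plt.ion()       # interactive mode: reuse the same window
        plt.draw()
        plt.pause(0.2)  # let the GUI event loop repaint
    else:
        fig.savefig(os.path.join(outdir, "iteration_%02d.png" % iteration))
    fig.clf()           # clear the figure so the next call starts fresh

if __name__ == "__main__":
    xs = list(range(20))
    show_or_save(xs, [v * v for v in xs], iteration=0, interactive=False)
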
Code example #2
File: replot.py  Project: monarin/LS49
def plot_em_broken(self, key, values):
    if self.params.LLG_evaluator.plot_scope == "P1":
        scope = dict(xlimits=(7102, 7138),
                     fdp_ylimits=(0.1, 4.5),
                     fp_ylimits=(-8.6, -5.1))
    elif self.params.LLG_evaluator.plot_scope == "P2":
        scope = dict(xlimits=(7068, 7172),
                     fdp_ylimits=(-2.1, 6.5),
                     fp_ylimits=(-10.6, -3.1))
    self.x = values  # XXX
    cc = CC_to_ground_truth()
    if not self.plot_plt_imported:
        from matplotlib import pyplot as plt
        self.plt = plt
        if self.params.LLG_evaluator.title is None:
            self.plt.ion()  # interactive - on
        self.plot_plt_imported = True
    if self.params.LLG_evaluator.title is None:
        self.plt.cla()  # clear the current axes
    fine = self.params.LLG_evaluator.plot_interpolation  # plot the non-modeled f values
    fig, (ax1, ax2) = self.plt.subplots(2, 1, sharex=True, squeeze=True)

    # ground truth
    from LS49.sim.step5_pad import full_path
    GS = george_sherrell(full_path("data_sherrell/pf-rd-ox_fftkk.out"))
    GS.plot_them(ax1, f1="b-", f2="b-")
    GS.plot_them(ax2, f1="b-", f2="b-")
    cc.get_gt(GS, imodel=0)
    GS = george_sherrell(full_path("data_sherrell/pf-rd-red_fftkk.out"))
    GS.plot_them(ax1, f1="r-", f2="r-")
    GS.plot_them(ax2, f1="r-", f2="r-")
    cc.get_gt(GS, imodel=1)
    GS = george_sherrell(
        full_path("data_sherrell/Fe_fake.dat"))  # with interpolated points
    GS.plot_them(ax1, f1="m-", f2="m-")
    GS.plot_them(ax2, f1="m-", f2="m-")

    # starting values
    GS = george_sherrell_star(fp=self.starting_params_FE1[0:100],
                              fdp=self.starting_params_FE1[100:200])
    GS.plot_them(fine, ax1, f1="bx", f2="bx")
    GS.plot_them(fine, ax2, f1="bx", f2="bx")
    GS = george_sherrell_star(fp=self.starting_params_FE2[0:100],
                              fdp=self.starting_params_FE2[100:200])
    GS.plot_them(fine, ax1, f1="rx", f2="rx")
    GS.plot_them(fine, ax2, f1="rx", f2="rx")

    # current values
    GS = george_sherrell_star(fp=self.x[0:100], fdp=self.x[100:200])
    GS.plot_them(fine, ax1, f1="b.", f2="b.")
    GS.plot_them(fine, ax2, f1="b.", f2="b.")
    cc.get_data(GS, imodel=0)
    GS = george_sherrell_star(fp=self.x[200:300], fdp=self.x[300:400])
    GS.plot_them(fine, ax1, f1="r.", f2="r.")
    GS.plot_them(fine, ax2, f1="r.", f2="r.")
    cc.get_data(GS, imodel=1)

    #self.plt.axes().set_xlim((7102,7137)) # XXX 7088,7152
    ax1.set_xlim(scope["xlimits"])
    ax2.set_xlabel("Energy (eV)")
    ax1.set_ylabel("∆ f ′′")
    ax2.set_ylabel("∆ f ′")
    ax2.set_ylim(scope["fp_ylimits"])
    ax1.set_ylim(scope["fdp_ylimits"])
    ax1.set_title("Macrocycle %d Iteration %d" %
                  (self.macrocycle, self.iteration))  # XXX
    ax1.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax1.xaxis.tick_top()
    ax1.tick_params(labeltop=False)  # don't put tick labels at the top
    ax2.xaxis.tick_bottom()
    d = .015  # how big to make the diagonal lines in axes coordinates
    # arguments to pass to plot, just so we don't keep repeating them
    kwargs = dict(transform=ax1.transAxes,
                  color='k',
                  clip_on=False,
                  linewidth=1)
    ax1.plot((-d, +d), (-d, +d), **kwargs)  # top-left diagonal
    ax1.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal

    kwargs.update(transform=ax2.transAxes)  # switch to the bottom axes
    ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)  # bottom-left diagonal
    ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

    if self.params.LLG_evaluator.title is not None:
        macrocycle_tell = "" if self.macrocycle is None else "macrocycle_%02d_" % self.macrocycle
        fig.savefig(
            os.path.join(
                self.params.LLG_evaluator.plot_outdir,
                "replot_%s_%siteration_%02d.png" %
                (self.params.LLG_evaluator.title, macrocycle_tell,
                 self.iteration)))
        if self.macrocycle in [
                1, 2, 3
        ] and self.iteration == self.params.LLG_evaluator.max_calls:
            fig.savefig(
                os.path.join(
                    self.params.LLG_evaluator.plot_outdir,
                    "replot_%s_%siteration_%02d.pdf" %
                    (self.params.LLG_evaluator.title, macrocycle_tell,
                     self.iteration)))
        print(
            "%s_%siteration_%02d CC=%6.3f%%" %
            (self.params.LLG_evaluator.title, macrocycle_tell, self.iteration,
             100 * cc.get_cc()), "CC_fp = %6.3f%% CC_fdp = %6.3f%%" %
            (100 * cc.get_cc_fp(), 100 * cc.get_cc_fdp()),
            "rmsd fp: %6.4f, rmsd fdp %6.4f" %
            (cc.get_rmsd_fp(), cc.get_rmsd_fdp()))
    else:
        self.plt.draw()
        self.plt.pause(0.2)
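
plot_em_broken uses the standard matplotlib broken-axis recipe: two vertically stacked panels sharing the energy axis, with the facing spines hidden and short diagonal strokes marking the break, so the f″ curves (top) and f′ curves (bottom) can use very different y ranges. Here is a minimal, self-contained version of just that recipe; the toy Lorentzian curves and the output file name are assumptions for illustration, not LS49 data.

import numpy as np
from matplotlib import pyplot as plt

x = np.linspace(7068, 7172, 200)
fdp = 2.0 + 2.0 / (1.0 + ((x - 7120.0) / 5.0) ** 2)   # toy f'' curve
fp = -8.0 + 3.0 / (1.0 + ((x - 7120.0) / 5.0) ** 2)   # toy f' curve

fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(x, fdp, "b-")
ax2.plot(x, fp, "r-")
ax1.set_ylim(0.0, 4.5)       # upper panel: f'' range
ax2.set_ylim(-8.6, -5.1)     # lower panel: f' range
ax2.set_xlabel("Energy (eV)")

# hide the spines that face each other between the two panels
ax1.spines["bottom"].set_visible(False)
ax2.spines["top"].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(labeltop=False)   # keep tick marks on top but drop their labels
ax2.xaxis.tick_bottom()

# diagonal "break" markers, drawn in axes coordinates so they ignore data limits
d = 0.015
kwargs = dict(transform=ax1.transAxes, color="k", clip_on=False, linewidth=1)
ax1.plot((-d, +d), (-d, +d), **kwargs)          # top-left diagonal
ax1.plot((1 - d, 1 + d), (-d, +d), **kwargs)    # top-right diagonal
kwargs.update(transform=ax2.transAxes)          # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)    # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

fig.savefig("broken_axis_sketch.png")
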
Code example #3
File: macrocycle_refinery.py  Project: monarin/LS49
    def run(self):

        self.parse_input()

        N_total = self.params.N_total  # number of items to simulate, nominally 100000
        logical_rank = self.mpi_helper.rank
        logical_size = self.mpi_helper.size
        if self.mpi_helper.rank == 0 and self.mpi_helper.size == 1:  # special case of testing it
            try:
                logical_rank = self.params.tester.rank
                logical_size = self.params.tester.size
            except Exception:
                pass
        N_stride = int(math.ceil(
            N_total / logical_size))  # total number of tasks per rank
        print("hello from rank %d of %d with stride %d" %
              (logical_rank, logical_size, N_stride))

        from scitbx.lbfgs.tst_mpi_split_evaluator import mpi_split_evaluator_run

        assert self.params.starting_model.algorithm!="to_file", \
               "run new_global__fdp_refinery.py first, to generate starting model"
        if self.mpi_helper.rank == 0:
            with open(self.params.starting_model.filename, "rb") as inp:
                HKL_lookup = pickle.load(inp)
                static_fcalcs = pickle.load(inp)
                model_intensities = pickle.load(inp)

            transmitted_info = dict(HKL_lookup=HKL_lookup,
                                    static_fcalcs=static_fcalcs,
                                    model_intensities=model_intensities)
        else:
            transmitted_info = None
        transmitted_info = self.mpi_helper.comm.bcast(transmitted_info, root=0)
        self.mpi_helper.comm.barrier()
        # macrocycle 1 ---------------------------------------------------------
        # generate model_intensities table based on initial conditions
        if logical_rank == 0 or self.mpi_helper.size == 1:
            FE1 = local_data.get(self.params.starting_model.preset.FE1)
            FE2 = local_data.get(self.params.starting_model.preset.FE2)

            from LS49.work2_for_aca_lsq.remake_range_intensities_with_complex \
               import get_intensity_structure
            new_model_intensities = get_intensity_structure(
                transmitted_info["static_fcalcs"],
                FE1_model=FE1,
                FE2_model=FE2)
            broadcast_info = new_model_intensities
        else:
            broadcast_info = None
        current_model_intensities = self.mpi_helper.comm.bcast(broadcast_info,
                                                               root=0)
        self.mpi_helper.comm.barrier()

        # -----------------------------------------------------------------------
        if self.mpi_helper.rank == 0:
            print("Finding initial G and abc factors")
        per_rank_items = []
        per_rank_keys = []
        per_rank_G = []
        min_spots = 3
        N_input = 0
        for item, key in get_items(logical_rank, N_total, N_stride,
                                   self.params.cohort):
            N_input += 1
            if len(item) >= min_spots:
                try:
                    FOI = fit_one_image_multispot(
                        list_of_images=item,
                        HKL_lookup=transmitted_info["HKL_lookup"],
                        model_intensities=current_model_intensities)
                except Exception as e:
                    print("FAILing fit_roi_multichannel on", e)
                    continue
                print(
                    """LLG Image %06d on %d Bragg spots NLL    channels F = %9.1f"""
                    % (key, len(item),
                       FOI.compute_functional_and_gradients()[0]))
                # put the newly refined background model back into the item
                per_rank_items.append(item)
                per_rank_keys.append(key)
                for ihkl in range(FOI.n_spots):
                    per_rank_items[-1][ihkl].bkgrd_a = flex.double([
                        FOI.a[3 * ihkl + 0], FOI.a[3 * ihkl + 1],
                        FOI.a[3 * ihkl + 2]
                    ])
                per_rank_G.append(FOI.a[-1])

        print("rank %d has %d refined images" %
              (logical_rank, len(per_rank_items)))

        N_ranks = self.mpi_helper.comm.reduce(1, self.mpi_helper.MPI.SUM, 0)
        N_refined_images = self.mpi_helper.comm.reduce(len(per_rank_items),
                                                       self.mpi_helper.MPI.SUM,
                                                       0)
        N_input_images = self.mpi_helper.comm.reduce(N_input,
                                                     self.mpi_helper.MPI.SUM,
                                                     0)
        self.mpi_helper.comm.barrier()
        if self.mpi_helper.rank == 0:
            print("final report %d ranks, %d input images, %d refined models" %
                  (N_ranks, N_input_images, N_refined_images))
            print("Finished finding initial G and abc factors")
            print("Initiating the full minimization")
        # -----------------------------------------------------------------------

        W = rank_0_fit_all_f(
            self.params,
            FE1_model=local_data.get(self.params.starting_model.preset.FE1),
            FE2_model=local_data.get(self.params.starting_model.preset.FE2))
        W.reinitialize(logical_rank,
                       self.mpi_helper.size,
                       per_rank_items,
                       per_rank_keys,
                       per_rank_G,
                       transmitted_info["HKL_lookup"],
                       transmitted_info["static_fcalcs"],
                       current_model_intensities,
                       force_recompute=True)
        W.set_macrocycle(1)
        minimizer = mpi_split_evaluator_run(
            target_evaluator=W,
            termination_params=scitbx.lbfgs.termination_parameters(
                traditional_convergence_test=True,
                traditional_convergence_test_eps=1.e-2,
                max_calls=self.params.LLG_evaluator.max_calls))
        if logical_rank == 0:
            print("Minimizer ended at iteration", W.iteration)
        self.mpi_helper.comm.barrier()
        # 2nd macrocycle---------------------------------------------------------
        # generate model_intensities table based on initial conditions
        FE1 = george_sherrell_star(fp=W.x[0:100], fdp=W.x[100:200])
        FE2 = george_sherrell_star(fp=W.x[200:300], fdp=W.x[300:400])
        if logical_rank == 0 or self.mpi_helper.size == 1:

            from LS49.work2_for_aca_lsq.remake_range_intensities_with_complex \
               import get_intensity_structure
            new_model_intensities = get_intensity_structure(
                transmitted_info["static_fcalcs"],
                FE1_model=FE1,
                FE2_model=FE2)
            broadcast_info = new_model_intensities
        else:
            broadcast_info = None
        current_model_intensities = self.mpi_helper.comm.bcast(broadcast_info,
                                                               root=0)
        self.mpi_helper.comm.barrier()

        # -----------------------------------------------------------------------
        if self.mpi_helper.rank == 0:
            print("Finding initial G and abc factors")
        per_rank_items = []
        per_rank_keys = []
        per_rank_G = []
        min_spots = 3
        N_input = 0
        for item, key in get_items(logical_rank, N_total, N_stride,
                                   self.params.cohort):
            N_input += 1
            if len(item) >= min_spots:
                try:
                    FOI = fit_one_image_multispot(
                        list_of_images=item,
                        HKL_lookup=transmitted_info["HKL_lookup"],
                        model_intensities=current_model_intensities)
                except Exception as e:
                    print("FAILing fit_roi_multichannel on", e)
                    continue
                print(
                    """LLG Image %06d on %d Bragg spots NLL    channels F = %9.1f"""
                    % (key, len(item),
                       FOI.compute_functional_and_gradients()[0]))
                # put the newly refined background model back into the item
                per_rank_items.append(item)
                per_rank_keys.append(key)
                for ihkl in range(FOI.n_spots):
                    per_rank_items[-1][ihkl].bkgrd_a = flex.double([
                        FOI.a[3 * ihkl + 0], FOI.a[3 * ihkl + 1],
                        FOI.a[3 * ihkl + 2]
                    ])
                per_rank_G.append(FOI.a[-1])

        print("rank %d has %d refined images" %
              (logical_rank, len(per_rank_items)))

        N_ranks = self.mpi_helper.comm.reduce(1, self.mpi_helper.MPI.SUM, 0)
        N_refined_images = self.mpi_helper.comm.reduce(len(per_rank_items),
                                                       self.mpi_helper.MPI.SUM,
                                                       0)
        N_input_images = self.mpi_helper.comm.reduce(N_input,
                                                     self.mpi_helper.MPI.SUM,
                                                     0)
        self.mpi_helper.comm.barrier()
        if self.mpi_helper.rank == 0:
            print("final report %d ranks, %d input images, %d refined models" %
                  (N_ranks, N_input_images, N_refined_images))
            print("Finished finding initial G and abc factors")
            print("Initiating the full minimization")
        # -----------------------------------------------------------------------
        W_previous = W
        W = rank_0_fit_all_f(self.params, FE1_model=FE1, FE2_model=FE2)
        W.reinitialize(logical_rank,
                       self.mpi_helper.size,
                       per_rank_items,
                       per_rank_keys,
                       per_rank_G,
                       transmitted_info["HKL_lookup"],
                       transmitted_info["static_fcalcs"],
                       current_model_intensities,
                       force_recompute=True)
        W.set_macrocycle(2, W_previous.starting_params_FE1,
                         W_previous.starting_params_FE2)
        minimizer = mpi_split_evaluator_run(
            target_evaluator=W,
            termination_params=scitbx.lbfgs.termination_parameters(
                traditional_convergence_test=True,
                traditional_convergence_test_eps=1.e-2,
                max_calls=self.params.LLG_evaluator.max_calls))
        if logical_rank == 0:
            print("Minimizer ended at iteration", W.iteration)
        self.mpi_helper.comm.barrier()
        # 3rd macrocycle-----------------------------------------------------------
        # generate model_intensities table based on initial conditions
        FE1 = george_sherrell_star(fp=W.x[0:100], fdp=W.x[100:200])
        FE2 = george_sherrell_star(fp=W.x[200:300], fdp=W.x[300:400])
        if logical_rank == 0 or self.mpi_helper.size == 1:

            from LS49.work2_for_aca_lsq.remake_range_intensities_with_complex \
               import get_intensity_structure
            new_model_intensities = get_intensity_structure(
                transmitted_info["static_fcalcs"],
                FE1_model=FE1,
                FE2_model=FE2)
            broadcast_info = new_model_intensities
        else:
            broadcast_info = None
        current_model_intensities = self.mpi_helper.comm.bcast(broadcast_info,
                                                               root=0)
        self.mpi_helper.comm.barrier()

        # -----------------------------------------------------------------------
        if self.mpi_helper.rank == 0:
            print("Finding initial G and abc factors")
        per_rank_items = []
        per_rank_keys = []
        per_rank_G = []
        min_spots = 3
        N_input = 0
        for item, key in get_items(logical_rank, N_total, N_stride,
                                   self.params.cohort):
            N_input += 1
            if len(item) >= min_spots:
                try:
                    FOI = fit_one_image_multispot(
                        list_of_images=item,
                        HKL_lookup=transmitted_info["HKL_lookup"],
                        model_intensities=current_model_intensities)
                except Exception as e:
                    print("FAILing fit_roi_multichannel on", e)
                    continue
                print(
                    """LLG Image %06d on %d Bragg spots NLL    channels F = %9.1f"""
                    % (key, len(item),
                       FOI.compute_functional_and_gradients()[0]))
                # put the newly refined background model back into the item
                per_rank_items.append(item)
                per_rank_keys.append(key)
                for ihkl in range(FOI.n_spots):
                    per_rank_items[-1][ihkl].bkgrd_a = flex.double([
                        FOI.a[3 * ihkl + 0], FOI.a[3 * ihkl + 1],
                        FOI.a[3 * ihkl + 2]
                    ])
                per_rank_G.append(FOI.a[-1])

        print("rank %d has %d refined images" %
              (logical_rank, len(per_rank_items)))

        N_ranks = self.mpi_helper.comm.reduce(1, self.mpi_helper.MPI.SUM, 0)
        N_refined_images = self.mpi_helper.comm.reduce(len(per_rank_items),
                                                       self.mpi_helper.MPI.SUM,
                                                       0)
        N_input_images = self.mpi_helper.comm.reduce(N_input,
                                                     self.mpi_helper.MPI.SUM,
                                                     0)
        self.mpi_helper.comm.barrier()
        if self.mpi_helper.rank == 0:
            print("final report %d ranks, %d input images, %d refined models" %
                  (N_ranks, N_input_images, N_refined_images))
            print("Finished finding initial G and abc factors")
            print("Initiating the full minimization")
        # -----------------------------------------------------------------------
        W_previous = W
        W = rank_0_fit_all_f(self.params, FE1_model=FE1, FE2_model=FE2)
        W.reinitialize(logical_rank,
                       self.mpi_helper.size,
                       per_rank_items,
                       per_rank_keys,
                       per_rank_G,
                       transmitted_info["HKL_lookup"],
                       transmitted_info["static_fcalcs"],
                       current_model_intensities,
                       force_recompute=True)
        W.set_macrocycle(3, W_previous.starting_params_FE1,
                         W_previous.starting_params_FE2)
        minimizer = mpi_split_evaluator_run(
            target_evaluator=W,
            termination_params=scitbx.lbfgs.termination_parameters(
                traditional_convergence_test=True,
                traditional_convergence_test_eps=1.e-2,
                max_calls=self.params.LLG_evaluator.max_calls))
        if logical_rank == 0:
            print("Minimizer ended at iteration", W.iteration)
        self.mpi_helper.comm.barrier()
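
Each macrocycle in run() follows the same MPI pattern: rank 0 builds the model_intensities table and broadcasts it, every rank fits its own share of the images, and simple counters are summed back onto rank 0 for the "final report" line. The sketch below shows that pattern with mpi4py directly; the helpers build_table and process are hypothetical stand-ins for get_intensity_structure and fit_one_image_multispot, and the round-robin work split stands in for get_items with its N_stride block size.

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

N_total = 100      # total work items (nominally 100000 in the real run)

def build_table():
    # stand-in for get_intensity_structure, run only on rank 0
    return {"scale": 2.0}

def process(i, table):
    # stand-in for fitting one image against the broadcast table
    return i * table["scale"]

table = build_table() if rank == 0 else None
table = comm.bcast(table, root=0)          # every rank receives the same table
comm.barrier()

n_done = 0
for i in range(rank, N_total, size):       # simple round-robin split of the work
    process(i, table)
    n_done += 1

n_ranks = comm.reduce(1, op=MPI.SUM, root=0)
n_total_done = comm.reduce(n_done, op=MPI.SUM, root=0)
comm.barrier()
if rank == 0:
    print("final report %d ranks, %d items processed" % (n_ranks, n_total_done))
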