Example #1
    def _write_selected_resolutions(self) -> Path:
        """ Write selected resolutions to a YAML file.

        Args:
            None.
        Returns:
            Filename where the YAML file was written.
        """
        output: Dict[str, Any] = {}
        for event_activity in [
                params.EventActivity.central, params.EventActivity.semi_central
        ]:
            output[str(event_activity)] = {"values": {}, "errors": {}}
            for harmonic in self.harmonics_to_write:
                # We only want the VZERO values!
                for key_index, analysis in \
                        analysis_config.iterate_with_selected_objects(self.analyses,
                                                                      harmonic = harmonic,
                                                                      detector = "VZERO"):
                    logger.debug(
                        f"analysis.main_detector_name: {analysis.main_detector_name}"
                    )
                    value, error = analysis.selected_resolutions[event_activity.value_range]
                    output[str(event_activity)]["values"][f"R{harmonic}"] = value
                    output[str(event_activity)]["errors"][f"R{harmonic}"] = error

        y = yaml.yaml()
        filename = Path(self.output_info.output_prefix) / "resolution.yaml"
        with open(filename, "w") as f:
            y.dump(output, f)

        return filename
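
All of these examples drive their loops with analysis_config.iterate_with_selected_objects. As an illustration only (a sketch based on how the function is called in these examples, not the actual analysis_config implementation), a filtering iterator with the same call pattern could look like:

from typing import Any, Iterator, Mapping, Tuple

def iterate_with_selected_objects(
        analysis_objects: Mapping[Any, Any], **selections: Any) -> Iterator[Tuple[Any, Any]]:
    """ Yield (key_index, analysis) pairs whose key_index fields match all of the selections. """
    for key_index, obj in analysis_objects.items():
        # Keep only objects whose key_index matches every requested selection
        # (for example, harmonic = harmonic, detector = "VZERO" in Example #1).
        if all(getattr(key_index, name) == value for name, value in selections.items()):
            yield key_index, obj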
Example #2
    def _setup(self) -> bool:
        """ Setup the analysis tasks. """
        # Setup the analysis objects.
        input_hists: Dict[str, Any] = {}
        with self._progress_manager.counter(
                total=len(self.analyses),
                desc="Setting up:",
                unit="analysis objects") as setting_up:
            for key_index, analysis in analysis_config.iterate_with_selected_objects(
                    self.analyses):
                # We are effectively caching the values here.
                if not input_hists:
                    input_hists = histogram.get_histograms_in_list(
                        filename=analysis.input_filename,
                        list_name=analysis.input_list_name)

                # Setup input histograms and projectors.
                analysis.setup(input_hists=input_hists)

                # Cache the event counts for convenience.
                if not hasattr(self, "event_counts"):
                    # Determine the number of events in each centrality bin.
                    event_counts = input_hists["Centrality_selected"]
                    # Rebin from bin width of 1 to bin width of 10. We don't want to rescale because
                    # we're interested in the raw counts, not normalized by the bin width.
                    event_counts.Rebin(10)
                    self.event_counts = histogram.Histogram1D.from_existing_hist(
                        event_counts)

                # Keep track of progress
                setting_up.update()

        # Successfully setup the tasks
        return True
Example #3
    def setup(self) -> None:
        """ Setup and prepare the analysis objects.

        We reimplement it here to disable the file caching (which doesn't mix well with the tree based approach),
        and because we need the number of pt hard bins to setup the pt hard analysis.
        """
        # Setup the response matrix analysis objects and run the response matrix projectors
        # By the time that this step is complete, we should have all histograms.
        with self._progress_manager.counter(total=len(self.analyses),
                                            desc="Configuring and projecting:",
                                            unit="responses") as setting_up:
            for pt_hard_bin in self.selected_iterables["pt_hard_bin"]:
                logger.debug(f"pt_hard_bin: {pt_hard_bin}")
                for key_index, analysis in \
                        analysis_config.iterate_with_selected_objects(self.analyses, pt_hard_bin = pt_hard_bin):
                    # A cache of files isn't so straightforward here because we aren't working with ROOT files.
                    # Instead, we just neglect the cache.
                    logger.debug(f"key_index: {key_index}")
                    result = analysis.setup(input_hists=None)
                    if result is not True:
                        raise ValueError(
                            f"Setup of {key_index} analysis object failed.")
                    result = analysis.retrieve_non_projected_hists()
                    if result is not True:
                        raise ValueError(
                            f"Retrieval of non-projected hists of {key_index} analysis object failed."
                        )
                    analysis.run_projectors()

                    # Ensure that all hists have sumw2 enabled
                    analysis.set_sumw2()

                    # Update progress
                    setting_up.update()

        # Setup the pt hard bin analysis objects.
        with self._progress_manager.counter(total=len(self.pt_hard_bins),
                                            desc="Setting up: ",
                                            unit="pt hard bins") as setting_up:
            for key_index, pt_hard_bin in analysis_config.iterate_with_selected_objects(
                    self.pt_hard_bins):
                pt_hard_bin.setup(input_hists=None,
                                  n_pt_hard_bins=len(self.pt_hard_bins))

                # Update progress
                setting_up.update()
Example #4
def calculate_average_n_events(pt_hard_bins: Mapping[Any, Any]) -> float:
    """ Get relative scaling for each pt hard bin and scale the scale factors by each relative value """
    n_total_events = 0.
    for key_index, pt_hard_bin in analysis_config.iterate_with_selected_objects(pt_hard_bins):
        if pt_hard_bin.setup_complete is not True:
            raise ValueError(f"Setup was not run on pt hard bin {key_index}. Please run it and try again!")
        n_total_events += pt_hard_bin.number_of_events

    return n_total_events / len(pt_hard_bins)
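
A hypothetical usage sketch follows (the scale_factor attribute and the direction of the rescaling are assumptions, not shown in the example above): the returned average can be used to rescale each pt hard bin's scale factor by its relative event count.

average_n_events = calculate_average_n_events(pt_hard_bins)
for key_index, pt_hard_bin in analysis_config.iterate_with_selected_objects(pt_hard_bins):
    # Assumed attribute name; rescale this bin's scale factor by its relative event count.
    pt_hard_bin.scale_factor *= average_n_events / pt_hard_bin.number_of_events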
Example #5
    def run(self) -> bool:
        with self._progress_manager.counter(
                total=len(self.analyses),
                desc="Setting up:",
                unit="analysis objects") as setting_up:
            for key_index, analysis in analysis_config.iterate_with_selected_objects(
                    self.analyses):
                analysis.setup()
                setting_up.update()

        with self._progress_manager.counter(
                total=len(self.analyses), desc="Running:",
                unit="analysis objects") as running:
            for key_index, analysis in analysis_config.iterate_with_selected_objects(
                    self.analyses):
                analysis.run()
                running.update()

        return True
Example #6
    def setup(self) -> None:
        """ Setup the correlations manager. """
        # Retrieve input histograms (with caching).
        input_hists: Dict[str, Any] = {}
        with self._progress_manager.counter(total = len(self.analyses),
                                            desc = "Setting up:",
                                            unit = "analysis objects") as setting_up:
            for key_index, analysis in analysis_config.iterate_with_selected_objects(self.analyses):
                # We should now have all RP orientations.
                # We are effectively caching the values here.
                if not input_hists:
                    input_hists = histogram.get_histograms_in_file(filename = analysis.input_filename)
                logger.debug(f"{key_index}")
                # Setup input histograms and projectors.
                analysis.setup(input_hists = input_hists)
                # Keep track of progress
                setting_up.update()
Example #7
    def run(self) -> bool:
        """ Setup and run the actual analysis. """
        # Setup
        result = self._setup()
        if not result:
            raise RuntimeError("Setup failed")

        # Run the calculations
        with self._progress_manager.counter(total=len(self.analyses),
                                            desc="Calculating:",
                                            unit="EP resolutions") as running:
            for key_index, analysis in analysis_config.iterate_with_selected_objects(
                    self.analyses):
                # Run the analysis
                analysis.run(event_counts=self.event_counts)
                #logger.debug(f"resolutions: {key_index} {analysis.resolution}")

                running.update()

        # Write out the results of interest.
        yaml_filename = self._write_selected_resolutions()
        logger.info(f"Wrote resolution parameters to {yaml_filename}")

        # Plot the results
        with self._progress_manager.counter(total=len(
                self.selected_iterables["harmonic"]),
                                            desc="Plotting:",
                                            unit="harmonics") as plotting:
            for harmonic in self.selected_iterables["harmonic"]:
                plot_general.event_plane_resolution_harmonic(
                    analyses_iter=analysis_config.iterate_with_selected_objects(
                        self.analyses, harmonic=harmonic),
                    harmonic=harmonic,
                    output_info=self.output_info,
                )
                plotting.update()

        return True
Example #8
    def run(self, run_plotting: bool = True) -> bool:
        """ Main driver function to create, process, and plot task hists.

        Args:
            run_plotting: If true, run plotting after the processing.
        Returns:
            True if the processing was successful.
        """
        logger.info("About to process")
        for keys, task in analysis_config.iterate_with_selected_objects(
                self.tasks):
            # Print the task selected analysis options
            opts = [f"{name}: \"{str(value)}\"" for name, value in keys]
            options = "\n\t".join(opts)
            logger.info(
                f"Processing plotting task {task.task_name} with options:\n\t{options}"
            )

            # Setup task, run the processing, and plot the histograms.
            task.setup()
            task.run(run_plotting=run_plotting)

        return True
Example #9
    def run(self) -> bool:
        """ Run the Z vertex analysis. """
        steps = 3
        with self._progress_manager.counter(total = steps,
                                            desc = "Overall processing progress:",
                                            unit = "") as overall_progress:
            # First setup the correlations
            self.setup()
            overall_progress.update()

            # First analysis step
            with self._progress_manager.counter(total = len(self.analyses),
                                                desc = "Projecting:",
                                                unit = "z vertex analysis objects") as projecting:
                for key_index, analysis in analysis_config.iterate_with_selected_objects(self.analyses):
                    analysis.run_projections(processing_options = self.processing_options)
                    # Keep track of progress
                    projecting.update()
            overall_progress.update()

            self._merge_z_vertex_signal_correlations()
            overall_progress.update()

        return True
Example #10
def _extracted_values(
    analyses: Mapping[Any, "correlations.Correlations"],
    selected_iterables: Mapping[str, Sequence[Any]],
    extract_value_func: Callable[["correlations.Correlations"],
                                 analysis_objects.ExtractedObservable],
    plot_labels: plot_base.PlotLabels,
    output_name: str,
    output_info: analysis_objects.PlottingOutputWrapper,
    projection_range_func: Optional[Callable[["correlations.Correlations"],
                                             str]] = None,
    extraction_range_func: Optional[Callable[["correlations.Correlations"],
                                             str]] = None
) -> None:
    """ Plot extracted values.

    Note:
        It's best to fully define the ``extract_value_func`` function even though it can often be easily accomplished
        with a lambda because only a full function definition can use explicit type checking. Since this function uses
        a variety of different sources for the data, this type checking is particularly helpful. So writing a full
        function with full typing is strongly preferred to ensure that we get it right.

    Args:
        analyses: Correlation analyses.
        selected_iterables: Iterables that were used in constructing the analysis objects. We use them to iterate
            over some iterables in a particular order (particularly the reaction plane orientation).
        extract_value_func: Function to retrieve the extracted value and error.
        plot_labels: Titles and axis labels for the plot.
        output_name: Base of name under which the plot will be stored.
        output_info: Information needed to determine where to store the plot.
        projection_range_func: Function which will provide the projection range of the extracted value given
            the inclusive object. Default: None.
        extraction_range_func: Function which will provide the extraction range of the extracted value given
            the inclusive object. Default: None.
    """
    # Setup
    fig, ax = plt.subplots(figsize=(8, 6))
    # Specify plotting properties
    # color, marker, fill marker or not
    # NOTE: Fill marker is specified when plotting because of a matplotlib bug
    # NOTE: This depends on iterating over the EP orientation in the exact manner specified below.
    ep_plot_properties = {
        # black, diamond, no fill
        params.ReactionPlaneOrientation.inclusive: ("black", "D", "none"),
        # blue = "C0", square, fill
        params.ReactionPlaneOrientation.in_plane: ("tab:blue", "s", "full"),
        # green = "C2", triangle up, fill
        params.ReactionPlaneOrientation.mid_plane: ("tab:green", "^", "full"),
        # red = "C3", circle, fill
        params.ReactionPlaneOrientation.out_of_plane: ("tab:red", "o", "full"),
    }
    cyclers = []
    plot_property_values = list(ep_plot_properties.values())
    for i, prop in enumerate(["color", "marker", "fillstyle"]):
        cyclers.append(cycler(prop, [p[i] for p in plot_property_values]))
    # We skip the fillstyle because apparently it doesn't work with the cycler at the moment due to a bug...
    # They didn't implement their add operation to handle 0, so we have to give it the explicit start value.
    combined_cyclers = sum(cyclers[1:-1], cyclers[0])
    ax.set_prop_cycle(combined_cyclers)

    # Used for labeling purposes. The values that are used are identical for all analyses.
    inclusive_analysis: Optional["correlations.Correlations"] = None
    for displace_index, ep_orientation in enumerate(
            selected_iterables["reaction_plane_orientation"]):
        # Store the values to be plotted
        values: Dict[analysis_objects.PtBin,
                     analysis_objects.ExtractedObservable] = {}
        for key_index, analysis in \
                analysis_config.iterate_with_selected_objects(
                    analyses, reaction_plane_orientation = ep_orientation
                ):
            # Store each extracted value.
            values[analysis.track_pt] = extract_value_func(analysis)
            # These are both used for labeling purposes and are identical for all analyses that are iterated over.
            if ep_orientation == params.ReactionPlaneOrientation.inclusive and inclusive_analysis is None:
                inclusive_analysis = analysis

        # Plot the values
        bin_centers = np.array([k.bin_center for k in values])
        bin_centers = bin_centers + displace_index * 0.05
        ax.errorbar(
            bin_centers,
            [v.value for v in values.values()],
            yerr=[v.error for v in values.values()],
            label=ep_orientation.display_str(),
            linestyle="",
            fillstyle=ep_plot_properties[ep_orientation][2],
        )

    # Help out mypy...
    assert inclusive_analysis is not None

    # Labels.
    # General
    text = labels.make_valid_latex_string(
        inclusive_analysis.alice_label.display_str())
    text += "\n" + labels.system_label(
        energy=inclusive_analysis.collision_energy,
        system=inclusive_analysis.collision_system,
        activity=inclusive_analysis.event_activity)
    text += "\n" + labels.jet_pt_range_string(inclusive_analysis.jet_pt)
    text += "\n" + labels.jet_finding()
    text += "\n" + labels.constituent_cuts()
    text += "\n" + labels.make_valid_latex_string(
        inclusive_analysis.leading_hadron_bias.display_str())
    # Deal with projection range, extraction range string.
    additional_label = _proj_and_extract_range_label(
        inclusive_analysis=inclusive_analysis,
        projection_range_func=projection_range_func,
        extraction_range_func=extraction_range_func,
    )
    if additional_label:
        text += "\n" + additional_label
    # Finally, add the text to the axis.
    ax.text(0.97,
            0.97,
            text,
            horizontalalignment="right",
            verticalalignment="top",
            multialignment="right",
            transform=ax.transAxes)
    # Axes and titles
    ax.set_xlabel(
        labels.make_valid_latex_string(labels.track_pt_display_label()))
    # Apply any specified labels
    if plot_labels.title is not None:
        plot_labels.title = plot_labels.title + f" for {labels.jet_pt_range_string(inclusive_analysis.jet_pt)}"
    plot_labels.apply_labels(ax)
    ax.legend(loc="center right", frameon=False)

    # Final adjustments
    fig.tight_layout()
    # Save plot and cleanup
    plot_base.save_plot(
        output_info, fig,
        f"{output_name}_{inclusive_analysis.jet_pt_identifier}")
    plt.close(fig)
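
The Note in Example #10 recommends a fully defined, fully typed extract_value_func instead of a lambda. A minimal sketch of that style is below; the extracted_yield attribute is hypothetical and stands in for whichever ExtractedObservable is being plotted.

def _extract_yield(analysis: "correlations.Correlations") -> analysis_objects.ExtractedObservable:
    """ Retrieve the extracted value and error for plotting. """
    # Hypothetical attribute; any analysis_objects.ExtractedObservable can be returned here.
    return analysis.extracted_yield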