def __init__(self, config_filename: str,
                 selected_analysis_options: params.SelectedAnalysisOptions,
                 **kwargs: str):
        super().__init__(
            config_filename=config_filename,
            selected_analysis_options=selected_analysis_options,
            manager_task_name="GlauberToyModelManager",
            **kwargs,
        )

        # Analysis task
        self.analysis: GlauberPathLengthAnalysis

        # Properties for setting up the Glauber code
        self.glauber_version = self.task_config.get("glauber_version", "3.2")
        self.glauber_directory_name: str = self.task_config.get(
            "glauber_directory_name", "TGlauberMC")

        # Properties
        self.n_events = self.task_config["n_events"]
        # Cross sections (from Table V in arXiv:1710.07098)
        self.cross_sections = {
            params.CollisionEnergy.two_seven_six:
            CrossSection(value=61.8, width=0.9),
            params.CollisionEnergy.five_zero_two:
            CrossSection(value=67.6, width=0.6),
        }
        # Impact parameters
        # NOTE: Although ALICE doesn't provide impact parameters, we can compare values (N_coll and N_part)
        #       to ALICE-calculated values: https://alice-notes.web.cern.ch/node/711
        self.impact_parameters = {
            params.EventActivity.central: params.SelectedRange(0, 4.92),
            params.EventActivity.semi_central: params.SelectedRange(8.5, 11),
        }
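
A minimal, self-contained sketch of the cross-section lookup configured above, using a simplified stand-in for the package's ``CrossSection`` type (the keys here are plain floats in TeV rather than ``params.CollisionEnergy`` members):

from dataclasses import dataclass

@dataclass(frozen=True)
class CrossSection:
    """ Stand-in: inelastic nucleon-nucleon cross section (value +/- width, in mb). """
    value: float
    width: float

# Values from Table V of arXiv:1710.07098, keyed by sqrt(s_NN) in TeV.
cross_sections = {
    2.76: CrossSection(value=61.8, width=0.9),
    5.02: CrossSection(value=67.6, width=0.6),
}

sigma = cross_sections[5.02]
print(f"sigma = {sigma.value} +/- {sigma.width} mb")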
Example 2
    def test_jet_pt_string(self, logging_mixin):
        """ Test the jet pt string generation functions. Each bin (except for the last) is tested.

        The last pt bin is left for a separate test because it is printed differently
        (see ``test_jet_pt_string_for_last_pt_bin()`` for more).
        """
        pt_bins = []
        for i, (min, max) in enumerate(
                zip(self.jet_pt_bins[:-2], self.jet_pt_bins[1:-1])):
            pt_bins.append(
                analysis_objects.JetPtBin(bin=i,
                                          range=params.SelectedRange(min,
                                                                     max)))

        for pt_bin, expected_min, expected_max in zip(pt_bins,
                                                      self.jet_pt_bins[:-2],
                                                      self.jet_pt_bins[1:-1]):
            logger.debug(
                f"Checking bin {pt_bin}, {pt_bin.range}, {type(pt_bin)}")
            assert labels.jet_pt_range_string(
                pt_bin
            ) == r"$%(lower)s < p_{\text{T,jet}}^{\text{ch+ne}} < %(upper)s\:\mathrm{GeV/\mathit{c}}$" % {
                "lower": expected_min,
                "upper": expected_max
            }
Example 3
    def __init__(self, harmonic: int, detector: str, *args: Any,
                 **kwargs: Any):
        # Base class
        super().__init__(*args, **kwargs)
        # Configuration
        self.harmonic = harmonic
        self.main_detector_name = detector

        # Properties for determining the event plane resolution
        # Here we take all other detectors that aren't the main detector
        self.other_detector_names = [
            name for name in self.task_config.get("detectors")
            if name != self.main_detector_name
        ]
        self.output_ranges = self.task_config.get("output_ranges", None)
        # Validation
        if self.output_ranges is None:
            # Default should be to process each centrality range.
            self.output_ranges = [
                params.SelectedRange(min, max) for min, max in zip(
                    np.linspace(0, 100, 11)[:-1],
                    np.linspace(0, 100, 11)[1:])
            ]

        # Objects that will be created during the calculation
        self.main_detector: Detector
        self.other_detectors: List[Detector]
        self.resolution: histogram.Histogram1D
        self.selected_resolutions: Dict[params.SelectedRange,
                                        Tuple[float, float]] = {}
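
When ``output_ranges`` is not configured, the default above produces ten 10%-wide centrality ranges. A standalone sketch of that construction, with a simplified stand-in for ``params.SelectedRange``:

import numpy as np
from typing import NamedTuple

class SelectedRange(NamedTuple):
    """ Stand-in for params.SelectedRange. """
    min: float
    max: float

edges = np.linspace(0, 100, 11)
output_ranges = [SelectedRange(low, high) for low, high in zip(edges[:-1], edges[1:])]
# -> [SelectedRange(min=0.0, max=10.0), ..., SelectedRange(min=90.0, max=100.0)]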
Example 4

@classmethod
def from_yaml(cls, constructor: yaml.Constructor, data: yaml.ruamel.yaml.nodes.SequenceNode) -> List[PtBin]:
    """ Convert input YAML list to a set of ``PtBin``. """
    # logger.debug(f"Using representer, {data}")
    values = [constructor.construct_object(v) for v in data.value]
    bins = []
    for i, (val, val_next) in enumerate(zip(values[:-1], values[1:])):
        bins.append(cls._class(range=params.SelectedRange(min=val, max=val_next), bin=i + 1))
    return bins
Example 5

@classmethod
def from_yaml(cls, constructor: yaml.Constructor, data: yaml.ruamel.yaml.nodes.SequenceNode) -> List[PhiBin]:
    """ Convert input YAML list to a set of ``PhiBin``. """
    # logger.debug(f"Using representer, {data}")
    # Extract values
    values = [constructor.construct_object(v) for v in data.value]
    # Scale everything by a factor of pi for convenience.
    values = [v * np.pi for v in values]
    bins = []
    for val, val_next in zip(values[:-1], values[1:]):
        bins.append(cls._class(range=params.SelectedRange(min=val, max=val_next)))
    return bins
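
Both converters above follow the same pattern: a flat list of edges from YAML becomes bins over adjacent pairs. A standalone illustration for the ``PhiBin`` case, where the configured edges are given in units of pi (the edge values below are hypothetical):

import numpy as np

values = [0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0]  # hypothetical YAML edge list, in units of pi
values = [v * np.pi for v in values]
phi_ranges = list(zip(values[:-1], values[1:]))
# -> [(0, pi/3), (pi/3, 2*pi/3), (2*pi/3, pi)]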
Example 6

def generate_parameters(system: params.CollisionSystem) -> Tuple[np.ndarray, np.ndarray, int, Dict[int, params.SelectedRange]]:
    """ Generate the analysis parameters.

    This can be called multiple times if necessary to retrieve the parameters easily in any function.

    Args:
        system: Collision system.
    Returns:
        (pt_values, eta_values, n_cent_bins, centrality_ranges): Pt values where the efficiency should be evaluated,
            eta values where the efficiency should be evaluated, number of centrality bins, map from centrality bin
            number to centrality bin ranges.
    """
    pt_values = np.linspace(0.15, 9.95, 100 - 1)
    eta_values = np.linspace(-0.85, 0.85, 35)
    n_cent_bins = 4 if system != params.CollisionSystem.pp else 1
    centrality_ranges = {
        0: params.SelectedRange(0, 10),
        1: params.SelectedRange(10, 30),
        2: params.SelectedRange(30, 50),
        3: params.SelectedRange(50, 90),
    }

    return pt_values, eta_values, n_cent_bins, centrality_ranges
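
Assuming the surrounding package is importable, a quick call for pp illustrates the returned shapes (pp collapses to a single centrality bin, although the full centrality map is always returned):

pt_values, eta_values, n_cent_bins, centrality_ranges = generate_parameters(
    system=params.CollisionSystem.pp)
assert len(pt_values) == 99  # np.linspace(0.15, 9.95, 100 - 1) -> 99 points
assert len(eta_values) == 35
assert n_cent_bins == 1
assert centrality_ranges[0] == params.SelectedRange(0, 10)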
Example 7

def test_analysis_bin_properties(logging_mixin):
    """ Test analysis bin properties.

    Since AnalysisBin is an ABC, we test via ``TrackPtBin``.
    """
    track_pt = analysis_objects.TrackPtBin(range=params.SelectedRange(min=3.0,
                                                                      max=4.0),
                                           bin=6)

    assert track_pt.min == 3.0
    assert track_pt.max == 4.0
    assert track_pt.bin_width == 1.0
    assert track_pt.bin_center == 3.5
    assert track_pt.name == "Track Pt Bin"
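
The derived properties above follow directly from the selected range. A minimal stand-in (not the package's actual ``AnalysisBin`` implementation) shows the arithmetic:

from typing import NamedTuple

class Range(NamedTuple):
    min: float
    max: float

class Bin:
    """ Stand-in mirroring the tested properties. """
    def __init__(self, range: Range) -> None:
        self.range = range

    @property
    def bin_width(self) -> float:
        return self.range.max - self.range.min

    @property
    def bin_center(self) -> float:
        return self.range.min + self.bin_width / 2

b = Bin(Range(min=3.0, max=4.0))
assert b.bin_width == 1.0 and b.bin_center == 3.5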
Example 8
    def test_jet_pt_string_for_last_pt_bin(self, logging_mixin):
        """ Test the jet pt string generation function for the last jet pt bin.

        In the case of the last pt bin, we only want to show the lower range.
        """
        pt_bin = len(self.jet_pt_bins) - 2
        jet_pt_bin = analysis_objects.JetPtBin(
            bin=pt_bin,
            range=params.SelectedRange(self.jet_pt_bins[pt_bin],
                                       self.jet_pt_bins[pt_bin + 1]))
        assert labels.jet_pt_range_string(
            jet_pt_bin
        ) == r"$%(lower)s < p_{\text{T,jet}}^{\text{ch+ne}}\:\mathrm{GeV/\mathit{c}}$" % {
            "lower": self.jet_pt_bins[-2]
        }
Example 9
    def _track_eta_phi(self) -> None:
        """ Plot the track eta phi.

        Args:
            None.
        Returns:
            None.
        """
        # We need particular formatting, so we just take care of it by hand.
        event_activity_label_map = {
            params.EventActivity.central: "Central",
            params.EventActivity.semi_central: "SemiCentral",
        }
        for event_activity in [
                params.EventActivity.central, params.EventActivity.semi_central
        ]:
            # Setup
            task_name = self.task_config["jet_hadron_base_task_name"]
            task_name += f"{event_activity_label_map[event_activity]}"
            # Track pt bin values are from the jet-hadron task.
            track_pt_bin_values = [0.5, 1, 2, 3, 5, 8, 20]
            # Convert into track pt bins.
            track_pt_bins = [
                analysis_objects.TrackPtBin(params.SelectedRange(
                    min_value, max_value),
                                            bin=bin_number)
                for bin_number, (min_value, max_value) in enumerate(
                    zip(track_pt_bin_values[:-1], track_pt_bin_values[1:]))
            ]

            # Retrieve the hists
            hists: List[Hist] = []
            for track_pt_bin in track_pt_bins:
                hists.append(self.input_hists[task_name]
                             [f"fHistTrackEtaPhi_{track_pt_bin.bin}"])

            # Merge the hists together. We don't really need the track pt dependence.
            output_hist = hists[0]
            for h in hists[1:]:
                output_hist.Add(h)

            # Lastly, plot
            plot_general.track_eta_phi(
                hist=output_hist,
                event_activity=event_activity,
                output_info=self.output_info,
            )
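
The merge step above relies on ROOT's in-place ``TH1::Add``. A minimal sketch of the same pattern, assuming PyROOT is available (cloning first, so that, unlike the in-place merge above, the source histogram is left untouched):

import ROOT  # assumes a working PyROOT installation

hists = [ROOT.TH1F(f"h{i}", "", 10, 0.0, 1.0) for i in range(3)]
merged = hists[0].Clone("merged")
for h in hists[1:]:
    merged.Add(h)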
Example 10
def test_selected_range_alternative_from_yaml(logging_mixin):
    """ Test the alternative mode for constructing a ``SelectedRange``.

    For this mode, we just pass a list of values, with the minimum value first, instead
    of specifying the keyword arguments. This is a nice shorthand when writing config
    files by hand.
    """
    # Setup
    # YAML object
    y = yaml.yaml(modules_to_register=[params])
    input_string = "r: !SelectedRange [-5, 15]"
    expected_obj = params.SelectedRange(min=-5, max=15)
    s = StringIO()
    s.write(input_string)
    s.seek(0)
    obj = y.load(s)

    # Check that the objects are the same.
    assert obj["r"] == expected_obj
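
For contrast, the keyword form that this shorthand replaces would presumably be written with an explicit mapping (an assumption based on the docstring; the exact tag syntax depends on the registered constructor):

y = yaml.yaml(modules_to_register=[params])
s = StringIO("r: !SelectedRange {min: -5, max: 15}")
assert y.load(s)["r"] == params.SelectedRange(min=-5, max=15)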
Example 11
    def test_track_pt_strings(self, logging_mixin):
        """ Test the track pt string generation functions. Each bin is tested.  """
        pt_bins = []
        for i, (min, max) in enumerate(
                zip(self.track_pt_bins[:-1], self.track_pt_bins[1:])):
            pt_bins.append(
                analysis_objects.TrackPtBin(bin=i,
                                            range=params.SelectedRange(
                                                min, max)))

        for pt_bin, expected_min, expected_max in zip(pt_bins,
                                                      self.track_pt_bins[:-1],
                                                      self.track_pt_bins[1:]):
            logger.debug(
                f"Checking bin {pt_bin}, {pt_bin.range}, {type(pt_bin)}")
            assert labels.track_pt_range_string(
                pt_bin
            ) == r"$%(lower)s < p_{\text{T}}^{\text{assoc}} < %(upper)s\:\mathrm{GeV/\mathit{c}}$" % {
                "lower": expected_min,
                "upper": expected_max
            }
Example 12

@classmethod
def from_yaml(cls, constructor: yaml.Constructor, data: yaml.ruamel.yaml.nodes.MappingNode) -> List[PtHardBin]:
    """ Convert input YAML mapping to a set of ``PtHardBin``. """
    # Construct the underlying list and dict to make parsing simpler.
    configuration = {constructor.construct_object(key_node): constructor.construct_object(value_node) for key_node, value_node in data.value}
    # Extract the relevant data
    bins = configuration["bins"]
    train_numbers = configuration["train_numbers"]
    # Create the PtHardBin objects.
    pt_bins = []
    # Sanity check: n bin edges define n - 1 bins, so we need one train number per bin.
    if len(train_numbers) != len(bins) - 1:
        raise ValueError(f"Number of trains: {len(train_numbers)} is not equal to number of bins: {len(bins) - 1}")
    # Assign the bins
    for pt, pt_next, (bin_label, train_number) in zip(bins[:-1], bins[1:], train_numbers.items()):
        pt_bins.append(
            PtHardBin(
                bin=bin_label,
                range=params.SelectedRange(min=pt, max=pt_next),
                train_number=train_number,
            )
        )
    return pt_bins
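
A standalone illustration of the edge/train-number pairing and the sanity check above, with hypothetical values (n edges define n - 1 bins, so one train number is required per bin):

configuration = {
    "bins": [5, 11, 21, 36],                    # hypothetical pt hard bin edges
    "train_numbers": {1: 101, 2: 102, 3: 103},  # hypothetical bin label -> train number
}
bins = configuration["bins"]
train_numbers = configuration["train_numbers"]
assert len(train_numbers) == len(bins) - 1
for pt, pt_next, (bin_label, train_number) in zip(bins[:-1], bins[1:], train_numbers.items()):
    print(f"bin {bin_label}: ({pt}, {pt_next}) from train {train_number}")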
Example 13
class TestIteratePtBins:
    _track_pt_bins = [0.15, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 10.0]
    track_pt_bins = [
        analysis_objects.TrackPtBin(range=params.SelectedRange(min, max),
                                    bin=i + 1)
        for i, (min,
                max) in enumerate(zip(_track_pt_bins[:-1], _track_pt_bins[1:]))
    ]
    _jet_pt_bins = [15.0, 20.0, 40.0, 60.0, 200.0]
    jet_pt_bins = [
        analysis_objects.JetPtBin(range=params.SelectedRange(min, max),
                                  bin=i + 1)
        for i, (min,
                max) in enumerate(zip(_jet_pt_bins[:-1], _jet_pt_bins[1:]))
    ]

    def test_iterate_over_track_pt_bins(self, logging_mixin):
        """ Test the track pt bins generator.

        Note that we wrap the function in list so we get all of the values from the generator.
        """
        assert len(self.track_pt_bins) == 9
        assert list(params.iterate_over_track_pt_bins(
            self.track_pt_bins)) == list(self.track_pt_bins)

    def test_iterate_over_track_pt_bins_with_config(self, logging_mixin):
        """ Test the track pt bins generator with some bins skipped.

        The values to skip were not selected with any particular criteria except to be non-contiguous.
        """
        skip_bins = [2, 6]
        comparison_bins = [
            x for x in self.track_pt_bins if x.bin not in skip_bins
        ]
        config = {"skipPtBins": {"track": skip_bins}}
        assert list(
            params.iterate_over_track_pt_bins(
                bins=self.track_pt_bins, config=config)) == comparison_bins

    def test_iterate_over_jet_pt_bins(self, logging_mixin):
        """ Test the jet pt bins generator.

        Note that we wrap the function in list so we get all of the values from the generator.
        """
        # Ensure that we have the expected number of jet pt bins
        assert len(self.jet_pt_bins) == 4
        # Then test the actual iterable.
        assert list(params.iterate_over_jet_pt_bins(self.jet_pt_bins)) == list(
            self.jet_pt_bins)

    def test_iterate_over_jet_pt_bins_with_config(self, logging_mixin):
        """ Test the jet pt bins generator with some bins skipped.

        The values to skip were not selected with any particular criteria except to be non-contiguous.
        """
        skip_bins = [1, 2]
        comparison_bins = [
            x for x in self.jet_pt_bins if x.bin not in skip_bins
        ]
        config = {"skipPtBins": {"jet": skip_bins}}
        assert list(
            params.iterate_over_jet_pt_bins(bins=self.jet_pt_bins,
                                            config=config)) == comparison_bins

    def test_iterate_over_jet_and_track_pt_bins(self, logging_mixin):
        """ Test the jet and track pt bins generator.

        Note that we wrap the function in list so we get all of the values from the generator.
        """
        comparison_bins = [(x, y) for x in self.jet_pt_bins
                           for y in self.track_pt_bins]
        assert list(
            params.iterate_over_jet_and_track_pt_bins(
                jet_pt_bins=self.jet_pt_bins,
                track_pt_bins=self.track_pt_bins)) == comparison_bins

    def test_iterate_over_jet_and_track_pt_bins_with_config(
            self, logging_mixin):
        """ Test the jet and track pt bins generator with some bins skipped.

        The values to skip were not selected with any particular criteria except to be non-contiguous.
        """
        skip_jet_pt_bins = [1, 4]
        skip_track_pt_bins = [2, 6]
        comparison_bins = [(x, y) for x in self.jet_pt_bins
                           for y in self.track_pt_bins
                           if x.bin not in skip_jet_pt_bins
                           and y.bin not in skip_track_pt_bins]
        config = {
            "skipPtBins": {
                "jet": skip_jet_pt_bins,
                "track": skip_track_pt_bins
            }
        }
        # Check that the comparison bins are as expected.
        comparison_bin_bins = [(x.bin, y.bin) for (x, y) in comparison_bins]
        assert comparison_bin_bins == [(2, 1), (2, 3), (2, 4), (2, 5), (2, 7),
                                       (2, 8), (2, 9), (3, 1), (3, 3), (3, 4),
                                       (3, 5), (3, 7), (3, 8), (3, 9)]
        # Then check the actual output.
        assert list(
            params.iterate_over_jet_and_track_pt_bins(
                jet_pt_bins=self.jet_pt_bins,
                track_pt_bins=self.track_pt_bins,
                config=config)) == comparison_bins

    @pytest.mark.parametrize("bin_type_name, skip_bins", [
        ("track", [2, 38]),
        ("jet", [2, 5]),
    ],
                             ids=["Track", "Jet"])
    def test_out_of_range_skip_track_bin(self, logging_mixin, bin_type_name,
                                         skip_bins):
        """ Test that an except is generated if a skip bin is out of range.

        The test is performed both with a in range and out of range bin to ensure
        the exception is thrown on the right value.
        """
        if bin_type_name == "track":
            bins = self.track_pt_bins
            func = params.iterate_over_track_pt_bins
        elif bin_type_name == "jet":
            bins = self.jet_pt_bins
            func = params.iterate_over_jet_pt_bins
        else:
            # Unrecognized.
            bins = None
            func = None

        config = {"skipPtBins": {bin_type_name: skip_bins}}
        with pytest.raises(ValueError) as exception_info:
            list(func(bins=bins, config=config))
        # NOTE: ExceptionInfo is a wrapper around the exception. `.value` is the actual exception
        #       and then we want to check the value of the first arg, which contains the value
        #       that caused the exception.
        assert exception_info.value.args[0] == skip_bins[1]
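
The skip behavior exercised throughout this class amounts to validating the requested bin numbers and then filtering. A minimal stand-in (not the package's actual implementation) might look like:

def iterate_over_bins(bins, skip_bins):
    """ Yield bins whose number is not in skip_bins; raise on unknown bin numbers. """
    valid_bins = {b.bin for b in bins}
    for skip in skip_bins:
        if skip not in valid_bins:
            # Matches the assertion above: the offending value is the first arg.
            raise ValueError(skip)
    for b in bins:
        if b.bin not in skip_bins:
            yield b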
Example 14
    # for formatting in later parametrizations.
    expected_display_str = expected["display_str"] % {
        "embedded_additional_label": embedded_additional_label
    }

    assert str(system) == expected["str"]
    assert system.display_str(
        embedded_additional_label=embedded_additional_label
    ) == expected_display_str


@pytest.mark.parametrize("activity, expected",
                         [(params.EventActivity["inclusive"], {
                             "str": "inclusive",
                             "display_str": "",
                             "range": params.SelectedRange(min=-1, max=-1)
                         }),
                          (params.EventActivity["central"], {
                              "str": "central",
                              "display_str": r"0 \textendash 10 \%",
                              "range": params.SelectedRange(min=0, max=10)
                          }),
                          (params.EventActivity["semi_central"], {
                              "str": "semi_central",
                              "display_str": r"30 \textendash 50 \%",
                              "range": params.SelectedRange(min=30, max=50)
                          })],
                         ids=["inclusive", "central", "semi_central"])
def test_event_activity(logging_mixin, activity, expected):
    """ Test event activity values. """
    assert str(activity) == expected["str"]
Example 15
@property
def extraction_range(self) -> params.SelectedRange:
    """ Helper to retrieve the extraction range. """
    return params.SelectedRange(
        min=self.central_value - self.extraction_limit,
        max=self.central_value + self.extraction_limit)
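
A quick numerical check of the property above: with a central value of 1.0 and an extraction limit of 0.2, the range is symmetric about the central value.

central_value = 1.0
extraction_limit = 0.2
extraction_range = (central_value - extraction_limit, central_value + extraction_limit)
assert extraction_range == (0.8, 1.2)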