Example #1
def test_find(particle_database: ParticleCollection):
    f2_1950 = particle_database.find(9050225)
    assert f2_1950.name == "f(2)(1950)"
    assert f2_1950.mass == 1.936
    phi = particle_database.find("phi(1020)")
    assert phi.pid == 333
    assert pytest.approx(phi.width) == 0.004249
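
The particle_database fixture used throughout these examples is not shown here. Below is a minimal sketch of how such a fixture could be provided, assuming the load_pdg helper from Example #7 is importable; the real fixture may be defined differently.

import pytest

@pytest.fixture(scope="session")
def particle_database() -> ParticleCollection:
    # Hypothetical fixture: build the PDG-based collection once per test session.
    return load_pdg()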
Example #2
def build_particle_collection(definition: dict,
                              validate: bool = True) -> ParticleCollection:
    if validate:
        validation.particle_list(definition)
    definition = definition["ParticleList"]
    particles = ParticleCollection()
    for name, particle_def in definition.items():
        particles.add(build_particle(name, particle_def))
    return particles
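
A rough usage sketch, assuming the definition has already been read into a dictionary with a top-level "ParticleList" mapping (for instance from a YAML file); the per-particle schema is determined by build_particle and is not shown here.

import yaml

with open("particle_list.yml") as stream:  # hypothetical input file
    definition = yaml.safe_load(stream)
particles = build_particle_collection(definition, validate=True)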
Example #3
def particle_selection(output_dir, particle_database: ParticleCollection):
    selection = ParticleCollection()
    selection += particle_database.filter(lambda p: p.name.startswith("pi"))
    selection += particle_database.filter(lambda p: p.name.startswith("K"))
    selection += particle_database.filter(lambda p: p.name.startswith("D"))
    selection += particle_database.filter(lambda p: p.name.startswith("J/psi"))
    io.write(selection, output_dir + XML_FILE)
    io.write(selection, output_dir + YAML_FILE)
    return selection
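
Since str.startswith also accepts a tuple of prefixes, the same selection could presumably be built with a single filter call:

selection = particle_database.filter(
    lambda p: p.name.startswith(("pi", "K", "D", "J/psi"))
)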
Example #4
def _generate_particle_collection(
    graphs: List[StateTransitionGraph[ParticleWithSpin]],
) -> ParticleCollection:
    particles = ParticleCollection()
    for graph in graphs:
        for edge_props in map(graph.get_edge_props, graph.edges):
            particle_name = edge_props[0].name
            if particle_name not in particles:
                particles.add(edge_props[0])
    return particles
Example #5
    def get_intermediate_particles(self) -> ParticleCollection:
        """Extract the intermediate state particles."""
        intermediate_states = ParticleCollection()
        for graph in self.solutions:
            for edge_props in map(
                graph.get_edge_props, graph.get_intermediate_state_edge_ids()
            ):
                if edge_props:
                    particle, _ = edge_props
                    if particle not in intermediate_states:
                        intermediate_states.add(particle)
        return intermediate_states
Example #6
def test_exceptions(particle_database: ParticleCollection):
    gamma = particle_database["gamma"]
    with pytest.raises(KeyError):
        particle_database += create_particle(gamma, name="gamma_new")
    with pytest.raises(NotImplementedError):
        particle_database.find(3.14)  # type: ignore
    with pytest.raises(NotImplementedError):
        particle_database += 3.14  # type: ignore
    with pytest.raises(NotImplementedError):
        assert 3.14 in particle_database
    with pytest.raises(AssertionError):
        assert gamma == "gamma"
Example #7
def load_pdg() -> ParticleCollection:
    all_pdg_particles = PdgDatabase.findall(
        lambda item: item.charge is not None
        and item.charge.is_integer()  # remove quarks
        and item.J is not None  # remove new physics and nuclei
        and abs(item.pdgid) < 1e9  # p and n as nucleus
        and item.name not in __skip_particles
        and not (item.mass is None and not item.name.startswith("nu"))
    )
    particle_collection = ParticleCollection()
    for pdg_particle in all_pdg_particles:
        new_particle = __convert_pdg_instance(pdg_particle)
        particle_collection.add(new_particle)
    return particle_collection
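
A short usage sketch; the expected collection size follows the version-dependent counts asserted in Example #10.

pdg = load_pdg()
print(len(pdg))            # 512-519 entries, depending on the particle package version
print(pdg.find(211).name)  # "pi+"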
Example #8
def create_particle(edge_props: GraphEdgePropertyMap,
                    particles: ParticleCollection) -> ParticleWithSpin:
    """Create a Particle with spin projection from a qn dictionary.

    The implementation assumes the edge properties match the attributes of a
    particle inside the `.ParticleCollection`.

    Args:
        edge_props: The quantum number dictionary.
        particles: A `.ParticleCollection` which is used to retrieve a
          reference particle instance to lower the memory footprint.

    Raises:
        KeyError: If the edge properties do not contain the pid information or
          no particle with the same pid is found in the `.ParticleCollection`.

        ValueError: If the edge properties do not contain spin projection info.
    """
    particle = particles.find(int(edge_props[EdgeQuantumNumbers.pid]))
    if EdgeQuantumNumbers.spin_projection not in edge_props:
        raise ValueError(
            "GraphEdgePropertyMap does not contain a spin projection!")
    spin_projection = edge_props[EdgeQuantumNumbers.spin_projection]

    return (particle, spin_projection)
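
A minimal call sketch, assuming edge_props is a plain mapping keyed on EdgeQuantumNumbers members (as the lookups above suggest) and pdg is a ParticleCollection such as the one returned by load_pdg(); a real GraphEdgePropertyMap typically carries more quantum numbers.

edge_props = {
    EdgeQuantumNumbers.pid: 211,              # pi+
    EdgeQuantumNumbers.spin_projection: 0.0,
}
particle, spin_projection = create_particle(edge_props, particles=pdg)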
Example #9
    def test_add(self, particle_database: ParticleCollection):
        subset_copy = particle_database.filter(
            lambda p: p.name.startswith("omega"))
        subset_copy += particle_database.filter(
            lambda p: p.name.startswith("pi"))
        n_subset = len(subset_copy)

        new_particle = create_particle(
            particle_database.find(443),
            pid=666,
            name="EpEm",
            mass=1.0,
            width=0.0,
        )
        subset_copy.add(new_particle)
        assert len(subset_copy) == n_subset + 1
        assert subset_copy["EpEm"] is new_particle
Example #10
def test_pdg_size(pdg: ParticleCollection):
    assert len(pdg) in [
        512,  # particle==0.13
        519,  # particle==0.14
    ]
    assert len(pdg.filter(lambda p: "~" in p.name)) in [
        165,  # particle==0.13
        172,  # particle==0.14
    ]
Example #11
def build_particle_collection(definition: dict) -> ParticleCollection:
    if isinstance(definition, dict):
        definition = definition.get("root", definition)
    if isinstance(definition, dict):
        definition = definition.get("ParticleList", definition)
    if isinstance(definition, dict):
        definition = definition.get("Particle", definition)
    if isinstance(definition, list):
        particle_list: Union[List[dict], ValuesView] = definition
    elif isinstance(definition, dict):
        particle_list = definition.values()
    else:
        raise ValueError(
            "The following definition cannot be converted to a ParticleCollection\n"
            f"{definition}")
    collection = ParticleCollection()
    for particle_def in particle_list:
        collection.add(build_particle(particle_def))
    return collection
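
The cascade of .get calls means several nestings of the same definition are accepted; the following shapes all unwrap to the same particle list (placeholder entries only, since the per-particle schema expected by build_particle is not shown).

flat = [{"name": "pi0"}]                                   # plain list of definitions
wrapped = {"ParticleList": {"pi0": {"name": "pi0"}}}       # name-keyed mapping
xml_like = {"root": {"ParticleList": {"Particle": flat}}}  # XML-style nesting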
Example #12
    def test_add_warnings(self, particle_database: ParticleCollection, caplog):
        pions = particle_database.filter(lambda p: p.name.startswith("pi"))
        pi_plus = pions["pi+"]
        caplog.clear()
        with caplog.at_level(logging.WARNING):
            pions.add(create_particle(pi_plus, name="new pi+", mass=0.0))
        assert f"{pi_plus.pid}" in caplog.text
        caplog.clear()
        with caplog.at_level(logging.WARNING):
            pions.add(create_particle(pi_plus, width=1.0))
        assert "pi+" in caplog.text
Example #13
    def test_filter(particle_database: ParticleCollection):
        search_result = particle_database.filter(lambda p: "f(0)" in p.name)
        f0_1500_from_subset = search_result["f(0)(1500)"]
        assert len(search_result) == 5
        assert f0_1500_from_subset.mass == 1.506
        assert f0_1500_from_subset is particle_database["f(0)(1500)"]
        assert f0_1500_from_subset is not particle_database["f(0)(980)"]

        search_result = particle_database.filter(lambda p: p.pid == 22)
        gamma_from_subset = search_result["gamma"]
        assert len(search_result) == 1
        assert gamma_from_subset.pid == 22
        assert gamma_from_subset is particle_database["gamma"]
        filtered_result = particle_database.filter(
            lambda p: 1.8 < p.mass < 2.0 and p.spin == 2 and p.strangeness == 1
        )
        assert filtered_result.names == {
            "K(2)(1820)0",
            "K(2)(1820)+",
        }
Example #14
def test_create_antiparticle_tilde(particle_database: ParticleCollection):
    anti_particles = particle_database.filter(lambda p: "~" in p.name)
    assert len(anti_particles) in [
        165,  # particle==0.13
        172,  # particle==0.14
    ]
    for anti_particle in anti_particles:
        particle_name = anti_particle.name.replace("~", "")
        if "+" in particle_name:
            particle_name = particle_name.replace("+", "-")
        elif "-" in particle_name:
            particle_name = particle_name.replace("-", "+")
        created_particle = create_antiparticle(anti_particle, particle_name)

        assert created_particle == particle_database[particle_name]
Example #15
def test_create_antiparticle_by_pid(particle_database: ParticleCollection):
    n_particles_with_neg_pid = 0
    for particle in particle_database:
        anti_particles_by_pid = particle_database.filter(
            lambda p: p.pid == -particle.pid  # pylint: disable=cell-var-from-loop
        )
        if len(anti_particles_by_pid) != 1:
            continue
        n_particles_with_neg_pid += 1
        anti_particle = next(iter(anti_particles_by_pid))
        particle_from_anti = -anti_particle
        assert particle == particle_from_anti
    assert n_particles_with_neg_pid in [
        428,  # particle==0.13
        442,  # particle==0.14
    ]
Example #16
    def test_discard(self, particle_database: ParticleCollection):
        pions = particle_database.filter(lambda p: p.name.startswith("pi"))
        n_pions = len(pions)
        pim = pions["pi-"]
        pip = pions["pi+"]

        pions.discard(pions["pi+"])
        assert len(pions) == n_pions - 1
        assert "pi+" not in pions
        assert pip.name == "pi+"  # still exists

        pions.remove("pi-")
        assert len(pions) == n_pions - 2
        assert pim not in pions
        assert pim.name == "pi-"  # still exists

        with pytest.raises(NotImplementedError):
            pions.discard(111)  # type: ignore
Example #17
    def collapse_graphs(
        self,
    ) -> List[StateTransitionGraph[ParticleCollection]]:
        def merge_into(
            graph: StateTransitionGraph[Particle],
            merged_graph: StateTransitionGraph[ParticleCollection],
        ) -> None:
            if (
                graph.get_intermediate_state_edge_ids()
                != merged_graph.get_intermediate_state_edge_ids()
            ):
                raise ValueError(
                    "Cannot merge graphs that don't have the same edge IDs"
                )
            for i in graph.edges:
                particle = graph.get_edge_props(i)
                other_particles = merged_graph.get_edge_props(i)
                if particle not in other_particles:
                    other_particles += particle

        def is_same_shape(
            graph: StateTransitionGraph[Particle],
            merged_graph: StateTransitionGraph[ParticleCollection],
        ) -> bool:
            if graph.edges != merged_graph.edges:
                return False
            for edge_id in (
                graph.get_initial_state_edge_ids()
                + graph.get_final_state_edge_ids()
            ):
                edge_prop = merged_graph.get_edge_props(edge_id)
                if len(edge_prop) != 1:
                    return False
                other_particle = next(iter(edge_prop))
                if other_particle != graph.get_edge_props(edge_id):
                    return False
            return True

        graphs = self.get_particle_graphs()
        inventory: List[StateTransitionGraph[ParticleCollection]] = list()
        for graph in graphs:
            append_to_inventory = True
            for merged_graph in inventory:
                if is_same_shape(graph, merged_graph):
                    merge_into(graph, merged_graph)
                    append_to_inventory = False
                    break
            if append_to_inventory:
                new_edge_props = {
                    edge_id: ParticleCollection(
                        {graph.get_edge_props(edge_id)}
                    )
                    for edge_id in graph.edges
                }
                inventory.append(
                    StateTransitionGraph[ParticleCollection](
                        topology=Topology(
                            nodes=set(graph.nodes), edges=graph.edges
                        ),
                        node_props={
                            i: graph.get_node_props(i) for i in graph.nodes
                        },
                        edge_props=new_edge_props,
                    )
                )
        return inventory
Example #18
class StateTransitionManager:  # pylint: disable=too-many-instance-attributes
    """Main handler for decay topologies.

    .. seealso:: :doc:`/usage/workflow` and `generate`
    """

    def __init__(  # pylint: disable=too-many-arguments,too-many-branches
        self,
        initial_state: Sequence[StateDefinition],
        final_state: Sequence[StateDefinition],
        particles: Optional[ParticleCollection] = None,
        allowed_intermediate_particles: Optional[List[str]] = None,
        interaction_type_settings: Dict[
            InteractionTypes, Tuple[EdgeSettings, NodeSettings]
        ] = None,
        formalism_type: str = "helicity",
        topology_building: str = "isobar",
        number_of_threads: Optional[int] = None,
        solving_mode: SolvingMode = SolvingMode.Fast,
        reload_pdg: bool = False,
    ) -> None:
        if interaction_type_settings is None:
            interaction_type_settings = {}
        allowed_formalism_types = [
            "helicity",
            "canonical-helicity",
            "canonical",
        ]
        if formalism_type not in allowed_formalism_types:
            raise NotImplementedError(
                f"Formalism type {formalism_type} not implemented."
                f" Use {allowed_formalism_types} instead."
            )
        self.__formalism_type = str(formalism_type)
        self.__particles = ParticleCollection()
        if particles is not None:
            self.__particles = particles
        if number_of_threads is None:
            self.number_of_threads = multiprocessing.cpu_count()
        else:
            self.number_of_threads = int(number_of_threads)
        # keep the SolvingMode enum so the comparison in find_solutions works
        self.reaction_mode = solving_mode
        self.initial_state = initial_state
        self.final_state = final_state
        self.interaction_type_settings = interaction_type_settings

        self.interaction_determinators: List[InteractionDeterminator] = [
            LeptonCheck(),
            GammaCheck(),
        ]
        self.final_state_groupings: Optional[List[List[List[str]]]] = None
        self.allowed_interaction_types: List[InteractionTypes] = [
            InteractionTypes.Strong,
            InteractionTypes.EM,
            InteractionTypes.Weak,
        ]
        self.filter_remove_qns: Set[Type[NodeQuantumNumber]] = set()
        self.filter_ignore_qns: Set[Type[NodeQuantumNumber]] = set()
        if formalism_type == "helicity":
            self.filter_remove_qns = {
                NodeQuantumNumbers.l_magnitude,
                NodeQuantumNumbers.l_projection,
                NodeQuantumNumbers.s_magnitude,
                NodeQuantumNumbers.s_projection,
            }
        if "helicity" in formalism_type:
            self.filter_ignore_qns = {NodeQuantumNumbers.parity_prefactor}
        int_nodes = []
        use_mass_conservation = True
        use_nbody_topology = False
        if topology_building == "isobar":
            if len(initial_state) == 1:
                int_nodes.append(InteractionNode("TwoBodyDecay", 1, 2))
        else:
            int_nodes.append(
                InteractionNode(
                    "NBodyScattering", len(initial_state), len(final_state)
                )
            )
            use_nbody_topology = True
            # turn off mass conservation in case more than one initial-state
            # particle is present
            if len(initial_state) > 1:
                use_mass_conservation = False

        if not self.interaction_type_settings:
            self.interaction_type_settings = (
                create_default_interaction_settings(
                    formalism_type,
                    nbody_topology=use_nbody_topology,
                    use_mass_conservation=use_mass_conservation,
                )
            )
        self.topology_builder = SimpleStateTransitionTopologyBuilder(int_nodes)

        if reload_pdg or len(self.__particles) == 0:
            self.__particles = load_default_particles()

        self.__allowed_intermediate_particles = list()
        if allowed_intermediate_particles is not None:
            self.set_allowed_intermediate_particles(
                allowed_intermediate_particles
            )
        else:
            self.__allowed_intermediate_particles = [
                create_edge_properties(x) for x in self.__particles
            ]

    def set_allowed_intermediate_particles(
        self, particle_names: List[str]
    ) -> None:
        self.__allowed_intermediate_particles = list()
        for particle_name in particle_names:
            matches = self.__particles.filter(
                lambda p: particle_name  # pylint: disable=cell-var-from-loop
                in p.name
            )
            if len(matches) == 0:
                raise LookupError(
                    "Could not find any matches for allowed intermediate "
                    f' particle "{particle_name}"'
                )
            self.__allowed_intermediate_particles += [
                create_edge_properties(x) for x in matches
            ]

    @property
    def formalism_type(self) -> str:
        return self.__formalism_type

    def set_topology_builder(
        self, topology_builder: SimpleStateTransitionTopologyBuilder
    ) -> None:
        self.topology_builder = topology_builder

    def add_final_state_grouping(
        self, fs_group: List[Union[str, List[str]]]
    ) -> None:
        if not isinstance(fs_group, list):
            raise ValueError(
                "The final state grouping has to be of type list."
            )
        if len(fs_group) > 0:
            if self.final_state_groupings is None:
                self.final_state_groupings = list()
            if not isinstance(fs_group[0], list):
                fs_group = [fs_group]  # type: ignore
            self.final_state_groupings.append(fs_group)  # type: ignore

    def set_allowed_interaction_types(
        self, allowed_interaction_types: List[InteractionTypes]
    ) -> None:
        # verify order
        for allowed_types in allowed_interaction_types:
            if not isinstance(allowed_types, InteractionTypes):
                raise TypeError(
                    "allowed interaction types must be of type"
                    "[InteractionTypes]"
                )
            if allowed_types not in self.interaction_type_settings:
                logging.info(self.interaction_type_settings.keys())
                raise ValueError(
                    f"interaction {allowed_types} not found in settings"
                )
        self.allowed_interaction_types = allowed_interaction_types

    def create_problem_sets(self) -> Dict[float, List[ProblemSet]]:
        topology_graphs = self.__build_topologies()
        problem_sets = []
        for topology in topology_graphs:
            for initial_facts in self.__create_initial_facts(topology):
                problem_sets.extend(
                    [
                        ProblemSet(
                            topology=topology,
                            initial_facts=initial_facts,
                            solving_settings=x,
                        )
                        for x in self.__determine_graph_settings(
                            topology, initial_facts
                        )
                    ]
                )
        # create groups of settings ordered by "probability"
        return _group_by_strength(problem_sets)

    def __build_topologies(self) -> List[Topology]:
        all_graphs = self.topology_builder.build_graphs(
            len(self.initial_state), len(self.final_state)
        )
        logging.info(f"number of topology graphs: {len(all_graphs)}")
        return all_graphs

    def __create_initial_facts(self, topology: Topology) -> List[InitialFacts]:
        initial_facts = create_initial_facts(
            topology=topology,
            particles=self.__particles,
            initial_state=self.initial_state,
            final_state=self.final_state,
            final_state_groupings=self.final_state_groupings,
        )

        logging.info(f"initialized {len(initial_facts)} graphs!")
        return initial_facts

    def __determine_graph_settings(
        self, topology: Topology, initial_facts: InitialFacts
    ) -> List[GraphSettings]:
        # pylint: disable=too-many-locals
        final_state_edges = topology.get_final_state_edge_ids()
        initial_state_edges = topology.get_initial_state_edge_ids()
        graph_settings: List[GraphSettings] = [
            GraphSettings(
                edge_settings={
                    edge_id: self.interaction_type_settings[
                        InteractionTypes.Weak
                    ][0]
                    for edge_id in topology.edges
                },
                node_settings={},
            )
        ]

        for node_id in topology.nodes:
            interaction_types: List[InteractionTypes] = []
            out_edge_ids = topology.get_edge_ids_outgoing_from_node(node_id)
            in_edge_ids = topology.get_edge_ids_ingoing_to_node(node_id)
            in_edge_props = [
                initial_facts.edge_props[edge_id]
                for edge_id in [
                    x for x in in_edge_ids if x in initial_state_edges
                ]
            ]
            out_edge_props = [
                initial_facts.edge_props[edge_id]
                for edge_id in [
                    x for x in out_edge_ids if x in final_state_edges
                ]
            ]
            node_props = InteractionProperties()
            if node_id in initial_facts.node_props:
                node_props = initial_facts.node_props[node_id]
            for int_det in self.interaction_determinators:
                determined_interactions = int_det.check(
                    in_edge_props, out_edge_props, node_props
                )
                if interaction_types:
                    interaction_types = list(
                        set(determined_interactions) & set(interaction_types)
                    )
                else:
                    interaction_types = determined_interactions
            interaction_types = filter_interaction_types(
                interaction_types, self.allowed_interaction_types
            )
            logging.debug(
                "using %s interaction order for node: %s",
                str(interaction_types),
                str(node_id),
            )

            temp_graph_settings: List[GraphSettings] = graph_settings
            graph_settings = []
            for temp_setting in temp_graph_settings:
                for int_type in interaction_types:
                    updated_setting = deepcopy(temp_setting)
                    updated_setting.node_settings[node_id] = deepcopy(
                        self.interaction_type_settings[int_type][1]
                    )
                    graph_settings.append(updated_setting)

        return graph_settings

    def find_solutions(
        self,
        problem_sets: Dict[float, List[ProblemSet]],
    ) -> Result:
        # pylint: disable=too-many-locals
        """Check for solutions for a specific set of interaction settings."""
        results: Dict[float, Result] = {}
        logging.info(
            "Number of interaction settings groups being processed: %d",
            len(problem_sets),
        )
        total = sum(map(len, problem_sets.values()))
        progress_bar = tqdm(
            total=total,
            desc="Propagating quantum numbers",
            disable=logging.getLogger().level > logging.WARNING,
        )
        for strength, problems in sorted(problem_sets.items(), reverse=True):
            logging.info(
                "processing interaction settings group with "
                f"strength {strength}",
            )
            logging.info(f"{len(problems)} entries in this group")
            logging.info(f"running with {self.number_of_threads} threads...")

            qn_problems = [_convert_to_qn_problem_set(x) for x in problems]

            # Because of pickling problems of Generic classes (in this case
            # StateTransitionGraph), multithreaded code has to work with
            # QNProblemSet's and QNResult's. So the appropriate conversions
            # have to be done before and after
            temp_qn_results: List[Tuple[QNProblemSet, QNResult]] = []
            if self.number_of_threads > 1:
                with Pool(self.number_of_threads) as pool:
                    for qn_result in pool.imap_unordered(
                        self._solve, qn_problems, 1
                    ):
                        temp_qn_results.append(qn_result)
                        progress_bar.update()
            else:
                for problem in qn_problems:
                    temp_qn_results.append(self._solve(problem))
                    progress_bar.update()
            for temp_qn_result in temp_qn_results:
                temp_result = self.__convert_result(
                    temp_qn_result[0].topology,
                    temp_qn_result[1],
                )
                if strength not in results:
                    results[strength] = temp_result
                else:
                    results[strength].extend(temp_result, True)
            if (
                results[strength].solutions
                and self.reaction_mode == SolvingMode.Fast
            ):
                break
        progress_bar.close()

        for key, result in results.items():
            logging.info(
                f"number of solutions for strength ({key}) "
                f"after qn solving: {len(result.solutions)}",
            )

        final_result = Result()
        for temp_result in results.values():
            final_result.extend(temp_result)

        # remove duplicate solutions, which only differ in the interaction qns
        final_solutions = remove_duplicate_solutions(
            final_result.solutions,
            self.filter_remove_qns,
            self.filter_ignore_qns,
        )

        if final_solutions:
            match_external_edges(final_solutions)
        return Result(
            final_solutions,
            final_result.not_executed_node_rules,
            final_result.violated_node_rules,
            final_result.not_executed_edge_rules,
            final_result.violated_edge_rules,
            formalism_type=self.formalism_type,
        )

    def _solve(
        self, qn_problem_set: QNProblemSet
    ) -> Tuple[QNProblemSet, QNResult]:
        solver = CSPSolver(self.__allowed_intermediate_particles)

        return (qn_problem_set, solver.find_solutions(qn_problem_set))

    def __convert_result(
        self, topology: Topology, qn_result: QNResult
    ) -> Result:
        """Converts a `.QNResult` with a `.Topology` into a `.Result`.

        The ParticleCollection is used to retrieve a particle instance
        reference to lower the memory footprint.
        """
        solutions = []
        for solution in qn_result.solutions:
            graph = StateTransitionGraph[ParticleWithSpin](
                topology=topology,
                node_props={
                    i: create_interaction_properties(x)
                    for i, x in solution.node_quantum_numbers.items()
                },
                edge_props={
                    i: create_particle(x, self.__particles)
                    for i, x in solution.edge_quantum_numbers.items()
                },
            )
            graph.graph_node_properties_comparator = (
                CompareGraphNodePropertiesFunctor()
            )
            solutions.append(graph)

        return Result(
            solutions=solutions,
            violated_edge_rules=qn_result.violated_edge_rules,
            violated_node_rules=qn_result.violated_node_rules,
            not_executed_node_rules=qn_result.not_executed_node_rules,
            not_executed_edge_rules=qn_result.not_executed_edge_rules,
            formalism_type=self.__formalism_type,
        )
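
A condensed workflow sketch built only from the methods defined above; the initial- and final-state definitions are illustrative, and the default interaction settings are assumed to cover the requested types.

stm = StateTransitionManager(
    initial_state=[("J/psi(1S)", [-1, +1])],  # hypothetical state definitions
    final_state=["gamma", "pi0", "pi0"],
    formalism_type="helicity",
)
stm.set_allowed_interaction_types([InteractionTypes.Strong, InteractionTypes.EM])
problem_sets = stm.create_problem_sets()
result = stm.find_solutions(problem_sets)
print(f"Found {len(result.solutions)} solutions")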
Example #19
    def test_find_fail(self, particle_database: ParticleCollection,
                       search_term):
        with pytest.raises(LookupError):
            particle_database.find(search_term)
Example #20
def test_init(particle_database: ParticleCollection):
    new_pdg = ParticleCollection(particle_database)
    assert new_pdg is not particle_database
    assert new_pdg == particle_database
    with pytest.raises(TypeError):
        ParticleCollection(1)  # type: ignore
Example #21
    def test_neg(self, particle_database: ParticleCollection):
        pip = particle_database.find(211)
        pim = particle_database.find(-211)
        assert pip == -pim