Code example #1
    def evaluate_kernel(self, xs, kTs, parent):

        # Retrieve the collinear variable x
        x = xs[0]
        kT = kTs[0]

        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None, ((parent, (kT, )), ), ],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None},
                                    (1, 0): {'finite': None}, }
        })

        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = 1.
        # Correct for the ratio of the color-averaging factor of the real ME
        # initial-state flavor (quark) to that of the reduced Born ME (gluon)
        initial_state_crossing_factor *= (self.NC**2-1)/float(self.NC)
        
        z = 1./x

        norm = initial_state_crossing_factor * self.TR
        # We re-use here the Altarelli-Parisi P_q\bar{q} final-state kernel
        evaluation['values'][(0, 0)]['finite'] = norm
        evaluation['values'][(1, 0)]['finite'] = 4. * norm * z*(1.-z) / kT.square()

        return evaluation
Code example #2
    def evaluate_kernel(self, zs, kTs, parent):

        # Retrieve the collinear variables and compute basic quantities
        z1, z2, z3 = zs
        s12 = sij(1, 2, zs, kTs)
        s13 = sij(1, 3, zs, kTs)
        s23 = sij(2, 3, zs, kTs)
        s123 = s12 + s13 + s23
        t123 = tijk(1, 2, 3, zs, kTs, s_ij=s12, s_ik=s13, s_jk=s23)
        # Assemble kernel
        sqrbrk = -(t123**2) / (s12 * s123)
        sqrbrk += (4 * z3 + (z1 - z2)**2) / (z1 + z2)
        sqrbrk += z1 + z2 - s12 / s123
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'values': {
                (0, 0): {
                    'finite': None
                }
            }
        })
        evaluation['values'][(0, 0)]['finite'] = \
            self.CF * self.TR * s123 / (2 * s12) * sqrbrk
        return evaluation
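The kernels in these examples rely on the helper functions sij and tijk, which are not part of the excerpts. A rough, hypothetical sketch of what they might compute is given below, assuming the standard collinear parametrization (s_ij = 2 p_i.p_j reconstructed from momentum fractions and transverse momenta) and the Catani-Grazzini definition of t_{ij,k}; the conventions of the actual implementation may differ.

def sij(i, j, zs, kTs):
    # Hypothetical sketch: in the strict collinear limit, with momentum fractions z_i
    # and (spacelike) transverse momenta kT_i, the two-particle invariant is
    #   s_ij = 2 p_i.p_j = - z_i z_j (kT_i/z_i - kT_j/z_j)^2
    zi, zj = zs[i - 1], zs[j - 1]
    rel_kT = kTs[i - 1] / zi - kTs[j - 1] / zj
    return -zi * zj * rel_kT.square()

def tijk(i, j, k, zs, kTs, s_ij=None, s_ik=None, s_jk=None):
    # Hypothetical sketch of the Catani-Grazzini variable
    #   t_{ij,k} = ( 2 (z_i s_jk - z_j s_ik) + (z_i - z_j) s_ij ) / (z_i + z_j)
    zi, zj = zs[i - 1], zs[j - 1]
    return (2. * (zi * s_jk - zj * s_ik) + (zi - zj) * s_ij) / (zi + zj)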
Code example #3
    def evaluate_kernel(self, zs, kTs, parent):

        # Retrieve the collinear variables and compute basic quantities
        z1, z2, z3 = zs
        k1, k2, k3 = kTs
        s12 = sij(1, 2, zs, kTs)
        s13 = sij(1, 3, zs, kTs)
        s23 = sij(2, 3, zs, kTs)
        s123 = s12 + s13 + s23

        # Assemble kernel
        kernel = self.evaluate_unsymmetrized_kernel((z1, z2, z3),
                                                    (s12, s13, s23, s123), kTs)
        kernel += self.evaluate_unsymmetrized_kernel(
            (z2, z1, z3), (s12, s23, s13, s123), (k2, k1, k3))

        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'values': {
                (0, 0): {
                    'finite': None
                }
            }
        })
        evaluation['values'][(0, 0)]['finite'] = kernel
        return evaluation
Code example #4
    def evaluate_kernel(self, xs, kTs, parent):

        # Retrieve the collinear variable x
        x = xs[0]
        
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None}}
        })
        
        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = 1.
        # Correct for the ratio of the color-averaging factor of the real ME
        # initial-state flavor (quark) to that of the reduced Born ME (quark)
        initial_state_crossing_factor *= 1.
        
        z = 1./x

        # We re-use here the Altarelli-Parisi P_qg final-state kernel, including
        # its soft subtraction.
        # We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
        # P_qg           = self.CF * ( (1.+z**2)/(1.-z) )
        # CxS(P_qg)      = self.CF * ( 2 / (x - 1) ) = self.CF * ( 2 z / (1 - z) )
        # P_qg-CxS(P_qg) = self.CF * (1 + z**2 - 2*z) / (1 - z) = self.CF * ( 1 - z)

        norm = initial_state_crossing_factor * self.CF
        evaluation['values'][(0, 0)]['finite'] = norm * (1 - z)

        return evaluation
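The algebra quoted in the comments above (the change of variable z = 1/x and the soft-collinear subtraction P_qg - CxS(P_qg) = CF (1 - z)) can be verified with a short standalone check; the snippet below is purely illustrative and uses CF = 4/3.

CF = 4. / 3.
for x in (1.5, 2.0, 5.0):
    z = 1. / x
    # CxS(P_qg) written in terms of x equals the same expression written in terms of z
    assert abs(2. / (x - 1.) - 2. * z / (1. - z)) < 1e-12
    # P_qg minus its soft-collinear limit collapses to CF * (1 - z)
    P_qg = CF * (1. + z**2) / (1. - z)
    CxS_P_qg = CF * 2. * z / (1. - z)
    assert abs((P_qg - CxS_P_qg) - CF * (1. - z)) < 1e-12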
Code example #5
    def evaluate_kernel(self, zs, kTs, parent):

        # Retrieve the collinear variables
        z = zs[0]
        kT = kTs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None, ((parent,( kT, )), ), ],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None},
                                    (1, 0): {'finite': None}, }
        })
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #    = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.

        # We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
        # P_gg           = 2.*self.CA * ( (1.-z) / z + z / (1.- z) )
        # CxS(P_gg)      = 2.*self.CA * ( (1.-z) / z + z / (1.- z) )
        # SxC(P_gg)      = 2.*self.CA * ( 1 / z + 1 / (1.- z) )
        # P_gg-CxS(P_gg) = 0
        # P_gg-SxC(P_gg) = -4.*self.CA

        evaluation['values'][(0, 0)]['finite'] = -4.*self.CA
        evaluation['values'][(1, 0)]['finite'] = -2.*self.CA * 2.*z*(1.-z) / kT.square()
        return evaluation
Code example #6
    def kernel(self, zs, kTs, parent, reduced_kinematics):

        # Retrieve the collinear variables
        z = zs[0]
        kT = kTs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [
                None,
                ((parent, (kT, )), ),
            ],
            'color_correlations': [None],
            'reduced_kinematics': [reduced_kinematics],
            'values': {
                (0, 0, 0): {
                    'finite': None
                },
                (1, 0, 0): {
                    'finite': None
                },
            }
        })
        # Compute the kernel
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #    = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.
        evaluation['values'][(0, 0, 0)]['finite'] = \
            0.
        evaluation['values'][(1, 0, 0)]['finite'] = \
            -2. * self.CA * 2. * z * (1. - z) / kT.square()
        return evaluation
Code example #7
    def evaluate_kernel(self, xs, kTs, parent):

        # Retrieve the collinear variable x
        x = xs[0]
        
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None}}
        })
        
        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = 1.
        # Correct for the ratio of the color-averaging factor of the real ME
        # initial-state flavor (gluon) to that of the reduced Born ME (quark)
        initial_state_crossing_factor *= (self.NC/float(self.NC**2-1))
        
        z = 1./x

        norm = initial_state_crossing_factor * self.CF
        # We re-use here the Altarelli-Parisi P_gq final-state kernel
        evaluation['values'][(0, 0)]['finite'] = norm * ( (1.+(1.-z)**2)/z )

        return evaluation
Code example #8
    def evaluate_kernel(self, zs, kTs, parent):

        # Retrieve the collinear variables
        z = zs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None}}
        })
        evaluation['values'][(0, 0)]['finite'] = self.CF * (1.+(1.-z)**2)/z
        return evaluation
Code example #9
    def soft_kernel(self, evaluation, colored_partons, all_steps_info, global_variables):
        """Evaluate a collinear type of splitting kernel, which *does* need to know about the reduced process
        Should be specialised by the daughter class if not dummy
        """

        new_evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [ None, ],
            'color_correlations': [ ],
            'reduced_kinematics': evaluation['reduced_kinematics'],
            'values': { }
        })

        overall_lower_PS_point = all_steps_info[-1]['lower_PS_point']
        soft_leg_number = all_steps_info[-1]['bundles_info'][0]['final_state_children'][0]
        pr = all_steps_info[-1]['higher_PS_point'][soft_leg_number]
        colored_parton_numbers = sorted(colored_partons.keys())

        for i, a in enumerate(colored_parton_numbers):
            for b in colored_parton_numbers[i:]:
                # Write the eikonal for that pair
                if a!=b:
                    mult_factor = 1.
                else:
                    mult_factor = 1./2.

                pi = overall_lower_PS_point[a]
                pk = overall_lower_PS_point[b]
                composite_weight = EpsilonExpansion({'finite': 0.})
                for (sc, cc, rk), coll_weight in evaluation['values'].items():
                    if evaluation['spin_correlations'][sc] is None:
                        # We *subtract* this contribution because the non-spin-correlated contribution is -g^{\mu\nu}
                        composite_weight -= SoftKernels.eikonal_g(self, pi, pk, pr, spin_corr_vector=None)*EpsilonExpansion(coll_weight)
                    else:
                        # Normally the collinear current should have built spin-correlations with the leg number corresponding
                        # to the soft one in the context of this soft current
                        assert len(evaluation['spin_correlations'][sc])==1
                        parent_number, spin_corr_vecs = evaluation['spin_correlations'][sc][0]
                        assert soft_leg_number==parent_number
                        assert len(spin_corr_vecs)==1
                        spin_corr_vec = spin_corr_vecs[0]
                        composite_weight += SoftKernels.eikonal_g(self, pi, pk, pr, spin_corr_vector=spin_corr_vec)*EpsilonExpansion(coll_weight)
                new_evaluation['color_correlations'].append( ((a, b), ) )
                new_evaluation['values'][(0,len(new_evaluation['color_correlations'])-1,0)] = composite_weight*mult_factor

        return new_evaluation
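The weights built up in soft_kernel are EpsilonExpansion objects, whose class is not shown in these excerpts. A minimal, hypothetical stand-in supporting only the operations used above (construction from a dict, addition, subtraction, and multiplication by a plain number) is sketched below; the real class is richer (pole orders, products of two expansions, etc.).

class EpsilonExpansion(dict):
    """Hypothetical minimal stand-in: coefficients of a Laurent expansion in the
    dimensional regulator, keyed by order ('finite' is the only key used here)."""

    def __add__(self, other):
        result = EpsilonExpansion(self)
        for key, value in other.items():
            result[key] = result.get(key, 0.) + value
        return result

    def __sub__(self, other):
        result = EpsilonExpansion(self)
        for key, value in other.items():
            result[key] = result.get(key, 0.) - value
        return result

    def __mul__(self, factor):
        # Multiplication by a plain number, as in `composite_weight * mult_factor`.
        return EpsilonExpansion((key, value * factor) for key, value in self.items())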
Code example #10
    def evaluate_kernel(self, zs, kTs, parent):

        # Retrieve the collinear variables
        z = zs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None}}
        })
        # We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
        # P_gq           = self.CF * ((1.-z)**2 + 1.)/z
        # CxS(P_gq)      = self.CF * 2.*(1.-z) / z
        # SxC(P_gq)      = self.CF * 2. / z
        # P_gq-CxS(P_gq) = self.CF * z
        # P_gq-SxC(P_gq) = self.CF * ((1.-z)**2 - 1.)/z
        evaluation['values'][(0, 0)]['finite'] = self.CF * z
        return evaluation
Code example #11
    def evaluate_kernel(self, xs, kTs, parent):

        # Retrieve the collinear variable x
        x = xs[0]
        kT = kTs[0]

        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None, ((parent,( kT, )), ), ],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None},
                                    (1, 0): {'finite': None}, }
        })

        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = 1.
        # Correct for the ratio of the color-averaging factor of the real ME
        # initial-state flavor (gluon) to that of the reduced Born ME (gluon)
        initial_state_crossing_factor *= 1.
        
        z = 1./x
    
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #    = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.

        # We re-use here the Altarelli-Parisi P_gg final-state kernel, including
        # its soft subtraction.
        # We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
        # P_gg           = 2.*self.CA * ( (z/(1.-z)) + ((1.-z)/z) )
        # CxS(P_gg)      = 2.*self.CA * ( (z/(1.-z)) )
        # P_gg-CxS(P_gg) = 2.*self.CA * ((1.-z)/z)

        norm = initial_state_crossing_factor * 2. * self.CA
        evaluation['values'][(0, 0)]['finite'] =  norm * ((1.-z)/z)
        evaluation['values'][(1, 0)]['finite'] = -norm * 2.*z*(1.-z) / kT.square()
        return evaluation
Code example #12
    def kernel(self, zs, kTs, parent, reduced_kinematics):

        # Retrieve the collinear variables
        z = zs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'reduced_kinematics': [reduced_kinematics],
            'values': {
                (0, 0, 0): {
                    'finite': None
                }
            }
        })
        # Compute the kernel using
        # f9d0839fc58905d67367e3e67efabee05ee390f9:madgraph/iolibs/template_files/OLD_subtraction/cataniseymour/NLO/local_currents.py:146
        evaluation['values'][(0, 0, 0)]['finite'] = \
            self.CF*z
        return evaluation
Code example #13
    def evaluate_kernel(self, zs, kTs, parent):

        # Retrieve the collinear variables and compute basic quantities
        z1, z2, z3 = zs
        s12 = sij(1, 2, zs, kTs)
        s13 = sij(1, 3, zs, kTs)
        s23 = sij(2, 3, zs, kTs)
        misc.sprint(s12, s13, s23)
        s123 = s12 + s13 + s23
        # Assemble kernel
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'values': {}
        })
        ker = 0
        # ker += 2*C123(z1, z2, z3, s12, s13, s23, s123)
        evaluation['values'][(0, 0)] = {
            'finite': 0.5 * self.CF * self.TR * ker
        }
        return evaluation
Code example #14
    def evaluate_kernel(self, xs, kTs, parent):

        # Retrieve the collinear variable x
        x = xs[0]
        kT = kTs[0]

        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'  : [None, ((parent, (kT, )), ), ],
            'color_correlations' : [None],
            'values'             : {(0, 0): {'finite': None},
                                    (1, 0): {'finite': None}, }
        })

        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = -1.
        # Correct for the ratio of the color-averaging factor of the real ME
        # initial-state flavor (quark) to that of the reduced Born ME (gluon)
        initial_state_crossing_factor *= ((self.NC**2-1)/float(self.NC))
        
        z = 1./x

        # We re-use here the Altarelli-Parisi P_q\bar{q} final-state kernel
        
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #    = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.

        norm = initial_state_crossing_factor * self.TR
        evaluation['values'][(0, 0)]['finite'] = norm
        evaluation['values'][(1, 0)]['finite'] = norm * 4. * z*(1.-z) / kT.square()

        return evaluation
Code example #15
    def evaluate_subtraction_current(self,
                                     current,
                                     higher_PS_point=None,
                                     momenta_dict=None,
                                     reduced_process=None,
                                     hel_config=None,
                                     Q=None,
                                     **opts):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the higher phase-space point.")
        if momenta_dict is None:
            raise CurrentImplementationError(self.name() +
                                             " requires a momenta dictionary.")
        if reduced_process is None:
            raise CurrentImplementationError(self.name() +
                                             " requires a reduced_process.")
        if hel_config is not None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment.")
        if Q is None:
            raise CurrentImplementationError(
                self.name() +
                " requires specification of the total incoming momentum Q.")

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Retrieve leg numbers
        children = tuple(self.leg_numbers_map[i]
                         for i in sorted(self.leg_numbers_map.keys()))
        parent = momenta_dict.inv[frozenset(children)]

        # Perform mapping
        self.mapping_singular_structure.legs = self.get_recoilers(
            reduced_process, excluded=(parent, ))

        lower_PS_point, mapping_vars = self.mapping.map_to_lower_multiplicity(
            higher_PS_point,
            self.mapping_singular_structure,
            momenta_dict,
            compute_jacobian=self.divide_by_jacobian)

        # Retrieve kinematics
        # The Q variable of the mapping cannot be relied upon
        #Q = mapping_vars['Q']
        if self.has_initial_state:
            pC = higher_PS_point[children[0]] - sum(higher_PS_point[child]
                                                    for child in children[1:])
        else:
            pC = sum(higher_PS_point[child] for child in children)

        qC = lower_PS_point[parent]
        jacobian = mapping_vars.get('jacobian', 1)
        reduced_kinematics = (None, lower_PS_point)

        # Include the counterterm only in a part of the phase space
        if self.has_initial_state:
            pA = higher_PS_point[children[0]]
            pR = sum(higher_PS_point[child] for child in children[1:])
            # Initial state collinear cut
            if self.is_cut(Q=Q, pA=pA, pR=pR):
                return utils.SubtractionCurrentResult.zero(
                    current=current,
                    hel_config=hel_config,
                    reduced_kinematics=('IS_CUT', lower_PS_point))
        else:
            # Final state collinear cut
            if self.is_cut(Q=Q, pC=pC):
                return utils.SubtractionCurrentResult.zero(
                    current=current,
                    hel_config=hel_config,
                    reduced_kinematics=('IS_CUT', lower_PS_point))

        # Evaluate kernel
        # First construct variables necessary for its evaluation
        # pass the mapping variables to the variables function, except for Q which is provided externally
        mapping_vars.pop('Q', None)
        kernel_arguments = self.variables(higher_PS_point,
                                          qC,
                                          children,
                                          Q=Q,
                                          **mapping_vars)

        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [
                None,
            ],
            'color_correlations': [
                None,
            ],
            'reduced_kinematics': [reduced_kinematics],
            'values': {}
        })
        evaluation = self.kernel(evaluation, parent, *kernel_arguments)

        # Add the normalization factors
        pC2 = pC.square()
        norm = (8. * math.pi * alpha_s / pC2)**(len(children) - 1)
        norm *= self.factor(Q=Q, pC=pC, qC=qC)
        norm /= jacobian
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm

        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))
        return result
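The generic flow above delegates the phase-space restriction and the damping of the counterterm to self.is_cut and self.factor, which are not shown in these excerpts. In the simplest configuration both are trivial; the hypothetical no-op pair below is only an illustration (the keyword signatures and behaviour are assumptions, not taken from the source).

    def is_cut(self, Q=None, pC=None, qC=None, pS=None, pA=None, pR=None, **opts):
        # Never veto: include the counterterm over the full phase space.
        return False

    def factor(self, Q=None, pC=None, qC=None, pS=None, **opts):
        # No additional damping of the counterterm.
        return 1.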
Code example #16
    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, momenta_dict=None, reduced_process=None,
        hel_config=None, Q=None, **opts ):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before mapping." )
        if momenta_dict is None:
            raise CurrentImplementationError(
                self.name() + " requires a momentum routing dictionary." )
        if hel_config is not None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q." )


        """Important note about the IF CS:
        - in this scheme we want to use "tilded" momenta for the dipole legs in eikonals. This is explicitly implemented in the soft local current
        - this implies that the correct form for the local C(ir)S(r), taken as the collinear limit of the eikonals, is
        1/(p_r + p_i_tilde)^2 * (1-z_r)/z_r, where z_r = p_r.Q / ((p_r + p_i_tilde).Q)
        - Specializing to the case where the collinear partner of the soft particle is an initial-state particle (i = a), we have
        p_a_tilde = xi p_a and 2 p_a.Q = Q^2, so that the soft-collinear takes the form
        1/(p_r + xi p_a)^2 * xi/y_rQ, where y_rQ is the usual Hungarian variable.
        This simplifies to
        1/(p_r + p_a)^2 * 1/y_rQ, which is exactly the soft-collinear as computed *without* tilded variables (i.e. exactly eq. 5.29 of 0903.1218).

        As a result, we evaluate the counterterms in exactly the same way as in honest-to-god colorful.
        """

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        children = tuple(self.leg_numbers_map[i]
                         for i in sorted(self.leg_numbers_map.keys()))
        parent = momenta_dict.inv[frozenset(children)]

        # Perform mapping
        this_mapping_singular_structure = self.mapping_singular_structure.get_copy()
        this_mapping_singular_structure.legs = self.get_recoilers(reduced_process, excluded=(parent, ))
        lower_PS_point, mapping_vars = self.mapping.map_to_lower_multiplicity(
            higher_PS_point, this_mapping_singular_structure, momenta_dict,
            compute_jacobian=self.divide_by_jacobian )
        reduced_kinematics = (None, lower_PS_point)
        jacobian = mapping_vars.get('jacobian', 1.)

        # Include the counterterm only in a part of the phase space
        # children are the set of particles that are going unresolved.
        # Here we have C and S going collinear, with S soft.
        # The parent is C mapped with a soft mapping, usually referred to as Ctilde.
        # S is just removed by the soft mapping.
        # Here S is a single particle, but we obtain it as a list soft_children
        # to illustrate how multiple soft particles would be obtained.
        pCtilde = lower_PS_point[parent]
        soft_children = [ self.leg_numbers_map[soft_leg_number] for soft_leg_number in self.leg_numbers_map if soft_leg_number>9 ]
        pS = sum(higher_PS_point[child] for child in soft_children)
        collinear_final_children = [ self.leg_numbers_map[soft_leg_number] for soft_leg_number in self.leg_numbers_map if
                                                                                                0 < soft_leg_number <= 9 ]
        if len(collinear_final_children)>0:
            pCfinal = sum(higher_PS_point[child] for child in collinear_final_children)
        else:
            pCfinal = vectors.LorentzVector()
        pCinitial = higher_PS_point[self.leg_numbers_map[0]]
        pCmother = pCinitial - pCfinal - pS
        if self.is_cut(Q=Q, pC=pCmother, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config, reduced_kinematics=('IS_CUT', lower_PS_point))

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'reduced_kinematics': [reduced_kinematics],
            'values': { }
        })

        # Evaluate kernel
        kernel_arguments = self.variables(higher_PS_point, pCtilde, children, Q=Q)
#        kernel_arguments = self.variables(higher_PS_point, pCinitial, children, Q=Q)

        # There is no need for the ratio of the color-averaging factors of the real ME
        # initial-state flavor and of the reduced Born ME, as they are either both
        # gluons or both quarks
        evaluation = self.kernel(evaluation, parent, *kernel_arguments)

        # Add the normalization factors
        norm = (8. * math.pi * alpha_s)**(len(soft_children)+len(collinear_final_children)) / ((2.*pCtilde.dot(pS))*pS.square())
        norm *= self.factor(Q=Q, pC=pCmother, pS=pS)
        if self.divide_by_jacobian:
            norm /= jacobian
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm

        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())))
        return result
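The docstring above claims that, for an initial-state collinear partner, the soft-collinear written with tilded momenta reduces to 1/(p_r + p_a)^2 * 1/y_rQ. This can be checked numerically with a throwaway snippet using plain four-vectors and an ad-hoc Minkowski product (purely illustrative, not part of the source):

import math

def dot(p, q):
    # Minkowski product with signature (+, -, -, -)
    return p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]

# Massless initial-state momenta along the beam axis and a generic massless p_r
p_a = (500., 0., 0., 500.)
p_b = (500., 0., 0., -500.)
Q = tuple(a + b for a, b in zip(p_a, p_b))
p_r = (120., 120. * math.sin(0.7), 0., 120. * math.cos(0.7))
xi = 0.63

# Left-hand side: collinear limit of the eikonal written with the tilded momentum xi*p_a
p_a_tilde = tuple(xi * v for v in p_a)
p_sum = tuple(r + a for r, a in zip(p_r, p_a_tilde))
z_r = dot(p_r, Q) / dot(p_sum, Q)
lhs = (1. - z_r) / z_r / dot(p_sum, p_sum)

# Right-hand side: the same object without tilded variables (eq. 5.29 of 0903.1218)
y_rQ = 2. * dot(p_r, Q) / dot(Q, Q)
p_ra = tuple(r + a for r, a in zip(p_r, p_a))
rhs = 1. / (dot(p_ra, p_ra) * y_rQ)

assert abs(lhs / rhs - 1.) < 1e-10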
Code example #17
    def evaluate_subtraction_current(
            self, current,
            higher_PS_point=None, momenta_dict=None, reduced_process=None,
            hel_config=None, Q=None, **opts):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before and after mapping.")
        if momenta_dict is None:
            raise CurrentImplementationError(
                self.name() + " requires a momentum routing dictionary.")
        if reduced_process is None:
            raise CurrentImplementationError(
                self.name() + " requires a reduced_process.")
        if hel_config is not None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment.")
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q.")

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Now find all colored leg numbers in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))
        soft_momenta = [ higher_PS_point[self.leg_numbers_map[soft_leg_number]] for soft_leg_number in
                                                                             self.leg_numbers_map if soft_leg_number>9 ]
        pS = sum(soft_momenta)

        # Perform mapping
        this_mapping_singular_structure = self.mapping_singular_structure.get_copy()
        this_mapping_singular_structure.legs = self.get_recoilers(reduced_process)
        lower_PS_point, mapping_vars = self.mapping.map_to_lower_multiplicity(
            higher_PS_point, this_mapping_singular_structure, momenta_dict,
            compute_jacobian=self.divide_by_jacobian)
        reduced_kinematics = (None, lower_PS_point)
        jacobian = mapping_vars.get('jacobian', 1.)

        # Include the counterterm only in a part of the phase space
        if self.is_cut(Q=Q, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config, reduced_kinematics=('IS_CUT', lower_PS_point))

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [],
            'reduced_kinematics': [reduced_kinematics, ],
            'values': {}
        })

        # Normalization factors
        norm = (8. * math.pi * alpha_s)**(len(soft_momenta))*(1./pS.square()**2)
        norm *= self.factor(Q=Q, pS=pS)
        if self.divide_by_jacobian:
            norm /= jacobian

        colored_partons_momenta = vectors.LorentzVectorDict()
        for colored_parton_number in all_colored_parton_numbers:
            # We want to use the reduced kinematics for our soft current
            colored_partons_momenta[colored_parton_number] = lower_PS_point[colored_parton_number]
            # Alternatively, the expression below would have given us the resolved one
            #colored_partons_momenta[colored_parton_number] = sum(higher_PS_point[child] for child in momenta_dict[colored_parton_number])

        color_correlation_index = 0
        for color_correlator, weight in self.soft_kernel(
                self, colored_partons_momenta, soft_momenta, all_colored_parton_numbers):
            evaluation['color_correlations'].append(color_correlator)
            complete_weight = weight*norm
            evaluation['values'][(0, color_correlation_index, 0)] = {'finite': complete_weight[0]}
            color_correlation_index += 1

        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())))
        return result
Code example #18
    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, momenta_dict=None, reduced_process=None,
        hel_config=None, **opts ):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the higher phase-space point." )
        if momenta_dict is None:
            raise CurrentImplementationError(
                self.name() + " requires a momenta dictionary." )
        if reduced_process is None:
            raise CurrentImplementationError(
                self.name() + " requires a reduced_process.")
        if hel_config is not None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r    = model_param_dict['MU_R']

        # Retrieve leg numbers
        soft_leg_number = self.leg_numbers_map[0]

        # Perform mapping
        self.mapping_singular_structure.legs = self.get_recoilers(reduced_process)
        lower_PS_point, mapping_vars = soft_mapping.map_to_lower_multiplicity(
            higher_PS_point, self.mapping_singular_structure, momenta_dict,
            compute_jacobian=self.divide_by_jacobian )

        # Retrieve kinematics
        Q = mapping_vars['Q']
        pS = higher_PS_point[soft_leg_number]
        jacobian = mapping_vars.get('jacobian', 1)

        # Include the counterterm only in a part of the phase space
        if self.is_cut(Q=Q, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config,
                reduced_kinematics=(None, lower_PS_point))

        # Normalization factors
        norm = -4 * math.pi * alpha_s
        norm *= self.factor(Q=Q, pS=pS)
        norm /= jacobian

        # Find all colored leg numbers in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))

        # Initialize the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [],
            'reduced_kinematics': [(None, lower_PS_point)],
            'values': {}
        })

        # Loop over colored parton number pairs (a, b)
        # and add the corresponding contributions to this current
        color_correlation_index = 0
        for i, a in enumerate(all_colored_parton_numbers):
            # Use the symmetry of the color correlation and soft current (a,b) <-> (b,a)
            for b in all_colored_parton_numbers[i:]:
                # Write the eikonal for that pair
                if a != b:
                    mult_factor = 2.
                else:
                    mult_factor = 1.
                pa = higher_PS_point[a]
                pb = higher_PS_point[b]
                eikonal = self.eikonal(pa, pb, pS)
                evaluation['color_correlations'].append( ((a, b), ) )
                evaluation['values'][(0, color_correlation_index, 0)] = {
                    'finite': norm * mult_factor * eikonal }
                color_correlation_index += 1
        
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())) )
        return result
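self.eikonal is not defined in these excerpts. A plausible sketch, assuming the standard massless scalar eikonal factor p_a.p_b / ((p_a.p_S)(p_b.p_S)) with all coupling and colour factors carried by `norm` outside of it, is shown below; the conventions of the real implementation may differ.

    def eikonal(self, pa, pb, ps):
        # Hypothetical sketch of the scalar eikonal factor for a soft gluon
        # emitted off the dipole (a, b):  pa.pb / ((pa.ps) * (pb.ps))
        return pa.dot(pb) / (pa.dot(ps) * pb.dot(ps))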
Code example #19
    def evaluate_subtraction_current(self,
                                     current,
                                     higher_PS_point=None,
                                     lower_PS_point=None,
                                     leg_numbers_map=None,
                                     reduced_process=None,
                                     hel_config=None,
                                     Q=None,
                                     **opts):
        if higher_PS_point is None or lower_PS_point is None:
            raise CurrentImplementationError(
                self.name() +
                " needs the phase-space points before and after mapping.")
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                self.name() +
                " requires a leg numbers map, i.e. a momentum dictionary.")
        if reduced_process is None:
            raise CurrentImplementationError(self.name() +
                                             " requires a reduced_process.")
        if hel_config is not None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment.")
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q.")

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Now find all colored leg numbers in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))

        # Identify the soft leg numbers and momenta
        soft_leg_number_A = current.get('singular_structure').legs[0].n
        soft_leg_number_B = current.get('singular_structure').legs[1].n
        pA = higher_PS_point[soft_leg_number_A]
        pB = higher_PS_point[soft_leg_number_B]
        pS = pA + pB

        # Include the counterterm only in a part of the phase space
        if self.is_cut(Q=Q, pS=pS):
            return utils.SubtractionCurrentResult.zero(current=current,
                                                       hel_config=hel_config)

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [],
            'values': {}
        })

        # Normalization factors
        couplings_factors = 4. * math.pi * alpha_s
        norm = couplings_factors**2 * self.factor(Q=Q, pS=pS)

        # Keep track of the color correlators added
        color_correlators_added = {}
        color_correlation_max_index = 0
        # Now loop over the colored parton numbers to implement the squared double-soft
        # current flow. Note that significant improvement can be obtained by taking advantage
        # of the symmetries, as well as writing fully expanded hard-coded expressions with
        # all dot-products cached.
        # For efficiency, we choose here to define coefficients of each single product of
        # correlators. So, typically in the abelian piece, the color correlator reads:
        #       ( '{}' denotes an anti-commutator below and '.' denotes a dot-product )
        #    { T_i . T_j, T_k . T_l }
        # And we will register *two* entries:
        #    evaluation['color_correlations'].append(
        #      (
        #         ( ( (i,-1,i), (k,-2,k) ), ( (j,-1,j), (l,-2,l) ) ),
        #      )
        #    )
        # And:
        #    evaluation['color_correlations'].append(
        #      (
        #         ( ( (i,-2,i), (k,-1,k) ), ( (j,-2,j), (l,-1,l) ) ),
        #      )
        #    )
        # As opposed to directly defining their sum:
        #    evaluation['color_correlations'].append(
        #            (
        #                    ( ( (i,-1,i), (k,-2,k) ), ( (j,-1,j), (l,-2,l) ) ),
        #                    ( ( (i,-2,i), (k,-1,k) ), ( (j,-2,j), (l,-1,l) ) )
        #            )
        #    )
        for i in all_colored_parton_numbers:
            for j in all_colored_parton_numbers:

                # Compute the non-abelian eikonal
                pi = sum(higher_PS_point[child]
                         for child in leg_numbers_map[i])
                pj = sum(higher_PS_point[child]
                         for child in leg_numbers_map[j])
                # pi = lower_PS_point[i]
                # pj = lower_PS_point[j]
                non_abelian_eikonal = self.non_abelian_eikonal(pi, pj, pA, pB)
                non_abelian_kernel = -self.CA * norm * non_abelian_eikonal

                # Implement the non-abelian piece
                non_abelian_correlator = ((((i, -1, i), ), ((j, -1, j), )), )
                if non_abelian_correlator in color_correlators_added:
                    color_correlation_index = color_correlators_added[
                        non_abelian_correlator]
                    evaluation['values'][(0, color_correlation_index
                                          )]['finite'] += non_abelian_kernel
                else:
                    evaluation['color_correlations'].append(
                        non_abelian_correlator)
                    color_correlation_index = color_correlation_max_index
                    color_correlators_added[
                        non_abelian_correlator] = color_correlation_max_index
                    color_correlation_max_index += 1
                    evaluation['values'][(0, color_correlation_index)] = {
                        'finite': non_abelian_kernel
                    }

                for k in all_colored_parton_numbers:
                    for l in all_colored_parton_numbers:

                        # Compute the abelian eikonal
                        pk = sum(higher_PS_point[child]
                                 for child in leg_numbers_map[k])
                        pl = sum(higher_PS_point[child]
                                 for child in leg_numbers_map[l])
                        # pk = lower_PS_point[k]
                        # pl = lower_PS_point[l]
                        eik_ij = self.eikonal(pi, pj, pA)
                        eik_kl = self.eikonal(pk, pl, pB)
                        abelian_kernel = 0.5 * norm * eik_ij * eik_kl

                        # Implement the abelian piece
                        abelian_correlator_A = (
                            self.create_CataniGrazzini_correlator((i, j),
                                                                  (k, l)), )
                        abelian_correlator_B = (
                            self.create_CataniGrazzini_correlator((k, l),
                                                                  (i, j)), )

                        for correlator in [
                                abelian_correlator_A, abelian_correlator_B
                        ]:
                            if correlator in color_correlators_added:
                                color_correlation_index = color_correlators_added[
                                    correlator]
                                #misc.sprint('Adding %f ((%d,%d,%d)->%f, (%d,%d,%d)->%f) to CC: %d, %s'%\
                                #            (abelian_kernel,
                                #             i,j,soft_leg_number_A,self.eikonal(PS_point, i, j, soft_leg_number_A),
                                #             k,l,soft_leg_number_B,self.eikonal(PS_point, k, l, soft_leg_number_B),
                                #             color_correlation_index, str(correlator)
                                #            ))
                                evaluation['values'][(
                                    0, color_correlation_index
                                )]['finite'] += abelian_kernel
                            else:
                                evaluation['color_correlations'].append(
                                    correlator)
                                color_correlation_index = color_correlation_max_index
                                color_correlators_added[
                                    correlator] = color_correlation_max_index
                                color_correlation_max_index += 1
                                evaluation['values'][(
                                    0, color_correlation_index)] = {
                                        'finite': abelian_kernel
                                    }
                                #misc.sprint('Adding %f ((%d,%d,%d)->%f, (%d,%d,%d)->%f) to CC: %d, %s'%\
                                #            (abelian_kernel,
                                #             i,j,soft_leg_number_A,self.eikonal(PS_point, i, j, soft_leg_number_A),
                                #             k,l,soft_leg_number_B,self.eikonal(PS_point, k, l, soft_leg_number_B),
                                #             color_correlation_index, str(correlator)
                                #            ))

        result = utils.SubtractionCurrentResult()
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))

        #misc.sprint('==BELOW LIST CC==')
        #for i, cc in enumerate(evaluation['color_correlations']):
        #    misc.sprint('Color correlator: %d : %s = %f'%(i, cc, evaluation['values'][(0,i)]['finite']))
        #misc.sprint('==ABOVE LIST CC==')

        return result
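create_CataniGrazzini_correlator is used above but not shown. A hypothetical sketch consistent with the comment block in the code (one single product of colour insertions (T_i.T_j)(T_k.T_l), with the two independent gluon indices labelled -1 and -2) could read:

    def create_CataniGrazzini_correlator(self, pair_one, pair_two):
        # Hypothetical sketch: encode one term of the anticommutator
        # { T_i.T_j , T_k.T_l } as a single product of colour insertions.
        i, j = pair_one
        k, l = pair_two
        return (((i, -1, i), (k, -2, k)), ((j, -1, j), (l, -2, l)))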
Code example #20
    def evaluate_kernel(self, zs, kTs, parent):
        #misc.sprint("HELLO")

        # Retrieve the collinear variables and compute basic quantities
        z1, z2, z3 = zs
        k1, k2, k3 = kTs
        s12 = sij(1, 2, zs, kTs)
        s13 = sij(1, 3, zs, kTs)
        s23 = sij(2, 3, zs, kTs)
        s123 = s12 + s13 + s23
        CA = self.CA

        k12 = k1 + k2
        k23 = k2 + k3
        k13 = k1 + k3
        u12 = k1 / z1 - k2 / z2
        u23 = k2 / z2 - k3 / z3
        u31 = k1 / z1 - k3 / z3

        # Assemble kernel
        kernel_gmunu = self.evaluate_permuted_gmunu(z1, z2, z3, s12, s13, s23)

        kernel_u12mu_u12nu = (8 * CA**2 * s123 * z1**2 *
                              z2**2) / (s12**2 * (1 - z3) * z3)

        kernel_u23mu_u23nu = (8 * CA**2 * s123 * z2**2 *
                              z3**2) / (s23**2 * (1 - z1) * z1)

        kernel_u13mu_u13nu = (8 * CA**2 * s123 * z1**2 *
                              z3**2) / (s13**2 * (1 - z2) * z2)

        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [
                None,
                ((parent, (k1, )), ),
                ((parent, (k2, )), ),
                ((parent, (k3, )), ),
                ((parent, (u12, )), ),
                ((parent, (u23, )), ),
                ((parent, (u31, )), ),
            ],
            'color_correlations': [None],
            'values': {
                (0, 0): {
                    'finite': -kernel_gmunu
                },  # for the minus sign: Madgraph multiplies this by -g_mu_nu
                (1, 0): {
                    'finite': self.evaluatecoefk1k1(z1, z2, z3, s12, s13, s23)
                },
                (2, 0): {
                    'finite': self.evaluatecoefk2k2(z1, z2, z3, s12, s13, s23)
                },
                (3, 0): {
                    'finite': self.evaluatecoefk3k3(z1, z2, z3, s12, s13, s23)
                },
                (4, 0): {
                    'finite': kernel_u12mu_u12nu
                },
                (5, 0): {
                    'finite': kernel_u23mu_u23nu
                },
                (6, 0): {
                    'finite': kernel_u13mu_u13nu
                },
            }
        })

        return evaluation
Code example #21
    def evaluate_kernel(self, zs, kTs, parent):

        # Retrieve the collinear variables and compute basic quantities
        z1, z2, z3 = zs
        k1, k2, k3 = kTs
        s12 = sij(1, 2, zs, kTs)
        s13 = sij(1, 3, zs, kTs)
        s23 = sij(2, 3, zs, kTs)
        s123 = s12 + s13 + s23
        CF = self.CF
        CA = self.CA
        TR = self.TR
        t231 = tijk(2, 3, 1, zs, kTs, s_ij=s23, s_ik=s12, s_jk=s13)
        t321 = tijk(3, 2, 1, zs, kTs, s_ij=s23, s_ik=s13, s_jk=s12)

        k23 = k2 + k3
        u23 = k2 / z2 - k3 / z3

        # Assemble kernel
        kernel_gmunu = (CF * TR * (2 - s23**2 / (s12 * s13) - s123**2 /
                                   (s12 * s13)) + CA * TR *
                        (0.5 + s123**2 / (s12 * s13) + t231**2 /
                         (4. * s23**2) + t321**2 / (4. * s23**2) + s123 /
                         (s23 * (1 - z1)) - s123 / (2. * s12 *
                                                    (1 - z1) * z1) - s123 /
                         (2. * s13 * (1 - z1) * z1) - s123 / (s23 *
                                                              (1 - z1) * z1) -
                         (2 * s123 * z1) / (s23 * (1 - z1)) + (s123**2 * z2) /
                         (s12 * s23 * (1 - z1)) + (s123 * z2) /
                         (2. * s13 * (1 - z1) * z1) - (s123**2 * z2) /
                         (2. * s12 * s23 * (1 - z1) * z1) + (s123**2 * z3) /
                         (s13 * s23 * (1 - z1)) + (s123 * z3) /
                         (2. * s12 * (1 - z1) * z1) - (s123**2 * z3) /
                         (2. * s13 * s23 * (1 - z1) * z1)))
        kernel_k1mu_k1nu = ((2 * CA * TR * s123) / (s12 * s13) -
                            (4 * CF * TR * s123) / (s12 * s13))
        kernel_k2mu_k2nu = ((-4 * CF * TR * s123) / (s12 * s13) + CA * TR *
                            (((4 * s123) / (s12 * s13) +
                              (s123 *
                               ((-16 * z3**2) / ((1 - z1) * z1) - 4 *
                                (1 + (2 * (-z1 + z2) * z3) /
                                 ((1 - z1) * z1)))) / (s13 * s23)) / 4. +
                             ((4 * s123) / (s12 * s13) +
                              (s123 * (8 - 4 * (1 + (2 * z2 * (-z1 + z3)) /
                                                ((1 - z1) * z1)))) /
                              (s12 * s23)) / 4.))
        kernel_k3mu_k3nu = ((-4 * CF * TR * s123) / (s12 * s13) + CA * TR *
                            (((4 * s123) / (s12 * s13) +
                              (s123 * (8 - 4 * (1 + (2 * (-z1 + z2) * z3) /
                                                ((1 - z1) * z1)))) /
                              (s13 * s23)) / 4. +
                             ((4 * s123) / (s12 * s13) +
                              (s123 *
                               ((-16 * z2**2) / ((1 - z1) * z1) - 4 *
                                (1 + (2 * z2 * (-z1 + z3)) /
                                 ((1 - z1) * z1)))) / (s12 * s23)) / 4.))
        kernel_k23mu_k23nu = ((4 * CF * TR * s123) / (s12 * s13) + CA * TR *
                              (((-4 * s123) / (s12 * s13) +
                                (4 * s123 *
                                 (1 + (2 * (-z1 + z2) * z3) /
                                  ((1 - z1) * z1))) / (s13 * s23)) / 4. +
                               ((-4 * s123) / (s12 * s13) +
                                (4 * s123 *
                                 (1 + (2 * z2 * (-z1 + z3)) /
                                  ((1 - z1) * z1))) / (s12 * s23)) / 4.))

        kernel_u23mu_u23nu = ((-8 * CA * TR * s123 * z2**2 * z3**2) /
                              (s23**2 * (1 - z1) * z1))

        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [
                None,
                ((parent, (k1, )), ),
                ((parent, (k2, )), ),
                ((parent, (k3, )), ),
                ((parent, (k23, )), ),
                ((parent, (u23, )), ),
            ],
            'color_correlations': [None],
            'values': {
                (0, 0): {
                    'finite': -kernel_gmunu
                },  # for the minus sign: Madgraph multiplies this by -g_mu_nu
                (1, 0): {
                    'finite': kernel_k1mu_k1nu
                },
                (2, 0): {
                    'finite': kernel_k2mu_k2nu
                },
                (3, 0): {
                    'finite': kernel_k3mu_k3nu
                },
                (4, 0): {
                    'finite': kernel_k23mu_k23nu
                },
                (5, 0): {
                    'finite': kernel_u23mu_u23nu
                },
            }
        })

        return evaluation
Code example #22
    def evaluate_subtraction_current(self,
                                     current,
                                     higher_PS_point=None,
                                     momenta_dict=None,
                                     reduced_process=None,
                                     hel_config=None,
                                     Q=None,
                                     **opts):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before mapping.")
        if momenta_dict is None:
            raise CurrentImplementationError(
                self.name() + " requires a momentum routing dictionary.")
        if hel_config is not None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment.")
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q.")

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Perform mapping
        this_mapping_singular_structure = self.mapping_singular_structure.get_copy(
        )
        this_mapping_singular_structure.legs = self.get_recoilers(
            reduced_process)
        lower_PS_point, mapping_vars = self.mapping.map_to_lower_multiplicity(
            higher_PS_point,
            this_mapping_singular_structure,
            momenta_dict,
            compute_jacobian=self.divide_by_jacobian)
        reduced_kinematics = (None, lower_PS_point)

        # Include the counterterm only in a part of the phase space
        children = tuple(self.leg_numbers_map[i]
                         for i in sorted(self.leg_numbers_map.keys()))
        pC_child = higher_PS_point[children[0]]
        pS = higher_PS_point[children[1]]
        parent = momenta_dict.inv[frozenset(children)]
        if self.is_cut(Q=Q, pC=pC_child, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current,
                hel_config=hel_config,
                reduced_kinematics=('IS_CUT', lower_PS_point))
        pC_mother = pC_child - pS
        pC_tilde = lower_PS_point[parent]

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'reduced_kinematics': [reduced_kinematics],
            'values': {
                (0, 0, 0): {
                    'finite': None
                }
            }
        })

        # Evaluate kernel
        xs, kTs = self.variables(higher_PS_point, pC_tilde, children, Q=Q)
        x = xs[0]
        # See Eq. (4.17) of NNLO compatible NLO scheme publication arXiv:0903.1218v2

        # There is no need for the ratio of the color-averaging factors of the real ME
        # initial-state flavor and of the reduced Born ME, as they are either both
        # gluons or both quarks

        evaluation['values'][(0, 0, 0)]['finite'] = \
            self.color_charge * (2. / (1. - x))

        # Add the normalization factors
        # Note: normalising with (pC_tilde+pS).square() will *not* work!
        norm = 8. * math.pi * alpha_s / (pC_child + pS).square()
        norm *= self.factor(Q=Q, pC=pC_mother, pS=pS)
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm

        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))
        return result
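
Only the soft-enhanced piece 2/(1 - x) of the splitting kernel enters the value above, as the reference to Eq. (4.17) of arXiv:0903.1218 suggests. A small standalone check (illustrative only, not from the code base) that, for a quark parent, the full q -> q g kernel per unit of CF, (1 + x^2)/(1 - x), reduces to 2/(1 - x) as x -> 1:

# Illustrative check: 2/(1 - x) is the x -> 1 (soft) limit of (1 + x^2)/(1 - x).
for x in (0.9, 0.99, 0.999):
    full = (1. + x**2) / (1. - x)
    soft = 2. / (1. - x)
    print(x, full / soft)  # the ratio tends to 1 as x -> 1
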
Code Example #23
    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, lower_PS_point=None,
        leg_numbers_map=None, reduced_process=None, hel_config=None,
        Q=None, **opts ):
        if higher_PS_point is None or lower_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before and after mapping." )
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                self.name() + " requires a leg numbers map, i.e. a momentum dictionary." )
        if reduced_process is None:
            raise CurrentImplementationError(
                self.name() + " requires a reduced_process.")
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q." )

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r    = model_param_dict['MU_R']

        # Now find all colored leg numbers in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color')==1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))
        soft_leg_number = current.get('singular_structure').legs[0].n

        pS = higher_PS_point[soft_leg_number]

        # Include the counterterm only in a part of the phase space
        if self.is_cut(Q=Q, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config)

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'   : [ None ],
            'color_correlations'  : [],
            'values'              : {}
        })
        
        # Normalization factors
        norm = -4. * math.pi * alpha_s
        norm *= self.factor(Q=Q, pS=pS)

        color_correlation_index = 0
        # Now loop over the colored parton number pairs (a,b)
        # and add the corresponding contributions to this current
        for i, a in enumerate(all_colored_parton_numbers):
            # Use the symmetry of the color correlation and soft current (a,b) <-> (b,a)
            for b in all_colored_parton_numbers[i:]:
                # Write the eikonal for that pair
                if a!=b:
                    mult_factor = 2.
                else:
                    mult_factor = 1.
                pa = sum(higher_PS_point[child] for child in leg_numbers_map[a])
                pb = sum(higher_PS_point[child] for child in leg_numbers_map[b])
                # pa = lower_PS_point[a]
                # pb = lower_PS_point[b]
                eikonal = self.eikonal(pa, pb, pS)
                evaluation['color_correlations'].append( ((a, b), ) )
                evaluation['values'][(0, color_correlation_index)] = {
                    'finite': norm * mult_factor * eikonal }
                color_correlation_index += 1
        
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())) )
        return result
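
The helper self.eikonal(pa, pb, pS) is not shown in this listing; presumably it returns the standard massless eikonal factor pa.pb / ((pa.pS) * (pb.pS)), possibly up to an overall constant fixed by the class. A minimal, self-contained sketch under that assumption:

def minkowski_dot(p, q):
    # Mostly-minus metric; 4-vectors given as (E, px, py, pz).
    return p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]

def eikonal_sketch(pa, pb, pS):
    # Standard massless eikonal factor; the actual class method may differ by a normalisation.
    return minkowski_dot(pa, pb) / (minkowski_dot(pa, pS) * minkowski_dot(pb, pS))

pa = (1., 0., 0., 1.)
pb = (1., 0., 0., -1.)
pS = (0.1, 0.1, 0., 0.)
print(eikonal_sketch(pa, pb, pS))  # grows without bound as pS becomes soft or collinear to pa or pb
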
Code Example #24
    def evaluate_subtraction_current(self,
                                     current,
                                     higher_PS_point=None,
                                     momenta_dict=None,
                                     reduced_process=None,
                                     hel_config=None,
                                     Q=None,
                                     **opts):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before mapping.")
        if momenta_dict is None:
            raise CurrentImplementationError(
                self.name() + " requires a momentum routing dictionary.")
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment.")
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q.")

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        children = tuple(self.leg_numbers_map[i]
                         for i in sorted(self.leg_numbers_map.keys()))
        parent = momenta_dict.inv[frozenset(children)]

        # Perform mapping
        this_mapping_singular_structure = self.mapping_singular_structure.get_copy()
        this_mapping_singular_structure.legs = self.get_recoilers(
            reduced_process, excluded=(parent, ))
        lower_PS_point, mapping_vars = self.mapping.map_to_lower_multiplicity(
            higher_PS_point,
            this_mapping_singular_structure,
            momenta_dict,
            compute_jacobian=self.divide_by_jacobian)
        reduced_kinematics = (None, lower_PS_point)

        # Include the counterterm only in a part of the phase space
        # The children are the set of particles that become unresolved.
        # Here we have C and S going collinear, with S soft.
        # The parent is C mapped with a soft mapping, usually referred to as Ctilde.
        # S is simply removed by the soft mapping.
        # Here S is a single particle, but we build it as the list soft_children
        # to illustrate how multiple soft particles would be handled.
        pCtilde = lower_PS_point[parent]
        soft_children = [
            self.leg_numbers_map[1],
        ]

        pS = sum(higher_PS_point[child] for child in soft_children)
        if self.is_cut(Q=Q, pC=pCtilde, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current,
                hel_config=hel_config,
                reduced_kinematics=('IS_CUT', lower_PS_point))

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'reduced_kinematics': [
                reduced_kinematics,
            ],
            'values': {
                (0, 0, 0): {
                    'finite': None
                }
            }
        })

        # Evaluate kernel
        zs = self.variables([pS, pCtilde], Q)
        z = zs[0]
        evaluation['values'][(
            0, 0, 0)]['finite'] = self.color_charge * 2. * (1. - z) / z

        # Add the normalization factors
        s12 = (pCtilde + pS).square()
        norm = 8. * math.pi * alpha_s / s12
        norm *= self.factor(Q=Q, pC=pCtilde, pS=pS)
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm

        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))
        return result
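
A compact sketch of the weight assembled above, assuming the single variable returned by self.variables is z = pS.Q / ((pS + pCtilde).Q) (consistent with the note in the next example); the momenta, the dot-product helper and the coupling value are illustrative only, and the self.factor damping is omitted:

import math

def dot(p, q):
    return p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]

def soft_collinear_weight(pS, pCtilde, Q, color_charge, alpha_s):
    # Assumed definitions: z = pS.Q / ((pS + pCtilde).Q) and s12 = (pS + pCtilde)^2.
    p12 = tuple(a + b for a, b in zip(pS, pCtilde))
    z = dot(pS, Q) / dot(p12, Q)
    s12 = dot(p12, p12)
    # Kernel 2*(1 - z)/z times the 8*pi*alpha_s/s12 normalisation used above.
    return 8. * math.pi * alpha_s / s12 * color_charge * 2. * (1. - z) / z

pS      = (0.1, 0.05, 0., 0.05)
pCtilde = (1.0, 0., 0., 1.0)
Q       = (2.0, 0., 0., 0.)
print(soft_collinear_weight(pS, pCtilde, Q, color_charge=4. / 3., alpha_s=0.118))
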
Code Example #25
    def evaluate_subtraction_current(self,
                                     current,
                                     higher_PS_point=None,
                                     momenta_dict=None,
                                     reduced_process=None,
                                     hel_config=None,
                                     Q=None,
                                     **opts):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space point before mapping.")
        if momenta_dict is None:
            raise CurrentImplementationError(
                self.name() + " requires a momentum routing dictionary.")
        if reduced_process is None:
            raise CurrentImplementationError(self.name() +
                                             " requires a reduced_process.")
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment.")
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q.")
        """Important note about the IF CS:
        - in this scheme we want to use "tilded" momenta for the dipole legs in eikonals. This is explicitly implemented in the soft local current
        - this implies that the correct form for the local C(ir)S(r) taken as the collinear limit of the eikonals is 
        1/ (p_r + p_i_tilde)^2 (1-z_r)/z_r where z_r = p_r.Q/(p_r+p_i_tilde).Q
        - Specializing to the case where the collinear partner of the soft particle is an initial state particle (i = a ), we have 
        p_a_tilde = xi p_a and 2p_a.Q = Q^2 so that the soft-collinear takes the form
        1/(p_r+xi p_a)^2 * xi/y_rQ where y_rQ is the usual Hungarian variable
        this simplifies to 
        1/(p_r+p_a)^2 * 1/y_rQ which is exactly the soft collinear as computed *without* tilded variables (i.e. exactly eq.5.29 of 0903.1218)
        
        As a result we use exactly the same way of evaluating the counterterms as in honest-to-god colorful.
        """

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Now find all colored leg numbers in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))
        soft_leg_number = self.leg_numbers_map[0]

        pS = higher_PS_point[soft_leg_number]

        # Perform mapping
        this_mapping_singular_structure = self.mapping_singular_structure.get_copy()
        this_mapping_singular_structure.legs = self.get_recoilers(
            reduced_process)
        lower_PS_point, mapping_vars = self.mapping.map_to_lower_multiplicity(
            higher_PS_point,
            this_mapping_singular_structure,
            momenta_dict,
            compute_jacobian=self.divide_by_jacobian)
        reduced_kinematics = (None, lower_PS_point)

        # Include the counterterm only in a part of the phase space
        if self.is_cut(Q=Q, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current,
                hel_config=hel_config,
                reduced_kinematics=('IS_CUT', lower_PS_point))

        # Retrieve kinematics
        pS = higher_PS_point[soft_leg_number]
        jacobian = mapping_vars.get('jacobian', 1.)

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [],
            'reduced_kinematics': [
                reduced_kinematics,
            ],
            'values': {}
        })

        # Normalization factors
        norm = -4. * math.pi * alpha_s
        norm *= self.factor(Q=Q, pS=pS)
        if self.divide_by_jacobian:
            norm /= jacobian

        color_correlation_index = 0
        # Now loop over the colored parton number pairs (a,b)
        # and add the corresponding contributions to this current
        for i, a in enumerate(all_colored_parton_numbers):
            # Use the symmetry of the color correlation and soft current (a,b) <-> (b,a)
            for b in all_colored_parton_numbers[i:]:
                # Write the eikonal for that pair
                if a != b:
                    mult_factor = 2.
                else:
                    mult_factor = 1.
                #pa = sum(higher_PS_point[child] for child in momenta_dict[a])
                #pb = sum(higher_PS_point[child] for child in momenta_dict[b])
                pa = lower_PS_point[a]
                pb = lower_PS_point[b]
                eikonal = self.eikonal(pa, pb, pS)
                evaluation['color_correlations'].append(((a, b), ))
                evaluation['values'][(0, color_correlation_index, 0)] = {
                    'finite': norm * mult_factor * eikonal
                }
                color_correlation_index += 1

        result = utils.SubtractionCurrentResult()
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))
        return result
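
The double loop above visits each colour pair once and compensates with a multiplicity factor of 2 for a != b, using the a <-> b symmetry of both the colour correlator and the eikonal. A tiny standalone check of that bookkeeping with an arbitrary symmetric pair weight (made-up numbers, unrelated to the code):

def pair_weight(a, b):
    # Any symmetric weight, pair_weight(a, b) == pair_weight(b, a).
    return (a + b) / (1. + a * b)

partons = [1, 2, 4, 5]
full_sum = sum(pair_weight(a, b) for a in partons for b in partons)
sym_sum = sum((2. if a != b else 1.) * pair_weight(a, b)
              for i, a in enumerate(partons) for b in partons[i:])
assert abs(full_sum - sym_sum) < 1e-12
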
Code Example #26
    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, lower_PS_point=None,
        leg_numbers_map=None, reduced_process=None, hel_config=None,
        Q=None, **opts ):
        if higher_PS_point is None or lower_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before and after mapping." )
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                self.name() + " requires a leg numbers map, i.e. a momentum dictionary." )
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q." )

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Include the counterterm only in a part of the phase space
        children = self.get_sorted_children(current, self.model)
        pC = sum(higher_PS_point[child] for child in children)
        soft_children = []
        for substructure in current.get('singular_structure').substructures:
            soft_children += [leg.n for leg in substructure.get_all_legs()]
        pS = sum(higher_PS_point[child] for child in soft_children)
        parent = leg_numbers_map.inv[frozenset(children)]
        if self.is_cut(Q=Q, pC=pC, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config)

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'values': {(0, 0): {'finite': None}}
        })

        # Evaluate kernel
        zs, kTs = self.variables(higher_PS_point, lower_PS_point[parent], children, Q=Q)
        z = zs[0]
        evaluation['values'][(0, 0)]['finite'] = self.color_charge * 2.*(1.-z) / z

        # Add the normalization factors
        s12 = pC.square()
        norm = 8. * math.pi * alpha_s / s12
        norm *= self.factor(Q=Q, pC=pC, pS=pS)
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm

        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())))
        return result
Code Example #27
    def evaluate_integrated_current(self,
                                    current,
                                    PS_point,
                                    reduced_process=None,
                                    leg_numbers_map=None,
                                    hel_config=None,
                                    compute_poles=False,
                                    **opts):
        """ Now evalaute the current and return the corresponding instance of
        SubtractionCurrentResult. See documentation of the mother function for more details."""

        if not hel_config is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s does not support helicity assignment." %
                self.__class__.__name__)
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires the leg_number_map." % self.__class__.__name__)
        if reduced_process is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires the reduced_process." % self.__class__.__name__)

        result = utils.SubtractionCurrentResult()

        ss = current.get('singular_structure').substructures[0]

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Retrieve kinematic variables from the specified PS point
        children_numbers = tuple(leg.n for leg in ss.legs)
        parent_number = leg_numbers_map.inv[frozenset(children_numbers)]

        p12 = PS_point[parent_number]
        Q = sum([
            PS_point[l.get('number')]
            for l in reduced_process.get_initial_legs()
        ])
        Q_square = Q.square()
        y12 = 2. * Q.dot(p12) / Q_square

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'values': {
                (0, 0): {}
            }
        })

        # Virtuality cut in the integration
        alpha_0 = currents.SomogyiChoices.alpha_0
        finite_part = HE.CggFF_Finite_Gabor_DIVJAC_NOD0(alpha_0, y12)

        value = EpsilonExpansion({
            0: finite_part,
            -1: (11. / 3. - 4. * math.log(y12)),
            -2: 2.
        })

        logMuQ = math.log(mu_r**2 / Q_square)

        prefactor = EpsilonExpansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})
        prefactor *= self.SEpsilon

        # Now add the normalization factors
        value *= prefactor * (alpha_s / (2. * math.pi)) * self.CA
        # Truncate expansion so as to keep only relevant terms
        value.truncate(min_power=-2, max_power=0)

        # Now register the value in the evaluation
        evaluation['values'][(0, 0)] = value.to_human_readable_dict()

        # And add it to the results
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))

        return result
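
The integrated current above manipulates Laurent series in the dimensional regulator through EpsilonExpansion. The following simplified stand-in (the actual class in the code base is richer) only illustrates how the log(mu_r^2/Q^2) prefactor feeds the pole coefficients into the finite part before truncation:

import math

class Expansion(dict):
    # Minimal Laurent series in epsilon, stored as {power: coefficient}.
    def __mul__(self, other):
        if not isinstance(other, dict):
            return Expansion({k: v * other for k, v in self.items()})
        out = {}
        for ka, va in self.items():
            for kb, vb in other.items():
                out[ka + kb] = out.get(ka + kb, 0.) + va * vb
        return Expansion(out)
    def truncate(self, min_power, max_power):
        for k in list(self):
            if k < min_power or k > max_power:
                del self[k]
        return self

logMuQ = math.log(0.5)  # hypothetical value of log(mu_r**2 / Q_square)
prefactor = Expansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})
poles = Expansion({-2: 2., -1: 3., 0: 1.5})  # made-up coefficients
print((prefactor * poles).truncate(-2, 0))  # double pole unchanged; finite part picks up the logs
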
Code Example #28
    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, lower_PS_point=None,
        leg_numbers_map=None, reduced_process=None, hel_config=None,
        Q=None, **opts ):
        if higher_PS_point is None or lower_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before and after mapping." )
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                self.name() + " requires a leg numbers map, i.e. a momentum dictionary." )
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q." )

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Include the counterterm only in a part of the phase space
        children = self.get_sorted_children(current, self.model)
        pC = higher_PS_point[children[0]]
        pS = higher_PS_point[children[1]]
        pC = pC + pS
        parent = leg_numbers_map.inv[frozenset(children)]
        if self.is_cut(Q=Q, pC=pC, pS=pS):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config)

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'values': {(0, 0): {'finite': None}}
        })

        # Evaluate kernel
        xs, kTs = self.variables(higher_PS_point, lower_PS_point[parent], children, Q=Q)
        x = xs[0]
    
        # See Eq. (4.17) of NNLO compatible NLO scheme publication arXiv:0903.1218v2

        # There is no need for the ratio of color-averaging factor between the real ME
        # initial state flavor and the one of the reduced Born ME as they are either both
        # gluons or both quarks
        
        evaluation['values'][(0, 0)]['finite'] = self.color_charge * ( 2. / (1. - x) )

        # Add the normalization factors
        s12 = pC.square()
        norm = 8. * math.pi * alpha_s / s12
        norm *= self.factor(Q=Q, pC=pC, pS=pS)
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm

        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())))
        return result
Code Example #29
    def evaluate_integrated_current(self,
                                    current,
                                    PS_point,
                                    reduced_process=None,
                                    leg_numbers_map=None,
                                    hel_config=None,
                                    compute_poles=False,
                                    **opts):
        """ Evaluates this current and return the corresponding instance of
        SubtractionCurrentResult. See documentation of the mother function for more details."""

        if not hel_config is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s does not support helicity assignment." %
                self.__class__.__name__)

        if leg_numbers_map is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires the leg_number_map." % self.__class__.__name__)

        if reduced_process is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires a reduced_process." % self.__class__.__name__)

        result = utils.SubtractionCurrentResult()

        ss = current.get('singular_structure').substructures[0]

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Retrieve kinematic variables from the specified PS point
        soft_leg_number = ss.legs[0].n
        # Use the momenta map in case the soft leg has been relabelled,
        # although for the soft current this is typically not the case.
        soft_leg_number = leg_numbers_map.inv[frozenset([
            soft_leg_number,
        ])]

        Q = sum([
            PS_point[l.get('number')]
            for l in reduced_process.get_initial_legs()
        ])
        Q_square = Q.square()

        # Now find all colored leg numbers in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [],
            'values': {}
        })

        logMuQ = math.log(mu_r**2 / Q_square)

        prefactor = EpsilonExpansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})
        prefactor *= self.SEpsilon

        # Now add the normalization factors
        prefactor *= (alpha_s / (2. * math.pi))
        prefactor.truncate(min_power=-2, max_power=2)

        # Virtuality cut in the integration
        y_0 = 0.5

        color_correlation_index = 0
        # Now loop over the colored parton number pairs (a,b)
        # and add the corresponding contributions to this current
        for i, a in enumerate(all_colored_parton_numbers):
            # Use the symmetry of the color correlation and soft current (a,b) <-> (b,a)
            for b in all_colored_parton_numbers[i + 1:]:
                evaluation['color_correlations'].append(((a, b), ))
                # Multiply by a factor of 2 because the sum below runs only over unordered pairs with a != b
                value = prefactor * 2.
                pa = PS_point[a]
                pb = PS_point[b]
                Y = (pa.dot(pb) * Q_square) / (2. * Q.dot(pa) * Q.dot(pb))
                finite_part = HE.SoftFF_Finite_Gabor_DIVJAC_NOD0(y_0, Y)
                value *= EpsilonExpansion({
                    0: finite_part,
                    -1: math.log(Y),
                    -2: 0.
                })
                # Truncate expansion so as to keep only relevant terms
                value.truncate(min_power=-2, max_power=0)
                evaluation['values'][(
                    0,
                    color_correlation_index)] = value.to_human_readable_dict()
                color_correlation_index += 1

        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))

        return result
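
The invariant Y built above, Y = (pa.pb * Q^2) / (2 * (Q.pa) * (Q.pb)), equals 1 for a massless back-to-back pair whose momenta sum to Q; a short numerical check with illustrative momenta:

def dot(p, q):
    return p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]

pa = (1., 0., 0., 1.)
pb = (1., 0., 0., -1.)
Q = tuple(a + b for a, b in zip(pa, pb))
Y = dot(pa, pb) * dot(Q, Q) / (2. * dot(Q, pa) * dot(Q, pb))
print(Y)  # -> 1.0
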
Code Example #30
    def evaluate_subtraction_current(self,
                                     current,
                                     higher_PS_point=None,
                                     momenta_dict=None,
                                     reduced_process=None,
                                     hel_config=None,
                                     Q=None,
                                     **opts):

        if higher_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the higher phase-space point.")
        if momenta_dict is None:
            raise CurrentImplementationError(self.name() +
                                             " requires a momenta dictionary.")
        if reduced_process is None:
            raise CurrentImplementationError(self.name() +
                                             " requires a reduced_process.")
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment.")
        if Q is None:
            raise CurrentImplementationError(
                self.name() +
                " requires specification of the total incoming momentum Q.")

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Retrieve leg numbers
        # overall_children: the legs becoming unresolved in the resolved process (momenta in the real-emission ME)
        # overall_parents:  the parent legs in the top-level reduced process (momenta in the mapped ME of this CT)
        overall_children = []
        overall_parents = []
        # We take one defining structure (self.structure[0]); its top-level substructures are the independent
        # bundles, e.g. (C(1,2),C(4,5),S(6)). Using structure[0] as the defining structure is fine for the
        # purpose below, since all defining structures should use the same leg numbers.
        for bundle in self.structure[0].substructures:
            overall_children.append(
                tuple(self.leg_numbers_map[l.n]
                      for l in bundle.get_all_legs()))
            if self.has_parent(bundle, len(overall_children[-1])):
                overall_parents.append(
                    self.get_parent(frozenset(overall_children[-1]),
                                    momenta_dict))
            else:
                overall_parents.append(None)

        all_steps = [
            {
                'higher_PS_point': higher_PS_point
            },
        ]
        overall_jacobian = 1.
        for i_step, mapping_information in enumerate(self.mapping_rules):
            # Now obtain recoilers
            reduced_recoilers = mapping_information['reduced_recoilers'](
                reduced_process, excluded=tuple(overall_parents))
            additional_recoilers = sub.SubtractionLegSet(
                SubtractionLeg(self.map_leg_number(l.n, overall_parents),
                               l.pdg, l.state)
                for l in mapping_information['additional_recoilers'])
            all_recoilers = sub.SubtractionLegSet(
                list(reduced_recoilers) + list(additional_recoilers))

            # Now recursively apply leg numbers mappings
            mapping_singular_structure = mapping_information[
                'singular_structure'].get_copy()
            self.map_leg_numbers_in_singular_structure(
                mapping_singular_structure, overall_parents)
            # Now assign the recoilers (whose leg numbers have already been mapped)
            mapping_singular_structure.legs = all_recoilers

            # Build the momenta_dict by also substituting leg numbers
            this_momenta_dict = bidict({
                self.map_leg_number(k, overall_parents):
                frozenset([self.map_leg_number(n, overall_parents) for n in v])
                for k, v in mapping_information['momenta_dict'].items()
            })

            lower_PS_point, mapping_vars = mapping_information[
                'mapping'].map_to_lower_multiplicity(
                    all_steps[-1]['higher_PS_point'],
                    mapping_singular_structure,
                    this_momenta_dict,
                    compute_jacobian=self.divide_by_jacobian)
            if mappings.RelabellingMapping.needs_relabelling(
                    this_momenta_dict):
                lower_PS_point = mappings.RelabellingMapping.map_to_lower_multiplicity(
                    lower_PS_point, None, this_momenta_dict)

            all_steps[-1]['lower_PS_point'] = lower_PS_point
            # Q is provided externally
            mapping_vars.pop('Q', None)
            all_steps[-1]['mapping_vars'] = mapping_vars

            overall_jacobian *= mapping_vars.get('jacobian', 1.)

            bundles_info = []
            for bundle in mapping_singular_structure.substructures:
                bundles_info.append({})
                all_legs = bundle.get_all_legs()
                # This sorting is important so that the variables generated can be related to the legs specified
                # in the mapping singular structures of the mapping rules
                all_initial_legs = sorted(
                    [l for l in all_legs if l.state == l.INITIAL],
                    key=lambda l: l.n)
                all_final_legs = sorted(
                    [l for l in all_legs if l.state == l.FINAL],
                    key=lambda l: l.n)
                bundles_info[-1]['initial_state_children'] = tuple(
                    l.n for l in all_initial_legs)
                bundles_info[-1]['final_state_children'] = tuple(
                    l.n for l in all_final_legs)
                if self.has_parent(bundle, len(all_legs)):
                    bundles_info[-1]['parent'] = self.get_parent(
                        frozenset(l.n for l in all_legs), this_momenta_dict)
                else:
                    bundles_info[-1]['parent'] = None

                # Retrieve kinematics
                bundles_info[-1]['cut_inputs'] = {}
                if bundle.name() == 'C':
                    if len(all_initial_legs) > 0:
                        bundles_info[-1]['cut_inputs']['pA'] = -sum(
                            all_steps[-1]['higher_PS_point'][l.n]
                            for l in all_initial_legs)
                    bundles_info[-1]['cut_inputs']['pC'] = sum(
                        all_steps[-1]['higher_PS_point'][l.n]
                        for l in all_final_legs)
                elif bundle.name() == 'S':
                    bundles_info[-1]['cut_inputs']['pS'] = sum(
                        all_steps[-1]['higher_PS_point'][l.n]
                        for l in all_final_legs)

            all_steps[-1]['bundles_info'] = bundles_info

            # Get all variables for this level
            if mapping_information['variables'] is not None:
                all_steps[-1]['variables'] = mapping_information['variables'](
                    all_steps[-1]['higher_PS_point'],
                    all_steps[-1]['lower_PS_point'],
                    bundles_info,
                    Q=Q,
                    **mapping_vars)
            else:
                all_steps[-1]['variables'] = {}

            # Add the next higher PS point for the next level if necessary
            if i_step < (len(self.mapping_rules) - 1):
                all_steps.append({'higher_PS_point': lower_PS_point})

        overall_lower_PS_point = all_steps[-1]['lower_PS_point']
        reduced_kinematics = (None, overall_lower_PS_point)

        global_variables = {
            'overall_children': overall_children,
            'overall_parents': overall_parents,
            'leg_numbers_map': self.leg_numbers_map,
            'Q': Q,
        }
        # Build global variables if necessary
        if self.variables is not None:
            global_variables.update(self.variables(all_steps,
                                                   global_variables))

        # Apply cuts: include the counterterm only in a part of the phase-space
        for i_step, step_info in enumerate(all_steps):
            cut_inputs = dict(step_info)
            if self.mapping_rules[i_step]['is_cut'](cut_inputs,
                                                    global_variables):
                return utils.SubtractionCurrentResult.zero(
                    current=current,
                    hel_config=hel_config,
                    reduced_kinematics=('IS_CUT', overall_lower_PS_point))

#        for i_step, step_info in enumerate(all_steps):
#            misc.sprint("Higher PS point at step #%d: %s"%(i_step, str(step_info['higher_PS_point'])))
#            misc.sprint("Lower PS point at step  #%d: %s"%(i_step, str(step_info['lower_PS_point'])))

        # Evaluate kernel
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [
                None,
            ],
            'color_correlations': [
                None,
            ],
            'reduced_kinematics': [
                reduced_kinematics,
            ],
            'values': {}
        })

        # Apply collinear kernel (can be dummy)
        evaluation = self.kernel(evaluation, all_steps, global_variables)

        # Apply soft kernel (can be dummy), which also knows about the reduced process
        evaluation = self.call_soft_kernel(evaluation, reduced_process,
                                           all_steps, global_variables)

        # Add the normalization factors
        # WARNING! In this implementation the propagator denominators must be included in the kernel evaluation.
        norm = (8. * math.pi * alpha_s)**(self.squared_orders['QCD'] / 2)
        norm /= overall_jacobian
        for k in evaluation['values']:
            for term in evaluation['values'][k]:
                evaluation['values'][k][term] *= norm

        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))
        return result
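
This last example chains several mapping rules: each step consumes the previous step's lower-multiplicity phase-space point as its higher-multiplicity input, and the mapping jacobians multiply into an overall factor. A schematic, self-contained sketch of just that bookkeeping (the mapping callables and their return values are placeholders, not the package API):

def walk_mapping_rules(higher_PS_point, mapping_rules):
    # Mirrors the all_steps / overall_jacobian bookkeeping of the example above.
    all_steps = [{'higher_PS_point': higher_PS_point}]
    overall_jacobian = 1.
    for i_step, rule in enumerate(mapping_rules):
        lower_PS_point, mapping_vars = rule['map'](all_steps[-1]['higher_PS_point'])
        all_steps[-1]['lower_PS_point'] = lower_PS_point
        all_steps[-1]['mapping_vars'] = mapping_vars
        overall_jacobian *= mapping_vars.get('jacobian', 1.)
        if i_step < len(mapping_rules) - 1:
            all_steps.append({'higher_PS_point': lower_PS_point})
    return all_steps, overall_jacobian

rules = [{'map': lambda p: (p + ['after step 1'], {'jacobian': 0.9})},
         {'map': lambda p: (p + ['after step 2'], {'jacobian': 1.1})}]
steps, jacobian = walk_mapping_rules(['starting point'], rules)
print(len(steps), jacobian)  # -> 2 steps, overall jacobian 0.9 * 1.1
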