Code example #1
    def P_qq(color_factors, z, kT):
        """ Spin-correlated q-qbar collinear splitting kernel (color factor TR).
        The return value is a list of ( spin_correlation_vector_with_parent, weight ) tuples,
        where None stands for the -g^{mu nu} spin structure. """
        return [
            (None, EpsilonExpansion({
                0: color_factors.TR,
            })),
            (kT, EpsilonExpansion({
                0: color_factors.TR * (4. * z * (1. - z) * (1. / kT.square())),
            })),
        ]
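
All of these kernels return their weights as EpsilonExpansion objects, i.e. Laurent series in the dimensional-regularization parameter epsilon stored as {power: coefficient} mappings (the real class comes from the subtraction utilities and also accepts labels such as 'finite' for the epsilon^0 term). A minimal toy stand-in illustrating the bookkeeping (addition, multiplication, truncation) could look as follows; it is a sketch only, not the actual implementation.

# Minimal toy stand-in for the EpsilonExpansion container (illustration only).
class ToyEpsilonExpansion(object):
    """Laurent series in epsilon, stored as {power: coefficient}."""

    def __init__(self, coefficients):
        self.coefficients = dict(coefficients)

    def __add__(self, other):
        summed = dict(self.coefficients)
        for power, coeff in other.coefficients.items():
            summed[power] = summed.get(power, 0.) + coeff
        return ToyEpsilonExpansion(summed)

    def __mul__(self, other):
        # Multiplication by a scalar or by another expansion (powers add up).
        if not isinstance(other, ToyEpsilonExpansion):
            return ToyEpsilonExpansion(
                {p: c * other for p, c in self.coefficients.items()})
        product = {}
        for p1, c1 in self.coefficients.items():
            for p2, c2 in other.coefficients.items():
                product[p1 + p2] = product.get(p1 + p2, 0.) + c1 * c2
        return ToyEpsilonExpansion(product)

    def truncate(self, min_power=None, max_power=None):
        # Drop powers outside [min_power, max_power], as done for the prefactors below.
        self.coefficients = {
            p: c for p, c in self.coefficients.items()
            if (min_power is None or p >= min_power) and
               (max_power is None or p <= max_power)}

# Example: the scale prefactor (mu_r^2/Q^2)^eps expanded to second order.
logMuQ = 0.3
prefactor = ToyEpsilonExpansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})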
Code example #2
    def kernel(self, evaluation, all_steps_info, global_variables):
        """ Evaluate this I(FF) counterterm given the supplied variables. """

        kT_FF = all_steps_info[0]['variables'][0]['kTs'][(0,(1,))]
        z_FF  = all_steps_info[0]['variables'][0]['zs'][0]
        s_rs  = all_steps_info[0]['variables'][0]['ss'][(0,1)]

        kT_IF = global_variables['kTs'][0]
        x_IF  = global_variables['xs'][0]
        s_a_rs = global_variables['ss'][(0,1)]

        p_a_tilde = global_variables['p_a_tilde']

        p_rs_hat = all_steps_info[0]['lower_PS_point'][
            all_steps_info[0]['bundles_info'][0]['parent']
        ]

        evaluation['values'][(0, 0, 0)] = EpsilonExpansion({'finite':
            (2. / (s_rs * s_a_rs)) * self.TR * self.CF * (
                1. / (1. - x_IF)
                + z_FF * (1. - z_FF) * (2. * p_a_tilde.dot(kT_FF))**2
                / (kT_FF.square() * (2. * p_a_tilde.dot(p_rs_hat)))
            )
        })
        return evaluation
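
The nested all_steps_info and global_variables inputs are not shown in the snippet above. The shape implied by the accesses performed in this kernel, with placeholder strings standing in for momenta and floats and a hypothetical parent leg number 3, is roughly the following sketch.

# Shape inferred from the accesses above (placeholders only; parent leg number 3 is hypothetical).
all_steps_info = [{
    'variables': [{
        'kTs': {(0, (1,)): 'kT_FF momentum'},
        'zs' : ['z_FF value'],
        'ss' : {(0, 1): 's_rs invariant'},
    }],
    'bundles_info': [{'parent': 3}],
    'lower_PS_point': {3: 'p_rs_hat momentum'},
}]
global_variables = {
    'kTs': ['kT_IF momentum'],
    'xs' : ['x_IF value'],
    'ss' : {(0, 1): 's_a_rs invariant'},
    'p_a_tilde': 'p_a_tilde momentum',
}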
Code example #3
    def P_qqpqp(color_factors, z_i, z_r, z_s, s_ir, s_is, s_rs, kT_i, kT_r,
                kT_s):
        """ Kernel for the q -> q qp' qp' splitting. The return value is not a float but a list of tuples:
                ( spin_correlation_vectors_with_parent, weight )
            where spin_correlation_vector_with_parent can be None if None is required.
        """

        # Compute handy term and variables
        s_irs = s_ir + s_is + s_rs
        t_rs_i = 2. * (z_r * s_is - z_s * s_ir) / (z_r + z_s) + (
            (z_r - z_s) / (z_r + z_s)) * s_rs
        dimensional_term = z_r + z_s - s_rs / s_irs

        # Overall prefactor
        prefactor = (1. / 2.) * color_factors.CF * color_factors.TR * (s_irs /
                                                                       s_rs)

        return [
            (None,
             EpsilonExpansion({
                 0:
                 -(t_rs_i**2 / (s_rs * s_irs)) + (4. * z_i + (z_r - z_s)**2) /
                 (z_r + z_s) + dimensional_term,
                 1:
                 -2. * dimensional_term
             }) * prefactor)
        ]
Code example #4
    def eikonal_g(color_factors, pi, pk, pr, spin_corr_vector=None):
        """ Gluon eikonal, with p_i^\mu p_i^\nu in the numerator, dotted into each other if spin_corr_vector=None
        otherwise dotted with the spin_corr_vector."""

        s_ir = pi.dot(pr)
        s_kr = pk.dot(pr)
        numerator = pi.dot(pk) if spin_corr_vector is None else pi.dot(
            spin_corr_vector) * pk.dot(spin_corr_vector)

        return EpsilonExpansion({'finite': numerator / (s_ir * s_kr)})
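
As a quick numerical illustration, the scalar eikonal factor above can be evaluated with toy 4-vectors. The ToyLV class below is a hypothetical stand-in; in the code base the momenta are LorentzVector objects exposing the same dot/square interface.

# Toy Minkowski 4-vectors, just to evaluate the eikonal factor numerically.
class ToyLV(tuple):
    def dot(self, other):
        return self[0] * other[0] - sum(a * b for a, b in zip(self[1:], other[1:]))

pi = ToyLV((1., 0., 0.,  1.))
pk = ToyLV((1., 0., 0., -1.))
pr = ToyLV((1., 1., 0.,  0.))
# numerator / (s_ir * s_kr) = pi.pk / ((pi.pr) * (pk.pr)) = 2 / (1 * 1) = 2
print(pi.dot(pk) / (pi.dot(pr) * pk.dot(pr)))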
Code example #5
class Constants(object):
    """Constants used throughout the implementation of the counterterms."""
    
    # Epsilon expansion constants
    EulerGamma = 0.57721566490153286061
    SEpsilon =  EpsilonExpansion({ 
         0 : 1., 
         1 : -EulerGamma + math.log(4.*math.pi),
         2 : 0.5*(EulerGamma**2-2.*EulerGamma*math.log(4.*math.pi)+math.log(4.*math.pi)**2)
    })
    
    # SU(3) group constants
    TR = 0.5
    NC = 3.0
    CF = (NC ** 2 - 1) / (2 * NC)
    CA = NC
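
These numerical values can be cross-checked with plain Python. In particular, since S_eps = (4 pi)^eps * exp(-eps * EulerGamma) (as noted in the QCDCurrent constructor below), the epsilon^2 coefficient must equal half the square of the epsilon^1 coefficient:

# Quick consistency check of the constants above (no framework needed).
import math

EulerGamma = 0.57721566490153286061
NC = 3.0
assert abs((NC**2 - 1) / (2 * NC) - 4. / 3.) < 1e-12  # CF = 4/3 for SU(3)

c1 = -EulerGamma + math.log(4. * math.pi)
c2 = 0.5 * (EulerGamma**2 - 2. * EulerGamma * math.log(4. * math.pi)
            + math.log(4. * math.pi)**2)
assert abs(c2 - 0.5 * c1**2) < 1e-12  # eps^2 term of S_eps is half the square of the eps^1 term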
Code example #6
    def P_q_qpqp(color_factors, z_rs, z_i_rs, kT_rs, pi_hat, p_rs_hat):
        """ Kernel for the q -> q (qp' qp') strongly ordered splitting. The return value is not a float but a list of tuples:
                ( spin_correlation_vectors_with_parent, weight )
            where spin_correlation_vector_with_parent can be None if None is required.
        """

        # Assemble the kernel and apply the overall TR color factor
        result = (AltarelliParisiKernels.P_qg_averaged(color_factors, z_i_rs) +
                  EpsilonExpansion({
                      0:
                      -2. * color_factors.CF * z_rs * (1. - z_rs) *
                      (1 - z_i_rs - ((2. * kT_rs.dot(pi_hat))**2) /
                       (kT_rs.square() * (2. * pi_hat.dot(p_rs_hat))))
                  })) * color_factors.TR

        return [(None, result)]
Code example #7
    def soft_kernel(self, evaluation, colored_partons, all_steps_info, global_variables):
        """Evaluate a collinear type of splitting kernel, which *does* need to know about the reduced process
        Should be specialised by the daughter class if not dummy
        """

        new_evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [ None, ],
            'color_correlations': [ ],
            'reduced_kinematics': evaluation['reduced_kinematics'],
            'values': { }
        })

        overall_lower_PS_point = all_steps_info[-1]['lower_PS_point']
        soft_leg_number = all_steps_info[-1]['bundles_info'][0]['final_state_children'][0]
        pr = all_steps_info[-1]['higher_PS_point'][soft_leg_number]
        colored_parton_numbers = sorted(colored_partons.keys())

        for i, a in enumerate(colored_parton_numbers):
            for b in colored_parton_numbers[i:]:
                # Write the eikonal for that pair
                if a!=b:
                    mult_factor = 1.
                else:
                    mult_factor = 1./2.

                pi = overall_lower_PS_point[a]
                pk = overall_lower_PS_point[b]
                composite_weight = EpsilonExpansion({'finite': 0.})
                for (sc, cc, rk), coll_weight in evaluation['values'].items():
                    if evaluation['spin_correlations'][sc] is None:
                        # We *subtract* here the contribution because the non-spin-correlated contribution is -g^{\mu\nu}
                        composite_weight -= SoftKernels.eikonal_g(self, pi, pk, pr, spin_corr_vector=None)*EpsilonExpansion(coll_weight)
                    else:
                        # Normally the collinear current should have built spin-correlations with the leg number corresponding
                        # to the soft one in the context of this soft current
                        assert len(evaluation['spin_correlations'][sc])==1
                        parent_number, spin_corr_vecs = evaluation['spin_correlations'][sc][0]
                        assert soft_leg_number==parent_number
                        assert len(spin_corr_vecs)==1
                        spin_corr_vec = spin_corr_vecs[0]
                        composite_weight += SoftKernels.eikonal_g(self, pi, pk, pr, spin_corr_vector=spin_corr_vec)*EpsilonExpansion(coll_weight)
                new_evaluation['color_correlations'].append( ((a, b), ) )
                new_evaluation['values'][(0,len(new_evaluation['color_correlations'])-1,0)] = composite_weight*mult_factor

        return new_evaluation
Code example #8
    def eikonal_qqx(color_factors, pi, pk, pr, ps):
        """ Taken from Gabor's notes on colorful ISR@NNLO."""

        s_ir = pi.dot(pr)
        s_ks = pk.dot(ps)
        s_is = pi.dot(ps)
        s_kr = pk.dot(pr)
        s_ik = pi.dot(pk)
        s_rs = pr.dot(ps)
        s_i_rs = pi.dot(pr + ps)
        s_k_rs = pk.dot(pr + ps)

        return EpsilonExpansion({
            'finite':
            color_factors.TR *
            (((s_ir * s_ks + s_is * s_kr - s_ik * s_rs) / (s_i_rs * s_k_rs)) -
             ((s_ir * s_is) / (s_i_rs**2)) - ((s_kr * s_ks) / (s_k_rs**2)))
        })
Code example #9
    def __init__(self, model, **opts):

        super(QCDCurrent, self).__init__(model, **opts)
        # Extract constants from the UFO model if present, otherwise take default values
        try:
            model_param_dict = self.model.get('parameter_dict')
        except Exception:
            model_param_dict = dict()

        self.TR = model_param_dict.get('TR', utils.Constants.TR)
        self.NC = model_param_dict.get('NC', utils.Constants.NC)
        self.CF = model_param_dict.get('CF', (self.NC**2 - 1) / (2 * self.NC))
        self.CA = model_param_dict.get('CA', self.NC)

        self.EulerGamma = utils.Constants.EulerGamma
        # S_eps = (4*pi)^eps * exp(-eps * EulerGamma)
        self.SEpsilon = utils.Constants.SEpsilon
        # The SEpsilon volume factor is factorized from all virtual and integrated contributions
        # so that the poles between the two cancel even before multiplying by SEpsilon, hence
        # making the result equivalent to what one would have obtained by multiplying by SEpsilon=1.
        self.SEpsilon = EpsilonExpansion({0: 1.})
Code example #10
    def evaluate_integrated_current(self,
                                    current,
                                    PS_point,
                                    reduced_process=None,
                                    leg_numbers_map=None,
                                    hel_config=None,
                                    compute_poles=False,
                                    **opts):
        """ Evaluates this current and return the corresponding instance of
        SubtractionCurrentResult. See documentation of the mother function for more details."""

        if hel_config is not None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s does not support helicity assignment." %
                self.__class__.__name__)

        if leg_numbers_map is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires the leg_number_map." % self.__class__.__name__)

        if reduced_process is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires a reduced_process." % self.__class__.__name__)

        result = utils.SubtractionCurrentResult()

        ss = current.get('singular_structure').substructures[0]

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Retrieve kinematic variables from the specified PS point
        soft_leg_number = ss.legs[0].n
        # Use the momenta map, in case it has been remapped.
        # Although for the soft current it's typically not the case
        soft_leg_number = leg_numbers_map.inv[frozenset([
            soft_leg_number,
        ])]

        Q = sum([
            PS_point[l.get('number')]
            for l in reduced_process.get_initial_legs()
        ])
        Q_square = Q.square()

        # Now find all colored leg numbers in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [],
            'values': {}
        })

        logMuQ = math.log(mu_r**2 / Q_square)

        prefactor = EpsilonExpansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})
        prefactor *= self.SEpsilon

        # Now add the normalization factors
        prefactor *= (alpha_s / (2. * math.pi))
        prefactor.truncate(min_power=-2, max_power=2)

        #Virtuality cut in the integration
        y_0 = 0.5

        color_correlation_index = 0
        # Now loop over the colored parton number pairs (a,b)
        # and add the corresponding contributions to this current
        for i, a in enumerate(all_colored_parton_numbers):
            # Use the symmetry of the color correlation and soft current (a,b) <-> (b,a)
            for b in all_colored_parton_numbers[i + 1:]:
                evaluation['color_correlations'].append(((a, b), ))
                # We multiply by a factor 2. because we symmetrized the sum below
                value = prefactor * 2.
                pa = PS_point[a]
                pb = PS_point[b]
                Y = (pa.dot(pb) * Q_square) / (2. * Q.dot(pa) * Q.dot(pb))
                finite_part = HE.SoftFF_Finite_Gabor_DIVJAC_NOD0(y_0, Y)
                value *= EpsilonExpansion({
                    0: finite_part,
                    -1: math.log(Y),
                    -2: 0.
                })
                # Truncate expansion so as to keep only relevant terms
                value.truncate(min_power=-2, max_power=0)
                evaluation['values'][(
                    0,
                    color_correlation_index)] = value.to_human_readable_dict()
                color_correlation_index += 1

        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))

        return result
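
For reference, the epsilon bookkeeping performed at the end of the loop above can be reproduced with plain dictionaries: the prefactor {0: 1, 1: L, 2: L^2/2}, with L = log(mu_r^2/Q^2), multiplies the soft expansion {-2: 0, -1: log(Y), 0: finite} and the product is truncated to powers -2..0. The sketch below uses hypothetical numbers and omits the overall alpha_s/(2 pi) normalization and the factor 2 from the symmetrized sum.

import math

L, Y, finite_part = 0.2, 0.4, 1.7            # hypothetical inputs
prefactor = {0: 1., 1: L, 2: 0.5 * L**2}
soft      = {-2: 0., -1: math.log(Y), 0: finite_part}

product = {}
for p1, c1 in prefactor.items():
    for p2, c2 in soft.items():
        product[p1 + p2] = product.get(p1 + p2, 0.) + c1 * c2
truncated = {p: c for p, c in product.items() if -2 <= p <= 0}

# Surviving coefficients:
#   eps^-2 : 0
#   eps^-1 : log(Y)
#   eps^0  : finite_part + L*log(Y)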
Code example #11
    def evaluate_integrated_current(self,
                                    current,
                                    PS_point,
                                    reduced_process=None,
                                    leg_numbers_map=None,
                                    hel_config=None,
                                    compute_poles=False,
                                    **opts):
        """ Now evalaute the current and return the corresponding instance of
        SubtractionCurrentResult. See documentation of the mother function for more details."""

        if hel_config is not None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s does not support helicity assignment." %
                self.__class__.__name__)
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires the leg_number_map." % self.__class__.__name__)
        if reduced_process is None:
            raise CurrentImplementationError(
                "Subtraction current implementation " +
                "%s requires the reduced_process." % self.__class__.__name__)

        result = utils.SubtractionCurrentResult()

        ss = current.get('singular_structure').substructures[0]

        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']

        # Retrieve kinematic variables from the specified PS point
        children_numbers = tuple(leg.n for leg in ss.legs)
        parent_number = leg_numbers_map.inv[frozenset(children_numbers)]

        p12 = PS_point[parent_number]
        Q = sum([
            PS_point[l.get('number')]
            for l in reduced_process.get_initial_legs()
        ])
        Q_square = Q.square()
        y12 = 2. * Q.dot(p12) / Q_square

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [None],
            'values': {
                (0, 0): {}
            }
        })

        #Virtuality cut in the integration
        alpha_0 = currents.SomogyiChoices.alpha_0
        finite_part = HE.CggFF_Finite_Gabor_DIVJAC_NOD0(alpha_0, y12)

        value = EpsilonExpansion({
            0: finite_part,
            -1: (11. / 3. - 4. * math.log(y12)),
            -2: 2.
        })

        logMuQ = math.log(mu_r**2 / Q_square)

        prefactor = EpsilonExpansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})
        prefactor *= self.SEpsilon

        # Now add the normalization factors
        value *= prefactor * (alpha_s / (2. * math.pi)) * self.CA
        # Truncate expansion so as to keep only relevant terms
        value.truncate(min_power=-2, max_power=0)

        # Now register the value in the evaluation
        evaluation['values'][(0, 0)] = value.to_human_readable_dict()

        # And add it to the results
        result.add_result(evaluation,
                          hel_config=hel_config,
                          squared_orders=tuple(
                              sorted(current.get('squared_orders').items())))

        return result
Code example #12
    def evaluate_kernel(self,
                        PS_point,
                        process,
                        xi,
                        mu_r,
                        mu_f,
                        Q,
                        normalization,
                        allowed_backward_evolved_flavors='ALL'):
        """ Return an instance of BeamFactorizationCurrentEvaluation, whose 'values' entry
        are dictionaries specifying the counterterm in flavor space, for the value of xi 
        specified in argument."""

        if allowed_backward_evolved_flavors != 'ALL':
            raise CurrentImplementationError(
                'The current %s must always be called with' %
                self.__class__.__name__ +
                "allowed_backward_evolved_flavors='ALL', not %s" %
                str(allowed_backward_evolved_flavors))

        # Only the order epsilon of the scales pre-factor matters here.
        prefactor = EpsilonExpansion({0: 1., 1: log(mu_r**2 / mu_f**2)})
        prefactor *= EpsilonExpansion({-1: 1.}) * normalization

        # Assign a fake xi for now if the distribution type is 'endpoint'
        # TODO: this is not optimal, eventually we should put each of these three pieces in
        # separate currents
        if self.distribution_type == 'endpoint':
            xi = 0.5

        # Define the NLO QCD PDF counterterms kernels
        kernel_gg = {
            'bulk':
            prefactor * (2. * self.CA * (1. / (1. - xi) +
                                         (1. - xi) / xi - 1. + xi * (1 - xi))),
            'counterterm':
            prefactor * (2. * self.CA / (1. - xi)),
            'endpoint':
            prefactor * (11. / 6. * self.CA - 2. / 3. * self.NF * self.TR)
        }

        kernel_gq = {
            'bulk': prefactor * (self.CF * (1. + (1. - xi)**2) / xi),
            'counterterm': None,
            'endpoint': None
        }

        kernel_qg = {
            'bulk': prefactor * (self.TR * (xi**2 + (1. - xi)**2)),
            'counterterm': None,
            'endpoint': None
        }

        kernel_qq = {
            'bulk': prefactor * (self.CF * ((1. + xi**2) / (1. - xi))),
            'counterterm': prefactor * (self.CF * ((1. + xi**2) / (1. - xi))),
            'endpoint': None
        }

        active_quark_PDGs = tuple([
            pdg for pdg in list(range(1, 7)) + list(range(-1, -7, -1))
            if pdg in self.beam_PDGs
        ])

        # Build the NLO flavor matrix
        flavor_matrix = {}
        for reduced_flavor in self.beam_PDGs:
            # Gluon backward evolution
            if reduced_flavor == 21:
                gluon_dict = {}
                if kernel_gg[self.distribution_type] is not None:
                    gluon_dict[(21, )] = kernel_gg[self.distribution_type]
                if active_quark_PDGs and kernel_gq[
                        self.distribution_type] is not None:
                    gluon_dict[active_quark_PDGs] = kernel_gq[
                        self.distribution_type]
                if gluon_dict:
                    flavor_matrix[21] = gluon_dict

            # Quark backward evolution
            if reduced_flavor in active_quark_PDGs:
                quark_dict = {}
                if kernel_qg[self.distribution_type] is not None:
                    quark_dict[(21, )] = kernel_qg[self.distribution_type]
                if kernel_qq[self.distribution_type] is not None:
                    quark_dict[(
                        reduced_flavor, )] = kernel_qq[self.distribution_type]
                if quark_dict:
                    flavor_matrix[reduced_flavor] = quark_dict

        # Truncate all entries of the flavor matrix so as to remove irrelevant O(\eps) terms
        for flav_in, flav_outs in flavor_matrix.items():
            for flav_out, eps_expansion in flav_outs.items():
                eps_expansion.truncate(max_power=0)

        # Now assign the flavor matrix in the BeamFactorizationCurrentEvaluation instance
        evaluation = utils.BeamFactorizationCurrentEvaluation({
            'spin_correlations': [
                None,
            ],
            'color_correlations': [
                None,
            ],
            'values': {
                (0, 0): flavor_matrix
            }
        })

        return evaluation
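
The flavor_matrix built by the loop above maps each incoming (reduced) flavor to the tuples of flavors it can backward-evolve into, each paired with an EpsilonExpansion weight. For the 'bulk' distribution type, where all four kernels are defined, its shape for a hypothetical beam content and with string placeholders instead of the actual kernels would be roughly:

# Illustrative flavor-matrix shape (hypothetical beam content, placeholder weights).
beam_PDGs = (21, 1, -1, 2, -2)
active_quark_PDGs = tuple(pdg for pdg in list(range(1, 7)) + list(range(-1, -7, -1))
                          if pdg in beam_PDGs)   # (1, 2, -1, -2)

flavor_matrix = {
    21: {(21,): 'kernel_gg', active_quark_PDGs: 'kernel_gq'},
     1: {(21,): 'kernel_qg', (1,):  'kernel_qq'},
     2: {(21,): 'kernel_qg', (2,):  'kernel_qq'},
    -1: {(21,): 'kernel_qg', (-1,): 'kernel_qq'},
    -2: {(21,): 'kernel_qg', (-2,): 'kernel_qq'},
}
# For 'counterterm' and 'endpoint', kernel_gq/kernel_qg are None and those entries are dropped.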
Code example #13
    def evaluate_kernel(self,
                        PS_point,
                        process,
                        xi,
                        mu_r,
                        mu_f,
                        Q,
                        normalization,
                        allowed_backward_evolved_flavors='ALL'):
        """ Return an instance of SubtractionCurrentEvaluation, whose 'values' entry
        are simple EpsilonExpansions since soft-integrated counterterms convoluted in a 
        correlated fashion with the initial state beams *cannot* act in flavor space."""

        if allowed_backward_evolved_flavors != 'ALL':
            raise CurrentImplementationError(
                'The current %s must always be called with' %
                self.__class__.__name__ +
                "allowed_backward_evolved_flavors='ALL', not %s" %
                str(allowed_backward_evolved_flavors))

        if process is None:
            raise CurrentImplementationError(self.name() +
                                             " requires a reduced_process.")

        # Now find all colored leg numbers in the reduced process
        # also find the initial state colored partons to tag initial/final eikonals
        all_colored_parton_numbers = []
        colored_initial_parton_numbers = []
        all_initial_numbers = [
            l.get('number') for l in process.get_initial_legs()
        ]
        for leg in process.get('legs'):
            leg_number = leg.get('number')
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg_number)
            if leg_number in all_initial_numbers:
                colored_initial_parton_numbers.append(leg.get('number'))

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations': [None],
            'color_correlations': [],
            'values': {}
        })

        # Obtain Q_square
        #Q        = sum([PS_point[l.get('number')] for l in process.get_initial_legs()])
        Q_square = Q.square()

        # Only up to the order epsilon^2 of the scales prefactor matters here.
        logMuQ = log(mu_r**2 / Q_square)
        prefactor = EpsilonExpansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})
        prefactor *= self.SEpsilon * normalization

        color_correlation_index = 0
        # Now loop over the colored parton number pairs (a,b)
        # and add the corresponding contributions to this current
        for i, a in enumerate(all_colored_parton_numbers):
            # Use the symmetry of the color correlation and soft current (a,b) <-> (b,a)
            for b in all_colored_parton_numbers[i + 1:]:
                # Write the integrated eikonal for that pair

                evaluation['color_correlations'].append(((a, b), ))

                pa = PS_point[a]
                pb = PS_point[b]

                # We can only handle massless particles
                try:
                    assert pa.square() / Q_square < 1.e-09
                except AssertionError:
                    misc.sprint(
                        "No massive particles in soft currents for now")
                    raise

                # Assign the type of dipole
                # dipole_type = [bool_a, bool_b] tags the two dipole legs, with True indicating an initial-state leg and False a final-state one
                dipole_type = [
                    a in colored_initial_parton_numbers, b
                    in colored_initial_parton_numbers
                ]

                if not any(dipole_type):  # Final-final
                    raise NotImplementedError
                elif not dipole_type[0]:  # a final, b initial
                    raise NotImplementedError
                elif not dipole_type[1]:  # b final, a initial
                    raise NotImplementedError
                else:  # initial initial
                    # Initial-initial: S+CS = 0
                    if self.distribution_type == 'bulk':
                        kernel = EpsilonExpansion({0: 0})
                    elif self.distribution_type == 'counterterm':
                        kernel = EpsilonExpansion({0: 0})
                    elif self.distribution_type == 'endpoint':
                        kernel = EpsilonExpansion({0: 0})
                    else:
                        raise CurrentImplementationError(
                            "Distribution type '%s' not supported." %
                            self.distribution_type)
                    # Former implementation of the II soft+SC. Commented by Nicolas
                    # While no longer useful, this is kept for now to remember how a non-zero integrated soft should be implemented
                    # if self.distribution_type == 'bulk':
                    #     kernel = EpsilonExpansion({
                    #                 0 : - 16.*xi * log(1.-xi**2) /(1.-xi**2),
                    #                 -1 : 8. * xi / (1.-xi**2),
                    #                 -2 : 0.
                    #     })
                    # elif self.distribution_type == 'counterterm':
                    #     kernel = EpsilonExpansion({
                    #                 0 : -8.*log(2.*(1.-xi))/(1.-xi),
                    #                 -1 : 4./(1.-xi),
                    #                 -2 : 0.
                    #     })
                    # elif self.distribution_type == 'endpoint':
                    #     kernel = EpsilonExpansion({
                    #                 0 : pi**2./3.-4.*log(2.)**2,
                    #                 -1 : 4.*log(2.),
                    #                 -2 : -2.
                    #     })
                    # else:
                    #     raise CurrentImplementationError("Distribution type '%s' not supported."
                    #                                                     %self.distribution_type)

                evaluation['values'][(
                    0, color_correlation_index)] = kernel * normalization
                color_correlation_index += 1

        return evaluation
Code example #14
    def evaluate_kernel(self,
                        PS_point,
                        process,
                        xi,
                        mu_r,
                        mu_f,
                        Q,
                        normalization,
                        allowed_backward_evolved_flavors='ALL'):
        """ Return an instance of BeamFactorizationCurrentEvaluation, whose 'values' entry
        are dictionaries specifying the counterterm in flavor space, for the value of xi 
        specified in argument."""

        # Obtain Q_square.
        Q_square = Q.square()

        # Only up to the order epsilon^2 of the scales prefactor matters here.
        logMuQ = log(mu_r**2 / Q_square)
        prefactor = EpsilonExpansion({0: 1., 1: logMuQ, 2: 0.5 * logMuQ**2})
        prefactor *= self.SEpsilon * normalization

        # The additional 1/x part of the prefactor is included later during the PDF
        # convolution of the event (using its 'Bjorken rescaling' attribute) because
        # we must make sure that the plus distribution hits on it.
        # Also, the same 1/x appears in the PDF counterterms as a result of the change
        # of variable necessary to bring them in the form where the plus distribution
        # only acts on the PDF. So it makes sense to keep it completely factorised.

        # Input variables
        y_0 = currents.SomogyiChoices.y_0_prime
        logy0 = log(y_0)
        # Assign a fake x for now if the distribution type is 'endpoint'
        # TODO: this is not optimal, eventually we should put each of these three pieces in
        # separate currents
        if self.distribution_type == 'endpoint':
            x = 0.5
        else:
            x = xi

        # In MadNkLO, we use the change of variable xb' = xb*xi so that the factor
        # (Q^2)^\eps in Eq. 5.21 of https://arxiv.org/pdf/0903.1218.pdf actually reads
        # (Q^2/(xi1*xi2))^\eps and the '+' distributions also act on it, which we realize
        # by simply multiplying the Q^2 provided by the xi factor that must be set to one.
        logMuQ_plus = log(mu_r**2 / (Q_square * x))
        prefactor_plus = EpsilonExpansion({
            0: 1.,
            1: logMuQ_plus,
            2: 0.5 * logMuQ_plus**2
        })
        prefactor_plus *= self.SEpsilon * normalization

        log1mx = log(1. - x)

        # Heaviside
        theta_x_1my0 = 1. if (x - (1 - y_0)) >= 0. else 0.
        theta_1my0_x = 1. if ((1 - y_0) - x) >= 0. else 0.

        # Define the NLO QCD integrated initial-state single-collinear counterterm kernels
        color_factor = self.CA
        kernel_gg = {
            'bulk':
            prefactor * color_factor * (EpsilonExpansion({
                -1:
                -2. * (1. / (1. - x) + (1. - x) / x - 1 + x * (1 - x)),
                0: (2. * log1mx / (1. - x)) * (1. + theta_x_1my0) +
                (2. * logy0 / (1. - x)) * theta_1my0_x + 2. *
                (((1. - x) / x) - 1. + x * (1. - x)) *
                (log1mx * (1. + theta_x_1my0) + logy0 * theta_1my0_x)
            })),
            'counterterm':
            prefactor_plus * color_factor * (EpsilonExpansion({
                -1:
                -2. * (1. / (1. - x)),
                0: (2. * log1mx / (1. - x)) * (1. + theta_x_1my0),
            })),
            'endpoint':
            prefactor * color_factor * (EpsilonExpansion(
                {
                    -2: 1.,
                    -1: 0.,
                    0: -(math.pi**2 / 6.) + logy0**2
                }))
        }

        color_factor = self.CA
        kernel_gq = {
            'bulk':
            prefactor * color_factor * (EpsilonExpansion({
                -1:
                -(self.CF / self.CA) * (1. + (1. - x)**2) / x,
                0: (self.CF / self.CA) *
                (((1. + (1. - x)**2) / x) *
                 (log1mx * (1. + theta_x_1my0) + logy0 * theta_1my0_x) + x)
            })),
            'counterterm':
            None,
            'endpoint':
            None
        }

        color_factor = self.CF
        kernel_qg = {
            'bulk':
            prefactor * color_factor * (EpsilonExpansion({
                -1:
                -(self.TR / self.CF) * (x**2 + (1. - x)**2),
                0: (self.TR / self.CF) *
                ((x**2 + (1. - x)**2) *
                 (log1mx *
                  (1. + theta_x_1my0) + logy0 * theta_1my0_x) + 2. * x *
                 (1. - x))
            })),
            'counterterm':
            None,
            'endpoint':
            None
        }

        color_factor = self.CF
        kernel_qq = {
            'bulk':
            prefactor * color_factor * (EpsilonExpansion({
                -1:
                -((1. + x**2) / (1. - x)),
                0: (2. * log1mx / (1. - x)) * (1. + theta_x_1my0) +
                (2. * logy0 / (1. - x)) * theta_1my0_x -
                ((1. + x) *
                 (log1mx *
                  (1. + theta_x_1my0) + logy0 * theta_1my0_x) - 1. + x)
            })),
            'counterterm':
            prefactor_plus * color_factor * (EpsilonExpansion({
                -1:
                -((1. + x**2) / (1. - x)),
                0: (2. * log1mx / (1. - x)) * (1. + theta_x_1my0),
            })),
            'endpoint':
            prefactor * color_factor * (EpsilonExpansion(
                {
                    -2: 1.,
                    -1: 3. / 2.,
                    0: -(math.pi**2 / 6.) + logy0**2
                }))
        }

        active_quark_PDGs = tuple([
            pdg for pdg in list(range(1, 7)) + list(range(-1, -7, -1))
            if pdg in self.beam_PDGs
        ])

        # Build the NLO flavor matrix
        flavor_matrix = {}
        for reduced_flavor in self.beam_PDGs:
            # Gluon backward evolution
            if reduced_flavor == 21:
                gluon_dict = {}
                if kernel_gg[self.distribution_type] is not None:
                    gluon_dict[(21, )] = kernel_gg[self.distribution_type]
                if active_quark_PDGs and kernel_gq[
                        self.distribution_type] is not None:
                    gluon_dict[active_quark_PDGs] = kernel_gq[
                        self.distribution_type]
                if gluon_dict:
                    flavor_matrix[21] = gluon_dict

            # Quark backward evolution
            if reduced_flavor in active_quark_PDGs:
                quark_dict = {}
                if kernel_qg[self.distribution_type] is not None:
                    quark_dict[(21, )] = kernel_qg[self.distribution_type]
                if kernel_qq[self.distribution_type] is not None:
                    quark_dict[(
                        reduced_flavor, )] = kernel_qq[self.distribution_type]
                if quark_dict:
                    flavor_matrix[reduced_flavor] = quark_dict

        # Truncate all entries of the flavor matrix so as to remove irrelevant O(\eps) terms
        for flav_in, flav_outs in flavor_matrix.items():
            for flav_out, eps_expansion in flav_outs.items():
                eps_expansion.truncate(max_power=0)

        # Now apply the mask 'allowed_backward_evolved_flavors' if not set to 'ALL'
        filtered_flavor_matrix = self.apply_flavor_mask(
            flavor_matrix, allowed_backward_evolved_flavors)

        # Now assign the flavor matrix in the BeamFactorizationCurrentEvaluation instance
        evaluation = utils.BeamFactorizationCurrentEvaluation({
            'spin_correlations': [
                None,
            ],
            'color_correlations': [
                None,
            ],
            'values': {
                (0, 0): filtered_flavor_matrix
            }
        })

        return evaluation
Code example #15
    def P_qg_averaged(color_factors, z):
        return EpsilonExpansion({
            0: color_factors.CF * ((1. + z**2) / (1. - z)),
            1: color_factors.CF * (-(1 - z))
        })
Code example #16
    def evaluate_kernel(self, PS_point, process, xi, mu_r, mu_f, Q, normalization,
                                                    allowed_backward_evolved_flavors='ALL'):
        """ Return an instance of SubtractionCurrentEvaluation, whose 'values' entry
        are simple EpsilonExpansions since soft-integrated counterterms convoluted in a 
        correlated fashion with the initial state beams *cannot* act in flavor space."""

        if allowed_backward_evolved_flavors != 'ALL':
            raise CurrentImplementationError('The current %s must always be called with'%self.__class__.__name__+
                "allowed_backward_evolved_flavors='ALL', not %s"%str(allowed_backward_evolved_flavors))

        if process is None:
            raise CurrentImplementationError(self.name() + " requires a reduced_process.")

        # Now find all colored leg numbers in the reduced process
        # also find the initial state colored partons to tag initial/final eikonals
        all_colored_parton_numbers = []
        colored_initial_parton_numbers = []
        all_initial_numbers = [l.get('number') for l in process.get_initial_legs()]
        for leg in process.get('legs'):
            leg_number = leg.get('number')
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg_number)
            if leg_number in all_initial_numbers:
                colored_initial_parton_numbers.append(leg.get('number'))

        # Now instantiate what the result will be
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations'   : [ None ],
            'color_correlations'  : [],
            'values'              : {}
        })

        # Obtain Q_square
        #Q        = sum([PS_point[l.get('number')] for l in process.get_initial_legs()])
        Q_square = Q.square()

        # Only up to the order epsilon^2 of the scales prefactor matters here.
        logMuQ = log(mu_r**2/Q_square)
        # Correction for the counterterm: in BS (bulk+counterterm), the variable Q_square corresponds to that
        # of the real event. However the counterterm corresponds to the residue of the bulk at xi=1.
        # This is effectively obtained by multiplying by xi: Q_residue = Q_real * xi.
        # Note for future dumb-me: log(mu_r**2/(Q_square*xi**2)) = logMuQ - log(xi**2)
        if self.distribution_type == 'counterterm':
            logMuQ -= log(xi**2)
        prefactor = EpsilonExpansion({ 0 : 1., 1 : logMuQ, 2 : 0.5*logMuQ**2 })
        prefactor *= normalization

        color_correlation_index = 0
        # Now loop over the colored parton number pairs (a,b)
        # and add the corresponding contributions to this current
        for i, a in enumerate(all_colored_parton_numbers):
            # Use the symmetry of the color correlation and soft current (a,b) <-> (b,a)
            for b in all_colored_parton_numbers[i+1:]:
                # Write the integrated eikonal for that pair

                evaluation['color_correlations'].append( ((a, b), ) )

                pa = PS_point[a]
                pb = PS_point[b]

                # We can only handle massless particles
                try:
                    assert pa.square()/Q_square < 1.e-09
                except AssertionError:
                    misc.sprint("No massive particles in soft currents for now")
                    raise

                # Assign the type of dipole
                # dipole_type = [bool_a, bool_b] tags the two dipole legs, with True indicating an initial-state leg and False a final-state one
                dipole_type = [a in colored_initial_parton_numbers, b in colored_initial_parton_numbers]

                if all(dipole_type): # Initial-initial
                    # Initial-initial: S+CS = 0
                    if self.distribution_type == 'bulk':
                        kernel = EpsilonExpansion({0:0})
                    elif self.distribution_type == 'counterterm':
                        kernel = EpsilonExpansion({0:0})
                    elif self.distribution_type == 'endpoint':
                        kernel = EpsilonExpansion({0:0})
                    else:
                        raise CurrentImplementationError("Distribution type '%s' not supported."
                                                                        %self.distribution_type)
                else: # At least one leg final
                    # The integrated counterterms are evaluated in terms of
                    # dipole_invariant = (1 - cos(angle between the dipole momenta)) / 2 in the Q rest frame (massless legs)
                    dipole_invariant = 0.5*pa.dot(pb)*Q.square()/(pa.dot(Q)*pb.dot(Q))
                    if self.distribution_type == 'bulk':
                        #The factor xi^2 below corrects the flux factor used in the bulk BS which has a 1/xi^2 too many
                        #A more permanent change is warranted after testing.
                        #See github issue #9 for reference 
                        kernel = EpsilonExpansion({0:xi**2*HE.integrated_bs_bulk_finite(dipole_invariant,xi)})
                    elif self.distribution_type == 'counterterm':
                        kernel = EpsilonExpansion({0:HE.integrated_bs_counterterm_finite(dipole_invariant,xi)})
                    elif self.distribution_type == 'endpoint':
                        kernel = EpsilonExpansion({-1:HE.integrated_bs_endpoint_pole(dipole_invariant),
                                                    0:HE.integrated_bs_endpoint_finite(dipole_invariant)})
                    else:
                        raise CurrentImplementationError("Distribution type '%s' not supported."
                                                                        %self.distribution_type)

                    # Former implementation of the II soft+SC. Commented by Nicolas
                    # While no longer useful, this is kept for now to remember how a non-zero integrated soft should be implemented
                    # if self.distribution_type == 'bulk':
                    #     kernel = EpsilonExpansion({
                    #                 0 : - 16.*xi * log(1.-xi**2) /(1.-xi**2),
                    #                 -1 : 8. * xi / (1.-xi**2),
                    #                 -2 : 0.
                    #     })
                    # elif self.distribution_type == 'counterterm':
                    #     kernel = EpsilonExpansion({
                    #                 0 : -8.*log(2.*(1.-xi))/(1.-xi),
                    #                 -1 : 4./(1.-xi),
                    #                 -2 : 0.
                    #     })
                    # elif self.distribution_type == 'endpoint':
                    #     kernel = EpsilonExpansion({
                    #                 0 : pi**2./3.-4.*log(2.)**2,
                    #                 -1 : 4.*log(2.),
                    #                 -2 : -2.
                    #     })
                    # else:
                    #     raise CurrentImplementationError("Distribution type '%s' not supported."
                    #                                                     %self.distribution_type)

                evaluation['values'][(0, color_correlation_index)] = kernel*prefactor
                color_correlation_index += 1

        return evaluation
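
As a sanity check on the dipole invariant used above, back-to-back massless momenta in the Q rest frame give (1 - cos(pi))/2 = 1. The ToyLV class below is a hypothetical stand-in for the framework's LorentzVector, for illustration only.

# Toy Minkowski 4-vectors to evaluate the dipole invariant numerically.
class ToyLV(tuple):
    def dot(self, other):
        return self[0] * other[0] - sum(a * b for a, b in zip(self[1:], other[1:]))
    def square(self):
        return self.dot(self)

pa = ToyLV((1., 0., 0.,  1.))
pb = ToyLV((1., 0., 0., -1.))
Q  = ToyLV((2., 0., 0.,  0.))
dipole_invariant = 0.5 * pa.dot(pb) * Q.square() / (pa.dot(Q) * pb.dot(Q))
assert abs(dipole_invariant - 1.) < 1e-12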