def __init__(self, wd):
    """Collect the run settings and prepare the run.

    Loads the defaults from 'default.settings', overrides them with the
    command-line arguments, resolves/creates the output, log and UFO
    folders, normalizes the 'RealBasis' and 'MoreGroupTheoryInfo'
    settings, then reads the model file.

    Parameters
    ----------
    wd : str
        Working directory; relative paths in the settings are resolved
        against it.
    """
    self.wd = wd

    # Load the default settings from 'default.settings' file
    settings = self.loadDefaultSettings()

    # Load the settings from user command
    runSettings = self.initParser(settings)

    # Combine the two dicts (command-line values take precedence)
    settings.update(runSettings)

    settings['Results'] = self._ensureFolder(settings['Results'])

    if settings['ForceLog']:
        settings['LogFolder'] = 'log/'
    settings['LogFolder'] = self._ensureFolder(settings['LogFolder'])

    self.initLogging(settings)

    # A useful check before going on : no export was selected
    # (fixed typo: 'ouput' -> 'output')
    if not any((settings['LatexOutput'], settings['MathematicaOutput'],
                settings['PythonOutput'], settings['UFOfolder'])):
        loggingCritical("Error : No output would be produced after the computation. Please choose at least one export option (Latex, Mathematica, Python, UFO).")
        exit()

    if settings['UFOfolder'] is not None:
        settings['UFOfolder'] = self._ensureFolder(settings['UFOfolder'])

    # Normalize 'RealBasis' when given as a string.
    # Fix: the former eval() on user input is replaced by an explicit
    # literal lookup ('none'/'true'/'false' -> None/True/False).
    if 'RealBasis' in settings and isinstance(settings['RealBasis'], str):
        settings['RealBasis'] = settings['RealBasis'].lower()
        literals = {'none': None, 'true': True, 'false': False}
        if settings['RealBasis'] in literals:
            settings['RealBasis'] = literals[settings['RealBasis']]
        elif settings['RealBasis'] not in ('adjoint', 'all'):
            loggingInfo("Warning : RealBasis argument not understood. Setting it to 'adjoint'.")
            settings['RealBasis'] = 'adjoint'

    # 'MoreGroupTheoryInfo' must end up as a non-negative integer
    # (True -> 10, False / missing / invalid -> 0)
    if 'MoreGroupTheoryInfo' in settings:
        if settings['MoreGroupTheoryInfo'] is True:
            settings['MoreGroupTheoryInfo'] = 10
        elif settings['MoreGroupTheoryInfo'] is False:
            settings['MoreGroupTheoryInfo'] = 0
        elif not isinstance(settings['MoreGroupTheoryInfo'], int):
            loggingInfo("Warning : 'MoreGroupTheoryInfo' setting must be a boolean or a positive integer. Setting it to False.")
            settings['MoreGroupTheoryInfo'] = 0
    else:
        settings['MoreGroupTheoryInfo'] = 0

    self.yamlSettings = self.readModelFile(settings)
    self.settings = settings

def _ensureFolder(self, path):
    """Resolve *path* against the working directory if it is relative and
    create the folder if it does not exist yet. Returns the absolute path."""
    if os.path.abspath(path) != path:
        path = os.path.abspath(os.path.join(self.wd, path))
    if not os.path.exists(path):
        os.makedirs(path)
    return path
def expandLagrangian(self, RGmodule):
    """Build and expand the Lagrangian, then register the user-provided
    substitutions, vevs and anomalous dimensions into *RGmodule*."""
    self.lagrangian = Lagrangian(self.saveSettings, self, RGmodule)
    loggingInfo("Done.")
    loggingInfo("Expanding the Lagrangian ...")

    self.lagrangian.expand()
    self.expandedPotential = self.lagrangian.expandedPotential

    # Read the substitutions now
    self.substitutions = {}
    self.gutNorm = {}
    if 'Substitutions' in self.saveSettings and self.saveSettings['Substitutions'] != {}:
        # Fix: the original re-checked 'Substitutions' membership in a
        # ternary here, which was dead code — the guard above already
        # guarantees the key exists.
        self.substitutions = getSubstitutions(self, self.saveSettings['Substitutions'])

    # If any matrix substitution is provided, check now that the shapes correspond.
    # This is to prevent the computation from starting if not.
    # Also, if the matrix satisfies Y = Diag(y), replace it by y*Identity(nG)
    if 'yukMat' in self.substitutions:
        for k, v in self.substitutions['yukMat'].items():
            # Boolean flags in the coupling structure are not dimensions
            shape = tuple(el for el in self.couplingStructure[k] if not isinstance(el, bool))
            if not isinstance(v[1], DiagonalMatrix):
                if v[1].shape != shape:
                    loggingCritical("Error : The shape of the matrix " + k +
                                    " given in Substitutions" + " should be " + str(shape))
                    exit()
            else:
                if shape[0] != shape[1]:
                    loggingCritical("Error in Substitutions : the 'diag' keyword cannot be used for" +
                                    " the rectangular matrix '" + k + "'")
                    exit()
                cType, diagMat = v
                # Diag(y) on an nG x nG structure -> y * Identity(nG)
                self.substitutions['yukMat'][k] = (cType, diagMat.arg * Identity(shape[0]))

    # VeVs
    if self.vevs != {}:
        for k, v in self.vevs.items():
            RGmodule.Vdic[(v[0],)] = v[1]

    # Anomalous dimensions
    if self.fermionAnomalous != {}:
        for k, v in self.fermionAnomalous.items():
            RGmodule.gammaFdic[v] = k
    if self.scalarAnomalous != {}:
        for k, v in self.scalarAnomalous.items():
            RGmodule.gammaSdic[v] = k
def computeBetaFunctions(self):
    """Compute the beta functions of every coupling type, loop order by
    loop order, appending the results to self.allRGEs."""
    loggingInfo("Computing the RGES ...")

    for cType, termList in self.toCalculate.items():
        if self.loopDic[cType] == 0:
            continue
        loggingInfo(" -> " + cType)

        nTerms = len(termList)
        rgClass = self.RGclasses[cType]
        for loopOrder in range(self.loopDic[cType]):
            loggingInfo(" -> " + str(loopOrder + 1) + "-loop")
            print_progress(0, nTerms, prefix=' ' * 8, bar_length=10,
                           printTime=self.times)
            for pos, termArgs in enumerate(termList):
                self.allRGEs[cType][loopOrder].append(
                    rgClass.compute(*termArgs, nLoops=loopOrder))
                print_progress(pos + 1, nTerms, prefix=' ' * 8, bar_length=10,
                               printTime=self.times, logProgress=True)

    loggingInfo(" ... Done")
def checkDependencies():
    """Check the mandatory and optional dependencies; exit when a
    mandatory one is missing or outdated."""
    from Logging import loggingCritical, loggingInfo

    dep = checkDependenciesAux(requirements)
    # dep[0] is the overall status flag; nothing to report when it's ok
    if dep[0] is not False:
        return

    mustExit = False
    if dep[1] != []:
        loggingCritical('Error: some dependencies are missing/outdated.\n'
                        + '\n'.join(dep[1]))
        mustExit = True
    if dep[2] != []:
        loggingInfo('Warning: some optional dependencies are missing/outdated.\n'
                    + '\n'.join(dep[2]))
    if mustExit:
        exit()
def mapBetaFunctions(self):
    """Combine the raw RGE results into the beta functions of the model's
    couplings, applying the Lagrangian mapping matrix and the
    (4*pi)^exponent normalization factor."""
    loggingInfo("Re-combining the RGES ...")

    # Total work count, only used by the progress bar
    nTot = 0
    count = 0
    for couplingType, RGlist in self.allRGEs.items():
        nTot += len(self.potential[couplingType]) * self.loopDic[couplingType]

    for couplingType, RGloops in self.allRGEs.items():
        mat = self.lagrangianMapping[couplingType]
        for n, RGlist in RGloops.items():
            couplingRGEs = mat * Matrix(RGlist)

            # Take into account the beta-exponent
            expFactor = 1
            if 'Anomalous' not in couplingType:
                exponent = self.betaExponent(n + 1) - 2 * (n + 1)
                if exponent != 0:
                    expFactor = Pow(4 * pi, exponent)

            for pos, coupling in enumerate(list(self.potential[couplingType])):
                try:
                    self.couplingRGEs[couplingType][n][coupling] = expand(
                        couplingRGEs[pos] * expFactor)
                except Exception as e:
                    # Fix: was `except BaseException`, which also swallowed
                    # KeyboardInterrupt / SystemExit.
                    loggingCritical(f"Error expanding term at : {couplingType}, {n}, {pos}")
                    loggingCritical(e)
                    exit()
                count += 1
                print_progress(count, nTot, prefix=' ', bar_length=20,
                               printTime=self.times)
def initialize(self):
    """Construct the gauge generator tensors, fold the 'adjoint' Yukawa /
    fermion-mass entries onto their base keys, initialize all tensors and
    optionally run the gauge invariance check."""
    loggingInfo("Initializing tensor quantities...", end=' ')
    t0 = time.time()

    self.constructTs()
    self.constructT()

    def mergeAdjoint(dic, keep):
        # Remove the trailing 'True' flag (adjoint Yuk/FM matrices) from the
        # keys, accumulating each value onto the truncated key.
        # Deduplicates the two identical loops of the original.
        for key in list(dic.keys()):
            if key[-1] is True:
                newKey = key[:keep]
                dic[newKey] = dic.get(newKey, 0) + dic[key]
                del dic[key]

    mergeAdjoint(self.YDic, 3)
    mergeAdjoint(self.MFdic, 2)

    # Initialize tensors
    self.initTensors()

    loggingInfo("Done." + (f" ({time.time()-t0:.3f} seconds)" if self.model.times else ''))

    if self.model.runSettings['CheckGaugeInvariance'] is True:
        self.checkGaugeInvariance()
    else:
        loggingInfo("Skipping gauge invariance check.")

    # Close the DB, since all stored objects must have been read by now
    self.model.idb.close()
def readModelFile(self, RunSettings):
    """Load and pre-process the YAML model file.

    Resolves the model path against the working directory, stores the raw
    file content in RunSettings['StoreModelFile'], and normalizes
    list-form particle / potential entries into dictionaries.

    Returns the processed YAML settings dict; aborts the run on any
    loading or format error.
    """
    if RunSettings['Model'] == '':
        loggingCritical("Error : Please, specify a .model file (using '-m' argument).")
        exit()

    if os.path.abspath(RunSettings['Model']) != RunSettings['Model']:
        RunSettings['Model'] = os.path.abspath(os.path.join(self.wd, RunSettings['Model']))

    try:
        # Open the Yaml file and load the settings.
        # Fix: use a context manager so the file is closed even when
        # parseFile / yaml.load raises.
        with open(RunSettings['Model'], 'r') as f:
            RunSettings['StoreModelFile'] = f.read()
        fString = self.parseFile(RunSettings['StoreModelFile'])
        if yaml.__version__ > '5.1':
            yamlSettings = yaml.load(fString, Loader=yaml.FullLoader)
        else:
            yamlSettings = yaml.load(fString)
    except yaml.scanner.ScannerError as err:
        loggingCritical(f"Check the YAML file {RunSettings['Model']}, impossible to load the settings:\n\n-> {err}.")
        exit()
    except yaml.parser.ParserError as err:
        loggingCritical(f"Check the YAML file {RunSettings['Model']}, impossible to parse the settings:\n\n->{err}.")
        exit()
    except IOError as errstr:
        loggingCritical(f"Did not find the YAML file {RunSettings['Model']}, specify the path if not in the current directory.\n\n-> {errstr}.")
        exit()

    loggingInfo(f"Loading the YAML file: {RunSettings['Model']} ...", end=' ')

    # Now we want to process the settings before creating the model class.
    # First construct the dictionaries if the input is given as a list.

    # Fermions: list form is [gen, qnb_1, ..., qnb_N]
    if 'Fermions' in yamlSettings and yamlSettings['Fermions'] != {}:
        for k, v in yamlSettings['Fermions'].items():
            if type(v) == dict:
                continue
            elif type(v) == list:
                if len(v) == len(yamlSettings['Groups']) + 1:
                    qnb = {grp: Q for (grp, Q) in zip(yamlSettings['Groups'], v[1:])}
                    yamlSettings['Fermions'][k] = {'Gen': v[0], 'Qnb': qnb}
                else:
                    loggingCritical(f"Error : The length of the lists describing fermions should be 1 + {len(yamlSettings['Groups'])}, "
                                    + f"corresponding to generation + various quantum numbers. ('{k} : {v}')")
                    exit()
            else:
                loggingCritical(f"Error : Fermions should either be described by a dictionary or a list. ('{k} : {v}')")
                exit()

    # Real scalars: list form is [qnb_1, ..., qnb_N]
    if 'RealScalars' in yamlSettings and yamlSettings['RealScalars'] != {}:
        for k, v in yamlSettings['RealScalars'].items():
            if type(v) == dict:
                if len(v) == 1 and 'Qnb' in v:
                    yamlSettings['RealScalars'][k] = v['Qnb']
            elif type(v) == list:
                if len(v) == len(yamlSettings['Groups']):
                    qnb = {grp: Q for (grp, Q) in zip(yamlSettings['Groups'], v)}
                    yamlSettings['RealScalars'][k] = qnb
                else:
                    loggingCritical(f"Error : The length of the lists describing real scalars should be {len(yamlSettings['Groups'])}, "
                                    + f"corresponding to the various quantum numbers. ('{k} : {v}')")
                    exit()
            else:
                loggingCritical(f"Error : Real scalars should either be described by a dictionary or a list. ('{k} : {v}')")
                exit()

    # For complex scalars, also check that the pairs [Pi, Sigma] are only used once
    if 'CplxScalars' in yamlSettings and 'ComplexScalars' not in yamlSettings:
        yamlSettings['ComplexScalars'] = yamlSettings['CplxScalars']

    if 'ComplexScalars' in yamlSettings and yamlSettings['ComplexScalars'] != {}:
        realFieldsDic = {}
        for k, v in yamlSettings['ComplexScalars'].items():
            if type(v) == dict:
                pass
            elif type(v) == list:
                # List form is [Re, Im, norm, qnb_1, ..., qnb_N]
                if len(v) == len(yamlSettings['Groups']) + 3:
                    qnb = {grp: Q for (grp, Q) in zip(yamlSettings['Groups'], v[3:])}
                    yamlSettings['ComplexScalars'][k] = {'RealFields': [v[0], v[1]],
                                                         'Norm': v[2],
                                                         'Qnb': qnb}
                else:
                    loggingCritical(f"Error : The length of the lists describing complex scalars should be 3 + {len(yamlSettings['Groups'])}, "
                                    + f"corresponding to Re + Im + norm + various quantum numbers. ('{k} : {v}')")
                    exit()
            else:
                loggingCritical(f"Error : Complex scalars should either be described by a dictionary or a list. ('{k} : {v}')")
                exit()

            rf = tuple(yamlSettings['ComplexScalars'][k]['RealFields'])
            if rf not in realFieldsDic:
                realFieldsDic[rf] = k
            else:
                loggingCritical(f"Error in complex scalar '{k}' : the real fields couple {rf} is already used in '{realFieldsDic[rf]}'")
                exit()

    if 'Potential' in yamlSettings and yamlSettings['Potential'] != {}:
        labels = ('QuarticTerms', 'Yukawas', 'TrilinearTerms', 'ScalarMasses', 'FermionMasses')
        for cType in labels:
            if cType in yamlSettings['Potential'] and yamlSettings['Potential'][cType] != {}:
                for coupling, term in yamlSettings['Potential'][cType].items():
                    if type(term) == str:
                        # This is an explicit math expression
                        pass
                    elif type(term) == dict:
                        # This is a dict with no values, containing :
                        # { 'mathExpression', assumption1, assumption2, ... }
                        tup = list(term.keys())
                        if len(tup) > 1:
                            tup = tuple([tup[0]] + [el.lower() for el in tup[1:]])
                        else:
                            tup = tup[0]
                        yamlSettings['Potential'][cType][coupling] = tup
                    else:
                        loggingCritical(f"Could not understand the term : {term}")
                        exit()

    loggingInfo("Done.")
    return yamlSettings
def exports(runSettings, model):
    """Export the computed results (Latex / Mathematica / Python / UFO),
    copy the model file into the results folder and run the user-defined
    end-commands from 'default.settings'."""
    loggingInfo("Exporting results...")
    tmpWD = os.getcwd()

    # Create a folder with the name of the model
    if runSettings['CreateFolder'] is True:
        path = os.path.join(runSettings['Results'], model._Name)
        if not os.path.exists(path):
            os.makedirs(path)
    else:
        path = runSettings['Results']

    # Fix: 'latex' was read unconditionally at the end of the function,
    # raising NameError when neither the Latex nor the Python export
    # created it.
    latex = None

    if runSettings['LatexOutput'] is True:
        from Latex import LatexExport
        loggingInfo("\tExporting to Latex...", end=' ')
        latex = LatexExport(model)
        latex.write(os.path.join(path, model._Name + '.tex'))
        loggingInfo("Done.")

    if runSettings['MathematicaOutput'] is True:
        from Mathematica import MathematicaExport
        loggingInfo("\tExporting to Mathematica...", end=' ')
        mathematica = MathematicaExport(model)
        mathematica.write(os.path.join(path, model._Name + '.m'))
        loggingInfo("Done.")

    if runSettings['PythonOutput'] is True:
        from Python import PythonExport
        # If Latex export is disabled, create a Latex object anyway
        # to get the latex substitutions
        if runSettings['LatexOutput'] is False:
            from Latex import LatexExport
            latex = LatexExport(model, getLatexSubs=True)
        loggingInfo("\tExporting to Python...", end=' ')
        try:
            python = PythonExport(model, latexSubs=latex.latex)
            python.write(path)
        except TypeError as e:
            print('\nError : ' + str(e))
        else:
            loggingInfo("Done.")

    if runSettings['UFOfolder'] is not None:
        from UFO import UFOExport
        loggingInfo("\tExporting to UFO...", end=' ')
        try:
            ufo = UFOExport(model)
            ufo.write(runSettings['UFOfolder'])
        except TypeError as e:
            loggingCritical("An error occurred during the UFO export : \n" + str(e))
        else:
            loggingInfo("Done.")

    # Copy the .model file in the results folder
    if runSettings['CopyModelFile']:
        fName = os.path.join(path, os.path.basename(runSettings['Model']))
        s = "# This model file was automatically copied by PyR@TE 3 on " + time.ctime() + "\n"
        s += runSettings['StoreModelFile']
        try:
            # Fix: context manager + narrow exception instead of a bare
            # 'except:' that also hid programming errors.
            with open(fName, 'w') as f:
                f.write(s)
        except OSError:
            loggingCritical("Error while copying the model file in the results folder.")

    # Now apply possible user-defined commands from 'default.settings'.
    # Fix: skip cleanly when 'EndCommands' is empty — ''.split(',') would
    # otherwise yield [''] and try to run an empty command.
    if runSettings['EndCommands']:
        commands = [cmd.strip() for cmd in
                    runSettings['EndCommands'].replace('[name]', model._Name).split(',')]
        loggingInfo("Running user-defined commands : ")
        os.chdir(path)
        shell = sys.platform.startswith('win')
        for cmd in commands:
            loggingInfo("\t-> '" + cmd + "'")
            try:
                run(cmd.split(' '), shell=shell, stdout=DEVNULL, stderr=STDOUT, check=True)
            except CalledProcessError as e:
                loggingCritical("An error occurred when running the command. Skipping.")
                loggingCritical(' >> ' + str(e))
        os.chdir(tmpWD)

    # This is for debugging, remove later
    if latex is not None:
        model.latex = latex
    # model.python = python
def checkGaugeInvariance(self):
    """Contract each coupling tensor with the gauge generators and verify
    that the gauge variation vanishes; report offending couplings and
    abort otherwise."""
    loggingInfo("Checking gauge invariance ...", end=' ')
    startTime = time.time()

    # Gauge variation of the Yukawa tensor (should vanish identically)
    yukCheck = tensorAdd(
        tensorMul(-1, tensorContract(self.Tt(A_, i_, j_),
                                     self.y(a_, j_, k_),
                                     freeDummies=[A_, a_, i_, k_],
                                     doit=True)),
        tensorContract(self.y(a_, i_, j_),
                       self.T(A_, j_, k_),
                       freeDummies=[A_, a_, i_, k_],
                       doit=True),
        tensorContract(self.y(b_, i_, k_),
                       self.Ts(A_, b_, a_),
                       freeDummies=[A_, a_, i_, k_],
                       doit=True))

    # Fermion mass terms
    fermionMassCheck = tensorAdd(
        tensorMul(-1, tensorContract(self.Tt(A_, i_, j_),
                                     self.M(j_, k_),
                                     freeDummies=[A_, i_, k_],
                                     doit=True)),
        tensorContract(self.M(i_, j_),
                       self.T(A_, j_, k_),
                       freeDummies=[A_, i_, k_],
                       doit=True))

    # Quartic couplings : rotate each of the four scalar indices in turn
    quarticCheck = tensorAdd(
        tensorContract(self.Ts(A_, a_, e_), self.l(e_, b_, c_, d_),
                       freeDummies=[A_, a_, b_, c_, d_], doit=True),
        tensorContract(self.Ts(A_, b_, e_), self.l(a_, e_, c_, d_),
                       freeDummies=[A_, a_, b_, c_, d_], doit=True),
        tensorContract(self.Ts(A_, c_, e_), self.l(a_, b_, e_, d_),
                       freeDummies=[A_, a_, b_, c_, d_], doit=True),
        tensorContract(self.Ts(A_, d_, e_), self.l(a_, b_, c_, e_),
                       freeDummies=[A_, a_, b_, c_, d_], doit=True))

    # Trilinear couplings
    trilinearCheck = tensorAdd(
        tensorContract(self.Ts(A_, a_, e_), self.h(e_, b_, c_),
                       freeDummies=[A_, a_, b_, c_], doit=True),
        tensorContract(self.Ts(A_, b_, e_), self.h(a_, e_, c_),
                       freeDummies=[A_, a_, b_, c_], doit=True),
        tensorContract(self.Ts(A_, c_, e_), self.h(a_, b_, e_),
                       freeDummies=[A_, a_, b_, c_], doit=True))

    # Scalar mass terms
    scalarMassCheck = tensorAdd(
        tensorContract(self.Ts(A_, a_, e_), self.mu(e_, b_),
                       freeDummies=[A_, a_, b_], doit=True),
        tensorContract(self.Ts(A_, b_, e_), self.mu(a_, e_),
                       freeDummies=[A_, a_, b_], doit=True))

    def listProblematic(dic):
        # For every coupling symbol appearing in a non-vanishing entry,
        # collect the gauge groups for which invariance is violated.
        offenders = {}
        for key, expr in dic.items():
            symbols = [obj for obj in expr.atoms()
                       if not obj.is_number
                       and not (hasattr(obj, 'is_Identity') and obj.is_Identity)]
            for c in symbols:
                offenders.setdefault(str(c), set()).add(key[0][0])
        return "\n\t" + "\n\t".join(
            str(name) + ' (' + ', '.join(self.model.gaugeGroupsList[g].name
                                         for g in sorted(list(groups))) + ')'
            for name, groups in offenders.items())

    if yukCheck != {}:
        loggingCritical("Gauge invariance is not satisfied by the following Yukawa couplings :"
                        + listProblematic(yukCheck))
    if quarticCheck != {}:
        loggingCritical("Gauge invariance is not satisfied by the following quartic couplings :"
                        + listProblematic(quarticCheck))
    if fermionMassCheck != {}:
        loggingCritical("Gauge invariance is not satisfied by the following fermion mass couplings :"
                        + listProblematic(fermionMassCheck))
    if trilinearCheck != {}:
        loggingCritical("Gauge invariance is not satisfied by the following trilinear couplings :"
                        + listProblematic(trilinearCheck))
    if scalarMassCheck != {}:
        loggingCritical("Gauge invariance is not satisfied by the following scalar mass couplings :"
                        + listProblematic(scalarMassCheck))

    if any(el != {} for el in (yukCheck, quarticCheck, fermionMassCheck,
                               trilinearCheck, scalarMassCheck)):
        exit()

    loggingInfo("All OK !" + (f" ({time.time()-startTime:.3f} seconds)"
                              if self.model.times else ''))
def constructMapping(self, RGmodule):
    """Build, for each coupling type, the matrix mapping the general
    Lagrangian onto the model's couplings, together with the list of
    tensor entries whose beta functions must actually be computed."""
    loggingInfo("Mapping the model onto the general Lagrangian ...")

    # --- Gauge couplings, taking possible kinetic mixing into account ---
    noMix = {}
    mix = {}
    alreadyTaken = set()
    for pair in itertools.combinations_with_replacement(range(RGmodule.nGi), 2):
        A, B = (RGmodule.gi[i] for i in pair)
        c = RGmodule.G_(A, B)
        if c != 0 and c not in alreadyTaken:
            target = noMix if A == B else mix
            if not self.upper:
                A, B = B, A
            target[(A, B)] = len(target)
            alreadyTaken.add(c)

    # Diagonal entries first, mixed ones shifted after them
    newInds = {**noMix, **{k: v + len(noMix) for k, v in mix.items()}}
    gaugeMatrix = zeros(len(newInds))

    def delta(A, B):
        return 1 if A == B else 0

    def G(A, B):
        if RGmodule.G_(A, B) == 0:
            return 0
        if not self.kinMix or A not in RGmodule.Ugauge or B not in RGmodule.Ugauge:
            return sqrt(RGmodule.G_(A, B)).args[0]
        i, j = RGmodule.Ugauge.index(A), RGmodule.Ugauge.index(B)
        return self.kinMat[i, j]

    for (A, B), X in newInds.items():
        for (C, D), Y in newInds.items():
            gaugeMatrix[X, Y] = G(B, D) * delta(A, C) + G(A, D) * delta(B, C)

    gaugeMatrix = simplify(gaugeMatrix.inv())

    couplingType = 'GaugeCouplings'
    self.potential[couplingType] = {c: 0 for c in self.gaugeCouplings}
    self.lagrangianMapping[couplingType] = gaugeMatrix * self.betaFactor
    self.toCalculate[couplingType] = list(newInds.keys())

    # --- All other coupling types ---
    count = 0
    translation = self.translateDic(RGmodule)
    for couplingType in self.potential:
        if couplingType == 'Definitions':
            continue
        if not (couplingType in translation
                and self.potential[couplingType] != {}
                and translation[couplingType] != {}):
            continue

        coeffList = list(self.potential[couplingType].keys())
        mappingMatrix = SparseMatrix(len(coeffList), len(coeffList), 0)
        dicList = []

        # For each distinct expression keep the 'simplest' tensor key
        auxDic = {}
        sortFunc = lambda x: (len(set(x[0])), len(x[1].as_coeff_add()[1]), x[0])
        for key, val in translation[couplingType].items():
            if key[-1] is True:
                continue
            if val not in auxDic or sortFunc((key, val)) < sortFunc((auxDic[val], val)):
                auxDic[val] = key

        # Greedily add entries until the mapping matrix has full rank
        rank = 0
        for v, k in auxDic.items():
            matTry = self.fillMappingMatrix(mappingMatrix, rank, coeffList, (k, v))
            newRank = matTry.rank()
            if newRank > rank:
                mappingMatrix = matTry
                rank = newRank
                dicList.append(k)
                count += 1
                print_progress(count, self.nCouplings, prefix=' ' * 4,
                               bar_length=20, printTime=self.times,
                               logProgress=True)
                if newRank == len(coeffList):
                    self.lagrangianMapping[couplingType] = (
                        Matrix(mappingMatrix).inv() * self.betaFactor)
                    break
        else:
            # The mapping matrix is not invertible
            ns = mappingMatrix.nullspace()
            cVec = Matrix([Symbol('O(' + el + ')') for el in coeffList]).transpose()
            errorMess = "\n\nError in Lagrangian mapping: matrix of couplings is not invertible. "
            errorMess += f"The following operators in '{couplingType}' are linearly dependent:\n\n"
            for vec in ns:
                errorMess += f"    {(cVec*vec)[0,0]} = 0\n"
            loggingCritical(errorMess[:-1])
            exit()

        self.toCalculate[couplingType] = dicList

    # Add vevs and anomalous dimensions by hand (not related to the Lagrangian)
    if self.vevs != {}:
        self.potential['Vevs'] = {c: 0 for c in self.vevs}
        self.lagrangianMapping['Vevs'] = eye(len(self.vevs)) * self.betaFactor
        self.toCalculate['Vevs'] = list(RGmodule.Vdic.keys())

    if self.fermionAnomalous != {}:
        self.potential['FermionAnomalous'] = {c: 0 for c in self.fermionAnomalous}
        self.lagrangianMapping['FermionAnomalous'] = eye(len(self.fermionAnomalous))
        self.toCalculate['FermionAnomalous'] = list(RGmodule.gammaFdic.keys())

    if self.scalarAnomalous != {}:
        self.potential['ScalarAnomalous'] = {c: 0 for c in self.scalarAnomalous}
        self.lagrangianMapping['ScalarAnomalous'] = eye(len(self.scalarAnomalous))
        self.toCalculate['ScalarAnomalous'] = list(RGmodule.gammaSdic.keys())
def doSubstitutions(self):
    """Apply the user-defined substitutions to the computed RGEs.

    Delegates to the module-level doSubstitutions() routine (the global
    name, not this method)."""
    loggingInfo("Applying substitutions ...")
    doSubstitutions(self, self.substitutions)
def getParticles(self, settings):
    """Create all particle objects (fermions, real and complex scalars)
    from the model settings, then enumerate their gauge components into
    self.allFermions / self.allScalars."""

    def completeTrivialReps(dic):
        # Any gauge factor not mentioned gets the trivial rep :
        # charge 0 for abelian factors, singlet (1) otherwise.
        for gName, group in self.gaugeGroups.items():
            if gName not in dic['Qnb']:
                dic['Qnb'][gName] = 0 if group.abelian else 1

    for key, value in settings.items():
        if key == 'Fermions':
            self.Fermions = value
            antiFermions = {}
            # Create the particle and store it in Fermions
            for part, val in value.items():
                completeTrivialReps(val)
                self.Fermions[part] = Particle(part, val, self.gaugeGroups, self.idb)
                antiFermions[part + 'bar'] = self.Fermions[part].antiParticle()
            self.Fermions.update(antiFermions)

        elif key == 'RealScalars':
            for part, qnb in value.items():
                norm = self.parseMathExpr(qnb['Norm']) if 'Norm' in qnb else None
                Qnb = qnb if 'Qnb' in qnb else {'Gen': 1, 'Qnb': qnb}
                completeTrivialReps(Qnb)
                newScalar = Particle(part, Qnb, self.gaugeGroups, self.idb)
                self.Scalars[part] = newScalar

                # Check that the representations are compatible with a real scalar
                for gName, g in self.gaugeGroups.items():
                    if g.abelian and newScalar.Qnb[gName] != 0:
                        loggingCritical(f"\nError: real scalar '{part}' cannot be charged under the abelian gauge factor {gName}")
                        exit()
                    if not g.abelian:
                        rep = newScalar.Qnb[gName]
                        dimR = g.dimR(rep)
                        if dimR == 1:
                            continue
                        fs = self.idb.get(g.type, 'frobenius', rep)
                        if fs == 1:
                            loggingCritical(f"\nError: real scalar '{part}' cannot transform under the complex representation '{dimR}' of {gName}")
                            exit()
                        elif fs == -1:
                            newScalar.pseudoRealReps.append((gName, g, rep, dimR))

                if newScalar.pseudoRealReps != []:
                    if norm is None:
                        loggingInfo(f"Warning: self-conjugate scalar '{part}' should be given a norm. Assuming 1/sqrt(2) by default.")
                        norm = 1 / sqrt(2)
                    newScalar.pseudoNorm = norm
                elif norm is not None:
                    loggingInfo(f"Warning: ignoring the unnecessary 'Norm' keyword in real scalar '{part}'.")

        elif key == 'Potential':
            self.potential = value

    self.Particles.update(self.Fermions)
    self.Particles.update(self.Scalars)

    if 'ComplexScalars' in settings:
        for part, setts in settings['ComplexScalars'].items():
            setts['Norm'] = self.parseMathExpr(setts['Norm'])
            if 'Gen' not in setts:
                setts['Gen'] = 1
            completeTrivialReps(setts)
            self.ComplexScalars[part] = ComplexScalar(part, setts, self.gaugeGroups, self.idb)
            self.ComplexScalars[part + 'bar'] = self.ComplexScalars[part].antiParticle()
            # Register the two real components as scalars as well
            for r in self.ComplexScalars[part].realFields:
                self.Scalars[str(r)] = r
        self.Particles.update(self.ComplexScalars)

    # Enumerate every gauge component of every fermion
    nF = 0
    for fName, f in self.Fermions.items():
        ranges = list(f.indicesRange.values())
        if not [r for r in ranges if r != 0]:
            # Singlet : one single component, no indices
            self.allFermions[fName] = (nF, f, tuple([-1] * len(ranges)))
            nF += 1
        else:
            for el in itertools.product(*[(list(range(r)) if r != 0 else [-1])
                                          for r in ranges]):
                inds = [n for n in el if n != -1]
                self.allFermions[fName + str(inds)] = (
                    nF, f, tuple(el),
                    parse_expr(str(fName) + str(inds),
                               local_dict={str(f._name): IndexedBase(str(f._name))}))
                nF += 1

    # Enumerate every gauge component of every scalar
    nS = 0
    for sName, s in self.Scalars.items():
        ranges = list(s.indicesRange.values())
        if not [r for r in ranges if r != 0]:
            # Singlet
            self.allScalars[sName] = (nS, s, tuple([-1] * len(ranges)))
            nS += 1
        else:
            storeNs = nS
            for el in itertools.product(*[(list(range(r)) if r != 0 else [-1])
                                          for r in ranges]):
                inds = [n for n in el if n != -1]
                self.allScalars[sName + str(inds)] = (
                    nS, s, tuple(el),
                    parse_expr(str(sName) + str(inds),
                               local_dict={str(s._name): IndexedBase(str(s._name))}))
                nS += 1
            # If the real scalar transforms under pseudo-real reps, some more work is needed
            if s.pseudoRealReps != []:
                s.pseudoScalarHandling(list(self.allScalars.items()), storeNs)

    self.symbolicGen = any(isinstance(p.gen, Symbol) for p in self.Particles.values())
def __init__(self, settings, runSettings, idb, realBasis='all'):
    """Build the model object: gauge groups, particle content, potential,
    coupling bookkeeping and the translation dictionaries used by the
    RGE computation."""

    ###############
    # Definitions #
    ###############
    self._Name = settings['Name'].replace(' ', '_').replace('/', '_').replace('\\', '_')
    self._Author = settings['Author']
    self._Date = settings['Date']
    self.times = runSettings['PrintComputationTimes']

    self.saveSettings = copy.deepcopy(settings)
    self.runSettings = runSettings

    # Declare an interactive db access object
    self.idb = idb

    self.loopDic = {}
    self.validateSettings(settings, runSettings)

    loggingInfo("Loading the model ...", end=' ')

    ####################
    # Get gauge groups #
    ####################
    self.gaugeGroups = {}
    self.gaugeGroupsList = []
    self.UgaugeGroups = []

    self.realBasis = runSettings['RealBasis']
    self.getGaugegroups(settings, realBasis=self.realBasis)

    # Number of U1 gauge factors
    self.nU = [g.abelian for g in self.gaugeGroupsList].count(True)
    # Kinetic mixing is possible with at least two U1s, unless disabled
    self.kinMix = (self.nU > 1 and runSettings['NoKinMix'] is False)

    #################
    # Get particles #
    #################
    self.Particles = {}
    self.Fermions = {}
    self.Scalars = {}
    self.ComplexScalars = {}

    # The following dicts contain all the components of the fields
    self.allFermions = {}
    self.allScalars = {}

    self.symbolicGen = False
    self.getParticles(settings)

    ######################
    # Read the potential #
    ######################
    self.potential = settings['Potential']
    self.assumptions = {}
    self.getAssumptions()

    # Read the vevs + possible gauge fixing
    self.vevs = {}
    self.getVevs(settings)

    self.gaugeFixing = None
    if 'GaugeParameter' in settings:
        self.gaugeFixing = self.parseMathExpr(settings['GaugeParameter'], real=True)

    # Read the anomalous dimensions
    self.scalarAnomalous = {}
    self.fermionAnomalous = {}
    self.getAnomalous(settings)

    # Identify the various couplings of the model
    self.allCouplings = {}
    self.couplingsPos = {'GaugeCouplings': {}}
    self.YukPos = {}
    self.ExplicitMatrices = []
    self.couplingStructure = {}

    self.gaugeCouplings = []
    for i, gp in enumerate(self.gaugeGroupsList):
        self.allCouplings[str(gp.g)] = ('GaugeCouplings', gp.g)
        self.couplingsPos['GaugeCouplings'][str(gp.g)] = i
        self.gaugeCouplings.append(str(gp.g))

    self.mixedGaugeCouplings = []
    self.upper = True
    if self.kinMix:
        # Build the kinetic-mixing matrix: diagonal = abelian gauge
        # couplings, upper triangle = new off-diagonal couplings g_ij
        self.kinMat = zeros(self.nU)
        for i in range(self.nU):
            for j in range(self.nU):
                if (self.upper and i < j) or (not self.upper and i > j):
                    c = 'g_' + str(i + 1) + str(j + 1)
                    pos = [self.gaugeGroupsList.index(self.UgaugeGroups[k])
                           for k in (i, j)]
                    self.allCouplings[c] = ('GaugeCouplings', Symbol(c, real=True))
                    # Half-integer position: sorts between the two diagonal couplings
                    self.couplingsPos['GaugeCouplings'][c] = max(pos) + .5
                    self.kinMat[i, j] = Symbol(c, real=True)
                    self.mixedGaugeCouplings.append(self.kinMat[i, j])
                    self.gaugeCouplings.append(c)
                elif i == j:
                    self.kinMat[i, j] = self.UgaugeGroups[i].g
        self.kinMat2 = self.kinMat * self.kinMat.transpose()

    # Fill the dicts related to the various couplings of the model
    self.nCouplings = 0
    for couplingType, terms in self.potential.items():
        if couplingType == 'Definitions':
            continue
        tKeys = list(terms.keys())
        for coupling in terms:
            self.nCouplings += 1
            if couplingType == 'Yukawas':
                self.YukPos[coupling] = tKeys.index(coupling)
            if couplingType == 'FermionMasses':
                # Offset by 100 so fermion masses sort after the Yukawas
                self.YukPos[coupling] = 100 + tKeys.index(coupling)
            if couplingType not in self.couplingsPos:
                self.couplingsPos[couplingType] = {}
            self.couplingsPos[couplingType][coupling] = tKeys.index(coupling)
            self.allCouplings[coupling] = couplingType

    if self.vevs != {}:
        self.couplingsPos['Vevs'] = {}
        for i, (k, v) in enumerate(self.vevs.items()):
            self.allCouplings[k] = ('Vevs', v[1])
            self.couplingsPos['Vevs'][k] = i

    # Read the beta-factor
    if 'BetaFactor' in settings:
        if type(settings['BetaFactor']) not in (list, tuple):
            self.betaFactor = self.parseMathExpr(settings['BetaFactor'])
            self.betaExponent = lambda n: 2 * n
        else:
            self.betaFactor = self.parseMathExpr(settings['BetaFactor'][0])
            self.betaExponent = self.parseMathExpr(settings['BetaFactor'][1])
            if self.betaExponent.find('n') == set():
                # Exponent does not depend on n : only 0 is allowed
                if self.betaExponent == 0:
                    self.betaExponent = lambda n: 0
                else:
                    loggingCritical("Error : the beta-exponent must be an integer function of 'n'. Setting it to default (2*n).")
                    self.betaExponent = lambda n: 2 * n
            else:
                lambdaExponent = lambdify(Symbol('n'), self.betaExponent)
                self.betaExponent = lambda n: lambdaExponent(n)
        if self.betaFactor == 0:
            loggingCritical("Error : beta-factor cannot be 0. Exiting.")
            exit()
    else:
        self.betaFactor = Integer(1)
        self.betaExponent = lambda n: Integer(2) * n

    # Presumably (nFermions, nScalars) per coupling type — TODO confirm
    self.translateContent = {'GaugeCouplings': (0, 0),
                             'Yukawas': (2, 1),
                             'QuarticTerms': (0, 4),
                             'TrilinearTerms': (0, 3),
                             'ScalarMasses': (0, 2),
                             'FermionMasses': (2, 0),
                             'FermionAnomalous': (2, 0),
                             'ScalarAnomalous': (0, 2),
                             'Vevs': (0, 1)}

    self.translateDic = lambda RGmodule: {'Yukawas': RGmodule.YDic,
                                          'QuarticTerms': RGmodule.LambdaDic,
                                          'TrilinearTerms': RGmodule.Hdic,
                                          'ScalarMasses': RGmodule.MSdic,
                                          'FermionMasses': RGmodule.MFdic,
                                          'FermionAnomalous': RGmodule.gammaFdic,
                                          'ScalarAnomalous': RGmodule.gammaSdic,
                                          'Vevs': RGmodule.Vdic}

    self.translateBetaFunction = {'GaugeCouplings': GaugeBetaFunction,
                                  'Yukawas': YukawaBetaFunction,
                                  'QuarticTerms': QuarticBetaFunction,
                                  'TrilinearTerms': TrilinearBetaFunction,
                                  'ScalarMasses': ScalarMassBetaFunction,
                                  'FermionMasses': FermionMassBetaFunction,
                                  'FermionAnomalous': FermionAnomalous,
                                  'ScalarAnomalous': ScalarAnomalous,
                                  'Vevs': VevBetaFunction}

    self.lagrangianMapping = {}
    self.toCalculate = {}
    self.RGclasses = {}
    self.allRGEs = {}
    self.couplingRGEs = {}
    self.NonZeroCouplingRGEs = {}
    self.NonZeroDiagRGEs = {}
def validateSettings(self, settings, runSettings):
    """Implements the different checks carried out on the input provided by the user.

    Validates the model-file dict (`settings`) and the run/command-line dict
    (`runSettings`) in place, aborting the whole run via exit() on any fatal
    inconsistency.

    Checks performed:
      * at least one gauge group is declared;
      * no particle (fermion, real scalar, complex scalar or its real
        components) is defined twice;
      * every particle's quantum numbers refer to a declared gauge group;
      * the requested loop orders are well-formed and within the implemented
        maxima (clamped with a warning otherwise).
    """
    ########################################
    # Check the gauge groups and particles #
    ########################################
    if not 'Groups' in settings:
        loggingCritical("Error : No gauge groups specified. Exiting.")
        exit()
    else:
        groups = settings['Groups'].keys()

    # Gather every particle name defined in the model, aborting on duplicates.
    allParticles = {}
    if 'Fermions' in settings and settings['Fermions'] != {}:
        for k, v in settings['Fermions'].items():
            if k not in allParticles:
                allParticles[k] = v
            else:
                loggingCritical(
                    f"Error : Particle '{k}' cannot be defined twice. Please check the model file."
                )
                exit()
    if 'RealScalars' in settings:
        for k, v in settings['RealScalars'].items():
            if k not in allParticles:
                allParticles[k] = v
            else:
                loggingCritical(
                    f"Error : Particle '{k}' cannot be defined twice. Please check the model file."
                )
                exit()
    if 'ComplexScalars' in settings:
        for k, v in settings['ComplexScalars'].items():
            # A complex scalar also declares the real fields it is built from;
            # duplicates are collected first so they can all be reported at once.
            twice = []
            for f in v['RealFields']:
                # Field names must be plain identifiers : no arithmetic symbols.
                if '*' in f or '+' in f or '-' in f:
                    loggingCritical(
                        f"Error : Invalid field name '{f}' in RealScalars of particle '{k}'. Exiting"
                    )
                    exit()
                if f in allParticles:
                    twice.append(f)
                else:
                    # Real components carry no quantum-number dict of their own;
                    # the None placeholder is skipped by the group check below.
                    allParticles[f] = None
            if k in allParticles:
                twice.append(k)
            if twice != []:
                for el in twice:
                    loggingCritical(
                        f"Error : Particle '{el}' cannot be defined twice. Please check the model file."
                    )
                exit()
            allParticles[k] = v

    # Check that all the gauge groups are defined above
    for part, val in allParticles.items():
        if val is None:
            continue
        # Quantum numbers may sit under an explicit 'Qnb' key or directly at
        # the top level of the particle's dict.
        if 'Qnb' in val:
            tags = val['Qnb'].keys()
        else:
            tags = val.keys()
        if not all([el in groups for el in tags]):
            loggingCritical(
                f"Error : the particle '{part}' is charged under an unknown gauge group."
            )
            exit()

    # A missing potential is allowed : normalize it to an empty dict.
    if not 'Potential' in settings:
        settings['Potential'] = {}
        self.saveSettings['Potential'] = {}

    ################
    # RUN settings #
    ################
    if 'Loops' in runSettings:
        # Highest loop order implemented for each coupling type.
        maxLoops = {
            'GaugeCouplings': 3,
            'Yukawas': 2,
            'QuarticTerms': 2,
            'TrilinearTerms': 2,
            'ScalarMasses': 2,
            'FermionMasses': 2,
            'Vevs': 2
        }
        if type(runSettings['Loops']) == str and runSettings['Loops'].lower() == 'max':
            loops = 'max'
        else:
            # NOTE(review): eval() on user-supplied input. Tolerable only
            # because the value comes from the local command line / settings
            # file, not an untrusted source; ast.literal_eval would be the
            # safer choice. On failure the raw string is kept and rejected
            # by the type dispatch below.
            try:
                loops = eval(str(runSettings['Loops']))
            except:
                loops = str(runSettings['Loops'])

        if type(loops) == int:
            # A single integer applies to all 7 coupling types.
            self.nLoops = 7 * [loops]
            self.loopDic['GaugeCouplings'] = self.nLoops[0]
            self.loopDic['Yukawas'] = self.nLoops[1]
            self.loopDic['QuarticTerms'] = self.nLoops[2]
            self.loopDic['TrilinearTerms'] = self.nLoops[3]
            self.loopDic['ScalarMasses'] = self.nLoops[4]
            self.loopDic['FermionMasses'] = self.nLoops[5]
            self.loopDic['Vevs'] = self.nLoops[6]
        elif type(loops) == list and len(loops) == 3:
            # Three integers : gauge / Yukawa / quartic; every other order
            # is inherited from those three.
            self.nLoops = loops
            self.loopDic['GaugeCouplings'] = self.nLoops[0]
            self.loopDic['Yukawas'] = self.nLoops[1]
            self.loopDic['QuarticTerms'] = self.nLoops[2]
            self.loopDic['FermionMasses'] = self.loopDic['Yukawas']
            self.loopDic['TrilinearTerms'] = self.loopDic['QuarticTerms']
            self.loopDic['ScalarMasses'] = self.loopDic['QuarticTerms']
            self.loopDic['Vevs'] = self.loopDic['QuarticTerms']
        # elif type(loops) == list and len(loops) == 6:
        #     self.nLoops = loops
        #     self.loopDic['GaugeCouplings'] = self.nLoops[0]
        #     self.loopDic['Yukawas'] = self.nLoops[1]
        #     self.loopDic['QuarticTerms'] = self.nLoops[2]
        #     self.loopDic['TrilinearTerms'] = self.nLoops[3]
        #     self.loopDic['ScalarMasses'] = self.nLoops[4]
        #     self.loopDic['FermionMasses'] = self.nLoops[5]
        #     self.loopDic['Vevs'] = self.loopDic['QuarticTerms']
        # elif type(loops) == list and len(loops) == 7:
        #     self.nLoops = loops
        #     self.loopDic['GaugeCouplings'] = self.nLoops[0]
        #     self.loopDic['Yukawas'] = self.nLoops[1]
        #     self.loopDic['QuarticTerms'] = self.nLoops[2]
        #     self.loopDic['TrilinearTerms'] = self.nLoops[3]
        #     self.loopDic['ScalarMasses'] = self.nLoops[4]
        #     self.loopDic['FermionMasses'] = self.nLoops[5]
        #     self.loopDic['Vevs'] = self.nLoops[6]
        elif type(loops) == str and loops == 'max':
            # Use the maximal implemented loop order for every coupling type.
            self.nLoops = []
            for k, v in maxLoops.items():
                self.nLoops.append(v)
                self.loopDic[k] = v
        else:
            loggingCritical(
                "Error : Loops should be in one of the following forms :\n" +
                "\t- A single integer\n" +
                # "\t- A list of three, six or seven integers\n" +
                "\t- A list of three integers\n" +
                "\t- The keyword 'max'")
            exit()

        # Nothing to calculate ?
        if all([el == 0 for el in self.nLoops]):
            loggingCritical("Nothing to calculate ! Exiting.")
            exit()

        # If loop orders are too high, set them to the max allowed value
        for k, v in maxLoops.items():
            if self.loopDic[k] > v:
                loggingInfo(
                    f"Warning : Loop level for '{k}' is too high ({self.loopDic[k]}). Setting it to {v}"
                )
                self.loopDic[k] = v

        # Anomalous dimensions follow the orders of the couplings they feed.
        self.loopDic['ScalarAnomalous'] = self.loopDic['QuarticTerms']
        self.loopDic['FermionAnomalous'] = self.loopDic['Yukawas']
RGmodule.initialize() except SystemExit: exit() except KeyboardInterrupt: exit() except: error = True track = traceback.format_exc() finally: idb.close() if error: print(track) exit() # Actual beta-function computation model.defineBetaFunctions(RGmodule) model.computeBetaFunctions() model.mapBetaFunctions() # Apply the user-defined substitutions (replacements, GUT normalization, ...) model.doSubstitutions() loggingInfo(f"-> All done in {time.time()-t0:.3f} seconds.", end='\n\n') # Now export the results import Exports Exports.exports(runSettings, model) loggingInfo("End of the run.")
def constructMapping(self, RGmodule):
    """Builds the linear mapping between the model's couplings and the
    coefficients of the general Lagrangian implemented in `RGmodule`.

    Fills, for each coupling type:
      * self.lagrangianMapping : an invertible matrix relating Lagrangian
        coefficients to the model's couplings (times self.betaFactor);
      * self.toCalculate : the tensor entries whose beta-function must
        actually be computed.
    Aborts the run via exit() if a coupling matrix is not invertible.
    """
    loggingInfo("Mapping the model onto the general Lagrangian ...")

    # Gauge couplings mapping, taking into account possible kinetic mixing
    noMix = {}            # diagonal entries (A == B)
    mix = {}              # off-diagonal kinetic-mixing entries
    alreadyTaken = set()  # coupling values already assigned an index
    for el in itertools.combinations_with_replacement(range(RGmodule.nGi), 2):
        A, B = [RGmodule.gi[i] for i in el]
        c = RGmodule.G_(A, B)
        if c != 0 and c not in alreadyTaken:
            dic = noMix if A == B else mix
            # self.upper selects which triangle of the kinetic-mixing matrix
            # carries the independent couplings.
            if not self.upper:
                A, B = B, A
            dic[(A, B)] = len(dic)
            alreadyTaken.add(c)

    # Non-mixing couplings first, then mixing ones, with consecutive indices.
    newInds = {**noMix, **{k: v + len(noMix) for k, v in mix.items()}}
    gaugeMatrix = zeros(len(newInds))

    def delta(A, B):
        # Kronecker delta on gauge indices.
        if A == B:
            return 1
        return 0

    def G(A, B):
        # Symbolic gauge coupling associated with the index pair (A, B).
        if RGmodule.G_(A, B) == 0:
            return 0
        if not self.kinMix or A not in RGmodule.Ugauge or B not in RGmodule.Ugauge:
            # NOTE(review): presumably G_ returns g**2 for a real symbol g,
            # so sqrt(...) yields Abs(g) and .args[0] recovers g — TODO confirm.
            return sqrt(RGmodule.G_(A, B)).args[0]
        i, j = RGmodule.Ugauge.index(A), RGmodule.Ugauge.index(B)
        return self.kinMat[i, j]

    # Assemble the (symmetrized) matrix relating kinetic-term coefficients
    # to the gauge couplings, then invert it symbolically.
    for (A, B), X in newInds.items():
        for (C, D), Y in newInds.items():
            gaugeMatrix[X, Y] = G(B, D) * delta(A, C) + G(A, D) * delta(B, C)
    gaugeMatrix = simplify(gaugeMatrix.inv())

    couplingType = 'GaugeCouplings'
    self.potential[couplingType] = {}
    for c in self.gaugeCouplings:
        self.potential[couplingType][c] = 0
    self.lagrangianMapping[couplingType] = gaugeMatrix * self.betaFactor
    self.toCalculate[couplingType] = list(newInds.keys())

    # Now map every other coupling type appearing in the potential.
    count = 0
    translation = self.translateDic(RGmodule)
    for couplingType in self.potential:
        if couplingType == 'Definitions':
            continue
        if (couplingType in translation and self.potential[couplingType] != {}
                and translation[couplingType] != {}):
            coeffList = []
            dicList = []
            mappingMatrix = []
            sortedList = []
            coeffList = [c for c in self.potential[couplingType].keys()]
            mappingMatrix = SparseMatrix(len(coeffList), len(coeffList), 0)
            # Try the simplest tensor entries first : fewest distinct indices,
            # then fewest additive terms, then lexicographic index order.
            # NOTE(review): entries whose last key element is the boolean True
            # are filtered out — presumably flagged as redundant; confirm
            # against the translateDic producers.
            sortedList = sorted(
                [(key, val) for key, val in translation[couplingType].items()
                 if not (type(key[-1]) == bool and key[-1] == True)],
                key=lambda x: (len(set(x[0])), len(x[1].as_coeff_add()[1]), x[0]))
            trys = 0
            for el in sortedList:
                trys += 1
                matTry = self.fillMappingMatrix(mappingMatrix, coeffList, el)
                # Keep an entry only if it increases the rank, i.e. brings new
                # independent information about the couplings.
                if (matTry.rank() > mappingMatrix.rank()):
                    mappingMatrix = matTry
                    dicList.append(el[0])
                    count += 1
                    print_progress(count,
                                   self.nCouplings,
                                   prefix=' ' * 4,
                                   bar_length=20,
                                   printTime=self.times,
                                   logProgress=True)
                    # Full rank reached : the mapping is determined.
                    if matTry.rank() == len(coeffList):
                        break
            try:
                self.lagrangianMapping[couplingType] = Matrix(
                    mappingMatrix).inv() * self.betaFactor
            except:
                # from sympy import pretty
                loggingCritical(
                    "\nError in Lagrangian mapping : matrix of couplings is not invertible."
                )
                loggingCritical("\tCoupling type : " + couplingType)
                # loggingCritical("\t\t" + pretty(mappingMatrix).replace("\n", "\n\t\t"))
                exit()
            self.toCalculate[couplingType] = dicList

    # Add vevs and anomalous dimensions by hand (not related to the Lagrangian)
    if self.vevs != {}:
        couplingType = 'Vevs'
        self.potential[couplingType] = {}
        for c in self.vevs:
            self.potential[couplingType][c] = 0
        self.lagrangianMapping[couplingType] = eye(len(
            self.vevs)) * self.betaFactor
        self.toCalculate[couplingType] = list(RGmodule.Vdic.keys())
    if self.fermionAnomalous != {}:
        couplingType = 'FermionAnomalous'
        self.potential[couplingType] = {}
        for c in self.fermionAnomalous:
            self.potential[couplingType][c] = 0
        # Anomalous dimensions use an identity mapping (no beta-factor rescaling).
        self.lagrangianMapping[couplingType] = eye(len(self.fermionAnomalous))
        self.toCalculate[couplingType] = list(RGmodule.gammaFdic.keys())
    if self.scalarAnomalous != {}:
        couplingType = 'ScalarAnomalous'
        self.potential[couplingType] = {}
        for c in self.scalarAnomalous:
            self.potential[couplingType][c] = 0
        self.lagrangianMapping[couplingType] = eye(len(self.scalarAnomalous))
        self.toCalculate[couplingType] = list(RGmodule.gammaSdic.keys())