def checkNCPUs(self, ncpus, inputFiles):
    """ Validate the requested CPU count and normalize it.

    :param ncpus: requested number of CPUs; -1 means "use all available"
    :param inputFiles: list of input files; the CPU count is capped at
                       its length (no point in more workers than files)
    :returns: the number of CPUs that will actually be used
    """
    # Reject anything that is neither positive nor the -1 sentinel.
    if ncpus == 0 or ncpus < -1:
        logger.error("Weird number of CPUs given: %d" % ncpus)
        sys.exit()
    if ncpus == -1:
        # Autodetect the machine's CPU count.
        ncpus = runtime.nCPUs()
    # Never use more workers than there are files to process.
    ncpus = min(ncpus, len(inputFiles))
    message = "We run on a single cpu" if ncpus == 1 else "We run on %d cpus" % ncpus
    logger.info(message)
    return ncpus
def main(args):
    """
    Entry point: either answer a cross-section query, or compute cross
    sections for all input files, forking one worker process per CPU.

    :param args: argparse.Namespace; reads verbosity, query, filename,
                 ncpus, nevents, keep, LOfromSLHA, tofile and (optionally)
                 pythiacard.
    """
    setLogLevel(args.verbosity)
    if args.query:
        # Query mode: just report cross sections of the given file and stop.
        return queryCrossSections(args.filename)
    sqrtses = getSqrtses(args)
    order = getOrder(args)
    checkAllowedSqrtses(order, sqrtses)
    inputFiles = getInputFiles(args)
    ncpus = args.ncpus
    # pythiacard is an optional attribute on the namespace.
    if hasattr(args, 'pythiacard'):
        pythiacard = args.pythiacard
    else:
        pythiacard = None
    # -1 is the "autodetect" sentinel; any other non-positive value is rejected.
    if ncpus < -1 or ncpus == 0:
        logger.error("Weird number of CPUs given: %d" % ncpus)
        sys.exit()
    if ncpus == -1:
        ncpus = runtime.nCPUs()
    # No point forking more workers than there are input files.
    ncpus = min(len(inputFiles), ncpus)
    if ncpus == 1:
        logger.info("We run on a single cpu")
    else:
        logger.info("We run on %d cpus" % ncpus)
    children = []
    for i in range(ncpus):
        pid = os.fork()
        # Round-robin split: worker i takes every ncpus-th file starting at i.
        chunk = inputFiles[i::ncpus]
        # NOTE(review): defensive branch — in CPython os.fork() raises
        # OSError on failure rather than returning a negative pid.
        if pid < 0:
            logger.error("fork did not succeed! Pid=%d" % pid)
            sys.exit()
        if pid == 0:
            # Child process: compute its chunk, then hard-exit so it never
            # falls through into the parent's bookkeeping below.
            logger.debug("chunk #%d: pid %d (parent %d)." % (i, os.getpid(), os.getppid()))
            logger.debug(" `-> %s" % " ".join(chunk))
            computeForBunch(sqrtses, order, args.nevents, chunk, not args.keep,
                            args.LOfromSLHA, args.tofile, pythiacard=pythiacard)
            # os._exit skips atexit/cleanup handlers inherited from the parent.
            os._exit(0)
        if pid > 0:
            # Parent: remember the child's pid so we can wait for it.
            children.append(pid)
    # Parent waits for every forked worker before returning.
    for child in children:
        r = os.waitpid(child, 0)
        logger.debug("child %d terminated: %s" % (child, r))
    logger.debug("all children terminated.")
def _determineNCPus ( cpus_wanted, n_files ):
    """ determine the number of CPUs that are to be used.
    :param cpus_wanted: number of CPUs specified in parameter file
    :param n_files: number of files to be run on
    :returns: number of CPUs that are to be used
    """
    available = runtime.nCPUs()
    requested = cpus_wanted
    # 0 and anything below -1 make no sense; -1 means "use all".
    if requested == 0 or requested < -1:
        logger.error ( "Weird number of ncpus given in ini file: %d" % requested )
        sys.exit()
    # Autodetect, and never claim more CPUs than the machine has.
    if requested == -1 or requested > available:
        requested = available
    # Cap at the number of files, there is nothing to gain beyond that.
    return min ( n_files, requested )
def parallel_run(verbose):
    """Discover all tests under ./ and run them concurrently, one forked
    worker per CPU. Requires the third-party 'concurrencytest' module."""
    if verbose:
        # A verbose run is only implemented for the serial version.
        print("[runCompleteTestSuite] verbose run not implemented "
              "for parallel version")
        return
    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
    except ImportError as e:
        print("Need to install the module concurrencytest.")
        print("pip install --user concurrencytest")
        return
    from smodels.tools import runtime
    discovered = unittest.TestLoader().discover("./")
    n_workers = runtime.nCPUs()
    ## "shuffle" the tests, so that the heavy tests get distributed
    ## more evenly among threads (didnt help, so I commented it out)
    #suite._tests = [ item for sublist in [ suite._tests[x::ncpus] \
    #                 for x in range(ncpus) ] for item in sublist ]
    runner = unittest.TextTestRunner()
    runner.run(ConcurrentTestSuite(discovered, fork_for_tests(n_workers)))
def parallel_run ( verbose ):
    """Run the complete test suite in parallel, forking as many test
    workers as there are CPUs on this machine."""
    if verbose:
        print ("[runCompleteTestSuite] verbose run not implemented "
               "for parallel version" )
        return
    # concurrencytest is an optional third-party dependency.
    try:
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
    except ImportError as e:
        print ( "Need to install the module concurrencytest." )
        print ( "pip install --user concurrencytest" )
        return
    from smodels.tools import runtime
    loader = unittest.TestLoader()
    full_suite = loader.discover("./")
    cpu_count = runtime.nCPUs()
    concurrent = ConcurrentTestSuite(full_suite, fork_for_tests( cpu_count ))
    unittest.TextTestRunner().run(concurrent)
def testPoints(fileList, inDir, outputDir, parser, databaseVersion, listOfExpRes,
               timeout, development, parameterFile):
    """
    Loop over all input files in fileList with testPoint, using ncpus CPUs
    defined in parser

    :param fileList: list of input files to be tested
    :param inDir: path to directory where input files are stored
    :param outputDir: path to directory where output is stored
    :param parser: ConfigParser storing information from parameter.ini file
    :param databaseVersion: Database version (printed to output files)
    :param listOfExpRes: list of ExpResult objects to be considered
    :param timeout: set a timeout for one model point (0 means no timeout)
    :param development: turn on development mode (e.g. no crash report)
    :param parameterFile: parameter file, for crash reports
    :returns: printer(s) output, if not run in parallel mode
    """
    if len(fileList) == 0:
        logger.error("no files given.")
        return None
    # Single file: no need to fork, run it directly and return its output.
    if len(fileList) == 1:
        return runSingleFile(fileList[0], outputDir, parser, databaseVersion,
                             listOfExpRes, timeout, development, parameterFile)
    """ loop over input files and run SModelS """
    ncpusAll = runtime.nCPUs()
    ncpus = parser.getint("parameters", "ncpus")
    # 0 and anything below -1 are rejected; -1 means "use all available".
    if ncpus == 0 or ncpus < -1:
        logger.error("Weird number of ncpus given in ini file: %d" % ncpus)
        sys.exit()
    if ncpus == -1 or ncpus > ncpusAll:
        ncpus = ncpusAll
    logger.info("Running SModelS on %d cores" % ncpus)
    # Drop entries that are not existing files, resolving them against inDir.
    cleanedList = []
    for f in fileList:
        tmp = os.path.join(inDir, f)
        if not os.path.isfile(tmp):
            logger.info("%s does not exist or is not a file. Skipping it." % tmp)
            continue
        cleanedList.append(tmp)
    # One CPU: serial run, output is returned to the caller.
    if ncpus == 1:
        return runSetOfFiles(cleanedList, outputDir, parser, databaseVersion,
                             listOfExpRes, timeout, development, parameterFile)
    ### now split up for every fork
    # Round-robin split: fork x takes every ncpus-th file starting at x.
    chunkedFiles = [cleanedList[x::ncpus] for x in range(ncpus)]
    children = []
    for (i, chunk) in enumerate(chunkedFiles):
        pid = os.fork()
        logger.debug("Forking: %s %s %s " % (i, pid, os.getpid()))
        if pid == 0:
            # Child: process its chunk, then hard-exit so it never runs
            # the parent's bookkeeping below.
            logger.debug("chunk #%d: pid %d (parent %d)."
                         % (i, os.getpid(), os.getppid()))
            logger.debug(" `-> %s" % " ".join(chunk))
            runSetOfFiles(chunk, outputDir, parser, databaseVersion,
                          listOfExpRes, timeout, development, parameterFile)
            os._exit(0)  ## not sys.exit(), return, nor continue
        # NOTE(review): defensive branch — in CPython os.fork() raises
        # OSError on failure rather than returning a negative pid.
        if pid < 0:
            logger.error("fork did not succeed! Pid=%d" % pid)
            sys.exit()
        if pid > 0:
            children.append(pid)
    # Parent: wait for every worker before returning.
    for child in children:
        r = os.waitpid(child, 0)
        logger.debug("child %d terminated: %s" % (child, r))
    logger.debug("all children terminated")
    logger.debug("returning no output, because we are in parallel mode")
    return None
def testNCPUs(self):
    """The autodetected CPU count must always be at least one."""
    detected = runtime.nCPUs()
    self.assertTrue ( detected >= 1 )
def main():
    """
    Command-line entry point: parse options, load a protomodel hiscore from
    a pickle file, run a likelihood scan over (pid1, pid2) mass planes, and
    optionally draw the resulting plots.
    """
    import argparse
    argparser = argparse.ArgumentParser(description='perform likelhood scans')
    argparser.add_argument('-n', '--number', help='which hiscore to plot [0]',
                           type=int, default=0)
    argparser.add_argument('-1', '--pid1', help='pid1 [1000006]',
                           type=int, default=1000006)
    argparser.add_argument('-2', '--pid2', help='pid2 [1000022]',
                           type=int, default=1000022)
    argparser.add_argument(
        '-P', '--nproc',
        help='number of process to run in parallel. zero is autodetect. Negative numbers are added to autodetect [0]',
        type=int, default=0)
    argparser.add_argument('-m1', '--min1', help='minimum mass of pid1 [None]',
                           type=float, default=None)
    argparser.add_argument('-M1', '--max1', help='maximum mass of pid1 [2200.]',
                           type=float, default=None)
    argparser.add_argument('-d1', '--deltam1', help='delta m of pid1 [None]',
                           type=float, default=None)
    argparser.add_argument('-m2', '--min2', help='minimum mass of pid2 [None]',
                           type=float, default=None)
    argparser.add_argument('-M2', '--max2', help='maximum mass of pid2 [None]',
                           type=float, default=None)
    argparser.add_argument('-d2', '--deltam2', help='delta m of pid1 [None]',
                           type=float, default=None)
    argparser.add_argument('-t', '--topo', help='topology [None]',
                           type=str, default=None)
    argparser.add_argument('-R', '--rundir', help='override the default rundir [None]',
                           type=str, default=None)
    argparser.add_argument('-e', '--nevents', help='number of events [100000]',
                           type=int, default=100000)
    argparser.add_argument(
        '-p', '--picklefile',
        help='pickle file to draw from [<rundir>/hiscore.hi]',
        type=str, default="default")
    argparser.add_argument('-D', '--draw',
                           help='also perform the plotting, ie call plotLlhds',
                           action='store_true')
    argparser.add_argument('-v', '--verbosity',
                           help='verbosity -- debug, info, warn, err [info]',
                           type=str, default="info")
    argparser.add_argument('-o', '--output', help="prefix for output file [llhd]",
                           type=str, default="llhd")
    args = argparser.parse_args()
    rundir = setup(args.rundir)
    nproc = args.nproc
    # nproc <= 0: autodetect; negative values are subtracted from the
    # autodetected CPU count (e.g. -1 -> all cores minus one).
    if nproc < 1:
        nproc = nCPUs() + nproc
    if args.picklefile == "default":
        args.picklefile = "%s/hiscore.hi" % rundir
    protomodel = obtain(args.number, args.picklefile)
    pid1s = [args.pid1]
    # pid1 == 0 means: scan every pid found in the run directory.
    if args.pid1 == 0:
        pid1s = findPids(rundir)
    for pid1 in pid1s:
        scanner = LlhdScanner(protomodel, pid1, args.pid2, nproc, rundir)
        args.pid1 = pid1
        # Fill in any unspecified scan ranges with the scanner's defaults.
        args = scanner.overrideWithDefaults(args)
        scanner.scanLikelihoodFor ( args.min1, args.max1, args.deltam1,
                                    args.min2, args.max2, args.deltam2,
                                    args.nevents, args.topo, args.output )
        if args.draw:
            # Plotting options are fixed here rather than exposed on the CLI.
            verbose = args.verbosity
            copy = True
            max_anas = 5
            interactive = False
            drawtimestamp = True
            compress = False
            upload = "latest"
            plot = plotLlhds.LlhdPlot(pid1, args.pid2, verbose, copy, max_anas,
                                      interactive, drawtimestamp, compress,
                                      rundir, upload)
            plot.plot()
action="store_true" ) argparser.add_argument ( '-u', '--upload', help='choose upload directory [latest]', type=str, default="latest" ) argparser.add_argument ( '-c', '--copy', help='copy plots to ~/git/smodels.github.io/protomodels/<upload>/', action="store_true" ) argparser.add_argument ( '-N', '--notimestamp', help='dont put a timestamp on it', action="store_true" ) args = argparser.parse_args() drawtimestamp = not args.notimestamp rundir = setup( args.rundir ) nproc = args.nproc if nproc < 1: nproc = nCPUs() + nproc allpids = findPids( rundir ) pids = args.pid if pids == 0: pids = allpids if args.produce: hi = getHiscore( args.force_copy, rundir ) if args.pid2 > 0: produceSSMs( hi, args.pid, args.pid2, args.nevents, args.dry_run, nproc, args.factor, rundir = rundir ) else: produce( hi, pids, args.nevents, args.dry_run, nproc, args.factor, rundir = rundir, preserve_xsecs = args.preserve_xsecs ) pred = Predictor( 0 ) rthreshold = pred.rthreshold if args.draw: if args.pid != 0: draw( pids, args.interactive, args.pid2, args.copy, drawtimestamp, rundir, \