Example #1
def main():
	ROOT.gROOT.SetBatch(True)
	
	parser = argparse.ArgumentParser(description="Merge Artus outputs per nick name.", parents=[logger.loggingParser])

	parser.add_argument("project_dir", help="Artus Project directory containing the files \"output/*/*.root\" to merge")

	args = parser.parse_args()
	logger.initLogger(args)
	
	output_dirs = glob.glob(os.path.join(args.project_dir, "output/*"))
	nick_names = [nick for nick in [os.path.basename(output_dir) for output_dir in output_dirs] if ".tar.gz" not in nick]
	outputs_per_nick = {nick : glob.glob(os.path.join(args.project_dir, "output", nick, "*.root")) for nick in nick_names}
	outputs_per_nick = {nick : files for nick, files in outputs_per_nick.iteritems() if len(files) > 0}
	
	for nick_name, output_files in pi.ProgressIterator(outputs_per_nick.iteritems(),
	                                                   length=len(outputs_per_nick),
	                                                   description="Merging Artus outputs"):
		merged_dir = os.path.join(args.project_dir, "merged", nick_name)
		if not os.path.exists(merged_dir):
			os.makedirs(merged_dir)
	
		command = "hadd -f %s %s" % (os.path.join(merged_dir, nick_name+".root"), " ".join(output_files))
		log.info(command)
		logger.subprocessCall(command.split())
		log.info("\n")
Example #2
def main():
	parser = argparse.ArgumentParser(description="Compare different repository versions configured in Artus configs. The script has to be executed in the directory of the repository.", parents=[logger.loggingParser])
	parser.add_argument("files", help="Two configurations. The configs should be ordered by the expected repository version. Can be either Artus output root files or JSON text files", nargs=2)

	args = parser.parse_args()
	logger.initLogger(args)
	
	config1 = jsonTools.JsonDict(args.files[0])
	config2 = jsonTools.JsonDict(args.files[1])
	
	dirname = os.path.basename(os.getcwd())
	repo_key1 = sorted([key for key in config1.keys() if key.startswith("/") and key.endswith(dirname)], key=lambda item: len(item))[-1]
	repo_key2 = sorted([key for key in config2.keys() if key.startswith("/") and key.endswith(dirname)], key=lambda item: len(item))[-1]
	
	repo_version1 = config1[repo_key1]
	repo_version2 = config2[repo_key2]
	diff_string = "%s...%s" % (repo_version1, repo_version2)
	
	command = "git diff %s..%s" % (repo_version1, repo_version2)
	if log.isEnabledFor(logging.DEBUG):
		log.info("")
		logger.subprocessCall(shlex.split(command))
	log.info("\n"+command)
	
	popen_cout, popen_cerr = subprocess.Popen("git config remote.origin.url".split(), stdout=subprocess.PIPE).communicate()
	remote_url = popen_cout.replace("\n", "")
	github_link = os.path.join(remote_url, "compare", diff_string)
	log.info(github_link)
Example #3
def do_p_values(output_dir, print_only=False):
    command = "submit.py --interactive --stable-new --pvalue-frequentist {datacards}".format(
        datacards=os.path.join(output_dir, "*")
    )
    log.info(command)
    if not print_only:
        logger.subprocessCall(shlex.split(command))
Example #4
def annotate_lumi(output_dir, lumi, print_only=False):
    command = "annotate-trees.py {root_files} --tree limit --values {l} --branches lumi".format(
        root_files=" ".join(glob.glob(os.path.join(output_dir, "*", "higgsCombine*.root"))), l=lumi
    )
    log.info(command)
    if not print_only:
        logger.subprocessCall(shlex.split(command))
Example #5
def do_limits(output_dir, print_only=False):
    command = "submit.py --interactive --stable-new --asymptotic {datacards}".format(
        datacards=os.path.join(output_dir, "*")
    )
    log.info(command)
    if not print_only:
        logger.subprocessCall(shlex.split(command))
Example #6
def main():
	
	parser = argparse.ArgumentParser(description="Tools simplifying dCache usage.", parents=[logger.loggingParser])
	
	parser.add_argument("-c", "--command", required=True, help="Command, e.g. lcg-cp, lcg-del or ddcp.")
	parser.add_argument("-a", "--args", help="Arguments (can be left empty).")
	parser.add_argument("-s", "--src", required=True, help="Source.")
	parser.add_argument("--src-prefix", default="", help="Source prefix. \"gridka\" and \"desy\" are replaced by their dCache locations. \"\" means local path.")
	#parser.add_argument("-d", "--dst", help="Destination (can be left empty).")
	#parser.add_argument("--dst-prefix", default="", help="Destination prefix. \"gridka\" and \"desy\" are replaced by their dCache locations. \"\" means local path.")
	
	args = parser.parse_args()
	logger.initLogger(args)
	
	prefix_replacements = {
		"gridka" : "srm://dgridsrm-fzk.gridka.de:8443/srm/managerv2?SFN=",
		"desy" : "srm://dcache-se-cms.desy.de:8443/srm/managerv2?SFN=",
	}
	for replacement_from, replacement_to in prefix_replacements.items():
		if args.src_prefix == replacement_from:
			args.src_prefix = replacement_to
		#if args.dst_prefix == replacement_from:
		#	args.dst_prefix = replacement_to
	
	src_files = list_of_files(args.src_prefix, args.src)
	
	for src_file in src_files:
		logger.subprocessCall((args.command + " " + (args.args if args.args else "") + " " + args.src_prefix + src_file).split())
Example #7
def main():
	
	parser = argparse.ArgumentParser(description="Merge Artus outputs per nick name.", parents=[logger.loggingParser])

	parser.add_argument("project_dir", help="Artus Project directory containing the files \"output/*/*.root\" to merge")

	args = parser.parse_args()
	logger.initLogger(args)
	
	output_dirs = glob.glob(os.path.join(args.project_dir, "output/*"))
	nick_names = [nick for nick in [os.path.basename(output_dir) for output_dir in output_dirs] if ".tar.gz" not in nick]
	
	gc_config_base_filename = os.path.expandvars("$CMSSW_BASE/src/Artus/Configuration/data/gc_merge_artus_outputs_base.conf")
	gc_config_base = ""
	with open(gc_config_base_filename) as gc_config_base_file:
		gc_config_base = gc_config_base_file.read().rstrip()
	
	gc_config = string.Template(gc_config_base).safe_substitute(
			PROJECT_DIR=args.project_dir,
			SAMPLE_NICKS=" ".join(nick_names),
			CMSSW_BASE=os.path.expandvars("$CMSSW_BASE")
	)
	
	gc_config_filename = os.path.join(args.project_dir, "gc_merge_artus_outputs.conf")
	if os.path.exists(gc_config_filename):
		os.remove(gc_config_filename)
		logger.subprocessCall(("rm -rf " + os.path.join(args.project_dir, "workdir_merge")).split())
	with open(gc_config_filename, "w") as gc_config_file:
		gc_config_file.write(gc_config)
	
	command = "go.py -Gcm 1 " + gc_config_filename
	log.info(command)
	logger.subprocessCall(command.split())
	
	log.info("Output is written to directory \"%s\"" % os.path.join(args.project_dir, "merged"))
Example #8
def clear_output_dir(output_dir, print_only=False):
    if os.path.exists(output_dir):
        command = "rm -rfv {o}".format(o=output_dir)
        log.info(command)
        if not print_only:
            logger.subprocessCall(shlex.split(command))
    if not print_only:
        os.makedirs(output_dir)
Example #9
def main():
	
	parser = argparse.ArgumentParser(description="Execute bash command.",
	                                 parents=[logger.loggingParser])
	
	parser.add_argument("command", nargs="+",
	                    help="Bash command to be executed.")
	
	args = parser.parse_args()
	logger.initLogger(args)
	
	logger.subprocessCall(os.path.expandvars(" ".join(args.command)), shell=True)
Example #10
def download_remote_file(remote, local, offset=30, bandwidth=100):
	command = "gfal-stat --timeout {timeout} {remote}".format(timeout=str(offset), remote=remote)
	stdout, stderr = subprocessCall(shlex.split(command))
	size = re.search("Size\:\s*(?P<size>\d*)", stdout)
	if size:
		size = int(size.groupdict().get("size", "0"))
		if size <= 0:
			log.critical("Could not get file size of \"{remote}\"!".format(remote=remote))
			return False
	else:
		log.critical("Could not get file size of \"{remote}\"!".format(remote=remote))
		return None

	timeout = offset + size/1024/bandwidth
	command = "gfal-copy --abort-on-failure --timeout {timeout} --transfer-timeout {timeout} --force {remote} {local}".format(
			timeout=str(timeout),
			remote=remote,
			local=local
	)
	log.debug(command)
	exit_code = logger.subprocessCall(shlex.split(command))
	if exit_code == 0:
		return True
	else:
		log.critical("Could not download \"{remote}\"!".format(remote=remote))
		return False
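A hypothetical invocation, reusing the gridka dCache prefix from Example #6; the file paths are illustrative only:

ok = download_remote_file(
	remote="srm://dgridsrm-fzk.gridka.de:8443/srm/managerv2?SFN=/pnfs/some/path/file.root",  # illustrative path
	local="/tmp/file.root",
	offset=30,      # seconds granted to gfal-stat and added on top of the transfer time
	bandwidth=100,  # assumed transfer rate (KiB/s) used to scale the timeout with the file size
)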
Example #11
def hadd(target_file, source_files, hadd_args="", max_files=500):
	if len(source_files) == 0:
		log.critical("No source files specified to be merged!")
		sys.exit(1)
	
	source_files_chunks = [source_files[start_chunk_index:start_chunk_index+max_files] for start_chunk_index in xrange(0, len(source_files), max_files)]
	
	exit_code = 0
	for chunk_index, tmp_source_files in enumerate(source_files_chunks):
		tmp_target_file = "%s.hadd_%d.root" % (target_file, chunk_index)
		if chunk_index == len(source_files_chunks)-1:
			tmp_target_file = target_file
		
		last_target_file = ""
		if chunk_index > 0:
			last_target_file = "%s.hadd_%d.root" % (target_file, chunk_index-1)
		
		command = "hadd %s %s %s %s" % (hadd_args, tmp_target_file, " ".join(tmp_source_files), last_target_file)
		log.debug(command)
		exit_code = max(exit_code, logger.subprocessCall(shlex.split(command)))
		
		# remove last temp. merge result
		if len(last_target_file) > 0:
			os.remove(last_target_file)
	return exit_code
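For illustration, a hypothetical call that merges all per-job outputs of one sample in chunks of at most 500 files, passing hadd's -f flag to overwrite an existing target (the paths are placeholders):

import glob

exit_code = hadd(
	target_file="merged/sample.root",                        # placeholder target
	source_files=sorted(glob.glob("output/sample/*.root")),  # placeholder inputs
	hadd_args="-f",                                          # let hadd overwrite the target
)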
Example #12
def main():

	parser = argparse.ArgumentParser(description="Get JEC parameter files using CMSSW tools.", parents=[logger.loggingParser])
	parser.add_argument("--gt-data", default="FT_53_V21_AN4::All",
	                    help="Global tag for data. [Default: %(default)s]")
	parser.add_argument("--gt-mc", default="START53_V23::All",
	                    help="Global tag for MC. [Default: %(default)s]")
	parser.add_argument("--pf-jets", default="AK5PF",
	                    help="Name of PF jets collection. [Default: %(default)s]")
	parser.add_argument("--calo-jets", default="",
	                    help="Name of Calo jets collection. [Default: %(default)s]")
	args = parser.parse_args()
	logger.initLogger(args)
	args = vars(args)
	
	python_config_template = string.Template("""
import FWCore.ParameterSet.Config as cms
process = cms.Process("jectxt")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
# define your favorite global tag
process.GlobalTag.globaltag = "$global_tag"
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.source = cms.Source("EmptySource")
process.readAK5PF    = cms.EDAnalyzer("JetCorrectorDBReader",
      payloadName    = cms.untracked.string("$pf_jets"),
      # this is used ONLY for the name of the printed txt files. You can use any name that you like,
      # but it is recommended to use the GT name that you retrieved the files from.
      globalTag      = cms.untracked.string("$global_tag_name"),
      printScreen    = cms.untracked.bool(False),
      createTextFile = cms.untracked.bool(True)
)
process.readAK5Calo = process.readAK5PF.clone(payloadName = "$calo_jets")
process.p = cms.Path(process.readAK5PF * process.readAK5Calo)
""")
	
	for mode in ["data", "mc"]:
		python_config = python_config_template.substitute(global_tag=args["gt_"+mode],
		                                                  global_tag_name=args["gt_"+mode].replace(":", "_"),
		                                                  pf_jets=args["pf_jets"],
		                                                  calo_jets=args["calo_jets"])
		if args["calo_jets"] == "":
			python_config = python_config.replace("\nprocess.readAK5Calo = process.readAK5PF.clone(payloadName = \"\")", "")
			python_config = python_config.replace("process.p = cms.Path(process.readAK5PF * process.readAK5Calo)",
			                                      "process.p = cms.Path(process.readAK5PF)")
		
		
		python_config_file_name = None
		with tempfile.NamedTemporaryFile(prefix=mode, suffix=".py", delete=False) as python_config_file:
			python_config_file_name = python_config_file.name
			python_config_file.write(python_config)
		
		if python_config_file_name is not None:
			command = ["cmsRun", python_config_file_name]
			log.info("Execute \"%s\"." % " ".join(command))
			exit_code = logger.subprocessCall(command)
			log.info("Exit code: " + str(exit_code))
			os.remove(python_config_file_name)
Example #13
def main():
	
	parser = argparse.ArgumentParser(description="Merge Artus outputs per nick name.", parents=[logger.loggingParser])

	parser.add_argument("project_dir", help="Artus Project directory containing the files \"output/*/*.root\" to merge")
	parser.add_argument("-b", "--batch", default="host", help="Select backend. [Default: %(default)s]")

	args = parser.parse_args()
	logger.initLogger(args)
	
	gc_work_directory = os.path.join(args.project_dir, "workdir_merge")
	merged_directory = os.path.join(args.project_dir, "merged")
	
	#remove directories that will be overwritten
	directories_to_remove = []
	for directory in [gc_work_directory, merged_directory]:
		if os.path.exists(directory):
			directories_to_remove.append(directory)
	logger.subprocessCall(("rm -rf " + (" ".join(directories_to_remove))).split())
	
	gc_config_base_filename = os.path.expandvars("$CMSSW_BASE/src/Artus/Configuration/data/grid-control_merge_artus_outputs_base.conf")
	gc_config_base = ""
	with open(gc_config_base_filename) as gc_config_base_file:
		gc_config_base = gc_config_base_file.read().rstrip()
	
	with open(os.path.expandvars("$CMSSW_BASE/src/Artus/Configuration/data/grid-control_backend_" + args.batch + ".conf"), "r") as backend_file:
		backend = backend_file.read()
	gc_config = string.Template(gc_config_base).safe_substitute(
			BACKEND = backend,
			PROJECT_DIR=args.project_dir,
			CMSSW_BASE=os.path.expandvars("$CMSSW_BASE"),
			SCRAM_ARCH=os.path.expandvars("$SCRAM_ARCH"),
	)
	
	gc_config_filename = os.path.join(args.project_dir, "grid-control_merge_artus_outputs.conf")
	if os.path.exists(gc_config_filename):
		os.remove(gc_config_filename)
	with open(gc_config_filename, "w") as gc_config_file:
		gc_config_file.write(gc_config)
	
	command = "go.py " + gc_config_filename
	log.info(command)
	logger.subprocessCall(command.split())
	
	log.info("Output is written to directory \"%s\"" % merged_directory)
Example #14
def main():
	
	ROOT.gSystem.Load(os.path.expandvars("$CMSSW_BASE/src/Kappa/lib/libKappa"))
	
	parser = argparse.ArgumentParser(description="Switch to the Kappa commit that has been used for a certain skim.",
	                                 parents=[logger.loggingParser])
	
	parser.add_argument("file", help="Kappa skim output file containing the UserInfo in the Lumis tree")
	parser.add_argument("-a", "--args", default="",
	                    help="Arguments for git checkout. [Default: %(default)s]")
	
	args = parser.parse_args()
	logger.initLogger(args)
	
	root_file = ROOT.TFile(args.file, "READ")
	lumis = root_file.Get("Lumis")
	lumis.GetEntry(0)
	
	user_infos = lumis.GetUserInfo()
	assert user_infos.GetEntries() >= 2
	keys = user_infos[0]
	values = user_infos[1]
	
	user_infos_dict = { str(k.GetString()) : str(v.GetString()) for k, v in zip(keys, values) }
	
	cwd = os.getcwd()
	if not cwd.endswith("Kappa") or cwd.endswith("Kappa/"):
		log.fatal("The script needs to be executed in $CMSSW_BASE/src/Kappa!")
		sys.exit(1)
	
	kappa_repo = [k for k in user_infos_dict.keys() if k.endswith("Kappa") or k.endswith("Kappa/")]
	if len(kappa_repo) == 0:
		log.fatal("Kappa revision not found in file \"%s\"!" % args.file)
		sys.exit(1)
	else:
		kappa_repo = kappa_repo[0]
		
	#sorted([tools.longest_common_substring(key, cwd) for key in user_infos_dict.keys()], key=lambda item: len(item))[-1]
	kappa_revision = user_infos_dict[kappa_repo]
	
	command = "git checkout %s %s" % (args.args, kappa_revision)
	log.info(command)
	if not raw_input("Proceed? [Y|n] ").lower().strip().startswith("n"):
		logger.subprocessCall(shlex.split(command))
Example #15
def _call_command(args):
	command = None
	cwd = None
	if isinstance(args, basestring):
		command = args
	else:
		command = args[0]
		if len(args) > 1:
			cwd = args[1]

	old_cwd = None
	if cwd is not None:
		old_cwd = os.getcwd()
		os.chdir(cwd)
	log.debug(command)
	logger.subprocessCall(command, shell=True)

	if cwd is not None:
		os.chdir(old_cwd)
Example #16
def main():
	parser = argparse.ArgumentParser(description="Compare different repository versions configured in Artus configs. The script has to be executed in the directory of the repository.", parents=[logger.loggingParser])
	parser.add_argument("files", help="Two configurations. The configs should be ordered by the expected repository version. Can be either Artus output root files or JSON text files", nargs=2)

	args = parser.parse_args()
	logger.initLogger(args)
	
	config1 = jsonTools.JsonDict(args.files[0])
	config2 = jsonTools.JsonDict(args.files[1])
	
	dirname = os.path.basename(os.getcwd())
	repo_key1 = sorted([key for key in config1.keys() if key.startswith("/") and key.endswith(dirname)], key=lambda item: len(item))[-1]
	repo_key2 = sorted([key for key in config2.keys() if key.startswith("/") and key.endswith(dirname)], key=lambda item: len(item))[-1]
	
	repo_version1 = config1[repo_key1]
	repo_version2 = config2[repo_key2]
	
	command = "git diff %s..%s" % (repo_version1, repo_version2)
	log.info(command)
	logger.subprocessCall(shlex.split(command))
Example #17
def batch_submission(commands, batch="rwthcondor"):
	project_directory = tempfile.mkdtemp(prefix="batch_submission_"+datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")+"_")
	
	# prepare commands
	if (len(commands) == 0) and (not sys.stdin.isatty()):
		commands.extend(sys.stdin.read().strip().split("\n"))
	
	template_execute_command_n = ""
	with open(os.path.expandvars("$CMSSW_BASE/src/Artus/Utility/data/template_execute_command_n.sh"), "r") as template_execute_command_n_file:
		template_execute_command_n = template_execute_command_n_file.read().strip()
	
	execute_command_n_filename = os.path.join(project_directory, "execute_command_n.sh")
	with open(execute_command_n_filename, "w") as execute_command_n_file:
		execute_command_n_file.write(
				string.Template(template_execute_command_n).safe_substitute(
						commands="'"+("'\n\t'".join(commands))+"'"
				)
		)
	
	# prepare GC
	main_config = ""
	with open(os.path.expandvars("$CMSSW_BASE/src/Artus/Utility/data/grid-control_base_config.conf"), "r") as main_config_file:
		main_config = main_config_file.read()
	
	backend_config = ""
	with open(os.path.expandvars("$CMSSW_BASE/src/Artus/Configuration/data/grid-control_backend_" + batch + ".conf"), "r") as backend_config_file:
		backend_config = backend_config_file.read()
	
	final_config_filename = os.path.join(project_directory, "grid-control.conf")
	with open(final_config_filename, "w") as final_config_file:
		final_config_file.write(
				string.Template(main_config).safe_substitute(
						command_indices=" ".join(str(index) for index in range(len(commands))),
						backend=backend_config
				)
		)
	
	# run
	command = "go.py " + final_config_filename
	log.info(command)
	logger.subprocessCall(shlex.split(command))
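A hypothetical call submitting two placeholder commands through grid-control with the default backend:

batch_submission(["echo job 0", "echo job 1"], batch="rwthcondor")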
Example #18
	def measurePerformance(self, profTool):
		"""run Artus with profiler"""
		exitCode = 0

		# check output directory
		outputDir = os.path.dirname(self._args.output_file)
		if outputDir and not os.path.exists(outputDir):
			os.makedirs(outputDir)

		commands=""
		profile_outputs=""
		if profTool == "igprof":
			# call C++ executable with profiler igprof
			profile_outputs = [os.path.join(outputDir, filename) for filename in ["igprof.pp.gz", "igprof.analyse.txt", "igprof.analyse.sql3"]]

			commands = [
			"igprof -d -pp -z -o " + profile_outputs[0] + " " + self._executable + " " + self._configFilename,
			"execute-command.sh igprof-analyse -d -v -g " + profile_outputs[0] + " > " + profile_outputs[1],
			"execute-command.sh igprof-analyse --sqlite -d -v -g " + profile_outputs[0] + " | sqlite3 " + profile_outputs[2],
			"igprof-navigator " + profile_outputs[2],
			]
		elif profTool == "valgrind":
			# call C++ executable with profiler valgrind
			profile_outputs = [os.path.join(outputDir, filename) for filename in ["callgrind.out.8080"]]

			commands = [
			"valgrind --tool=callgrind --callgrind-out-file=" + profile_outputs[0] + " " + self._executable + " " + self._configFilename,
			"callgrind_annotate --auto=yes " + profile_outputs[0],
			]
		else:
			log.info(profTool + " is not a valid profiler")

		for command in commands:
			log.info("Execute \"%s\"." % command)
			logger.subprocessCall(command.split())

		if profile_outputs:
			log.info("Profiling output is written to \"%s\"." % "\", \"".join(profile_outputs))

		return 0
Example #19
def delete_filelist(filelist):
	if not filelist.endswith(".root"):
		content = ""
		with open(filelist) as filelist_file:
			content = filelist_file.read()
		
		for line in content.split("\n"):
			delete_filelist(line)
	
	if filelist.endswith(".root"):
		for src, dst in filename_replacements.iteritems():
			filelist = filelist.replace(src, dst)
		command = "gfal-rm " + filelist
		log.debug(command)
		logger.subprocessCall(shlex.split(command))
	else:
		command = "git rm " + filelist
		if os.path.islink(filelist):
			command += (" " + os.path.relpath(os.path.realpath(filelist), os.getcwd()))
		log.debug(command)
		exit_code = logger.subprocessCall(shlex.split(command))
		if exit_code != 0:
			log.warning("Please execute the script in the repository, that contains the filelists!")
			logger.subprocessCall(shlex.split(command[4:]))
Example #20
	def deepreplaceremotefiles(jsonDict, tmp_directory, remote_identifiers=None):
		""" download remote files in dictionary values first and point to this copies in the dictionary """
		remote_identifiers = ["dcap", "root", "srm"]
		
		if not os.path.exists(tmp_directory):
			os.makedirs(tmp_directory)
		
		result = None
		if isinstance(jsonDict, dict):
			result = JsonDict()
			for key, value in jsonDict.items():
				result[key] = JsonDict.deepreplaceremotefiles(value, tmp_directory, remote_identifiers)
		elif isinstance(jsonDict, collections.Iterable) and not isinstance(jsonDict, basestring):
			result = []
			for item in jsonDict:
				result.append(JsonDict.deepreplaceremotefiles(item, tmp_directory, remote_identifiers))
		elif isinstance(jsonDict, basestring):
			if any([jsonDict.strip().startswith(remote_identifier) for remote_identifier in remote_identifiers]):
				#prefix, suffix = os.path.splitext(jsonDict.strip())
				#result = tempfile.mktemp(prefix=prefix+"_", suffix=suffix, dir=tmp_directory)
				result = os.path.join(tmp_directory, jsonDict.strip().replace(":", "_").replace("/", "__")[-200:])
				copy_command = "gfal-copy --timeout 1800 --force {remote} file://{local}".format(remote=dcachetools.xrd2srm(jsonDict), local=result)
				log.debug(copy_command)
				# a bare "except:" would also swallow the SystemExit raised by sys.exit,
				# so only catch Exception and handle all failures in one place
				try:
					success = (logger.subprocessCall(copy_command.split()) == 0)
				except Exception:
					success = False
				if not success:
					result = jsonDict
					log.critical("Could not download \""+jsonDict+"\"!")
			else:
				result = jsonDict
		else:
			result = jsonDict
		return result
Example #21
def execute_commands(commands, max_n_trials=2):
	for command in commands:
		log.info("\nExecute \"%s\"." % command)
		exit_code = 1
		n_trials = 0
		while exit_code != 0:
			n_trials += 1
			if n_trials > max_n_trials:
				log.error("Last command could not be executed successfully! Stop program!")
				sys.exit(1)

			if command.startswith("cd"):
				target_dir = os.path.expandvars(command.replace("cd ", ""))
				os.chdir(target_dir)
				# exit code 0 if the working directory is now the requested one
				exit_code = int(os.path.abspath(target_dir) != os.path.abspath(os.getcwd()))
			else:
				exit_code = logger.subprocessCall(command.split())
Example #22
def do_cv_cf_scan(output_dir, mass="125", print_only=False):
    command = "submit.py --interactive --stable-new --multidim-fit --physics-model cV-cF --points 900 {datacards}".format(
        datacards=os.path.join(output_dir, mass)
    )
    log.info(command)
    if not print_only:
        logger.subprocessCall(shlex.split(command))

    command = command.replace("submit.py --interactive", "limit.py --algo grid")
    log.info(command)
    if not print_only:
        logger.subprocessCall(shlex.split(command))

    command = "rm -rfv {dirs} log/".format(dirs=" ".join(glob.glob("*CV-CF-{mass}/".format(mass=mass))))
    log.info(command)
    if not print_only:
        logger.subprocessCall(shlex.split(command))
Example #23
	def callExecutable(self):
		"""run Artus analysis (C++ executable)"""
		exitCode = 0

		# check output directory
		outputDir = os.path.dirname(self._args.output_file)
		if outputDir and not os.path.exists(outputDir):
			os.makedirs(outputDir)

		# call C++ executable locally
		command = self._executable + " " + self._configFilename
		log.info("Execute \"%s\"." % command)
		exitCode = logger.subprocessCall(command.split())
	
		if exitCode != 0:
			log.error("Exit with code %s.\n\n" % exitCode)
			log.info("Dump configuration:\n")
			log.info(self._configFilename) # TODO

		# remove tmp. config
		# logging.getLogger(__name__).info("Remove temporary config file.")
		# os.system("rm " + self._configFilename)

		return exitCode
Example #24
		datacards = cpstudiesdatacards.CPStudiesDatacards(cb=cb)
		
		for model in args.models:
			model_settings = models.get(model, {})
			
			datacards_workspaces = {}
			for freeze_syst_uncs in args.freeze_syst_uncs:
				
				output_dir_base = args.output_dir
				if output_dir_base is None:
					output_dir_base = os.path.splitext(datacard)[0]
				sub_dir_base = os.path.join("projection", model, "statUnc" if freeze_syst_uncs else "totUnc")
				output_dir_base = os.path.abspath(os.path.expandvars(os.path.join(output_dir_base, sub_dir_base)))
				
				if args.clear_output_dir and os.path.exists(output_dir_base):
					logger.subprocessCall("rm -r " + output_dir_base, shell=True)				
				
				# scale datacards
				datacards_cbs = {}
				datacards_poi_ranges = {}
				for lumi in args.lumis:
					output_dir = os.path.join(output_dir_base, "{:06}".format(lumi))
					if not os.path.exists(output_dir):
						os.makedirs(output_dir)
					
					scaled_datacards = cpstudiesdatacards.CPStudiesDatacards(cb=datacards.cb.deep())
					
					lumi_scale_factor = lumi / args.lumi_datacards
					scaled_datacards.scale_expectation(lumi_scale_factor, no_norm_rate_sig=True)
					#scaled_datacards.replace_observation_by_asimov_dataset("125")
					scaled_datacards.cb.PrintAll()
Example #25
	def sendToBatchSystem(self):
		
		# write dbs file
		dbsFileContent = ""
		for nickname, filelist in self._gridControlInputFiles.iteritems():
			dbsFileContent += "\n[" + nickname + "]\nnickname = " + nickname + "\n"
			for inputEntry in filelist:
				dbsFileContent += inputEntry + "\n"
		
		dbsFileBasename = "datasets_{0}.dbs".format(hashlib.md5(str(self._config)).hexdigest())
		dbsFileBasepath = os.path.join(self.projectPath, dbsFileBasename)
		with open(dbsFileBasepath, "w") as dbsFile:
			dbsFile.write(dbsFileContent)
		
		gcConfigFilePath = os.path.expandvars(self._args.gc_config)
		gcConfigFile = open(gcConfigFilePath,"r")
		tmpGcConfigFileBasename = "grid-control_base_config_{0}.conf".format(hashlib.md5(str(self._config)).hexdigest())
		tmpGcConfigFileBasepath = os.path.join(self.projectPath, tmpGcConfigFileBasename)

		# open base file and save it to a list
		tmpGcConfigFile = open(tmpGcConfigFileBasepath,"w")
		gcConfigFileContent = gcConfigFile.readlines()
		gcConfigFile.close()
		
		sepathRaw = os.path.join(self.projectPath, "output")
		
		epilogArguments  = r"epilog arguments = "
		epilogArguments += r"--disable-repo-versions "
		epilogArguments += r"--log-level debug "
		if self._args.no_log_to_se:
			epilogArguments += r"--log-files log.txt "
		else:
			epilogArguments += r"--log-files " + os.path.join(sepathRaw, "${DATASETNICK}", "${DATASETNICK}_job_${MY_JOBID}_log.txt") + " "
		epilogArguments += r"--print-envvars ROOTSYS CMSSW_BASE DATASETNICK FILE_NAMES LD_LIBRARY_PATH "
		epilogArguments += r"-c " + os.path.basename(self._configFilename) + " "
		epilogArguments += "--nick $DATASETNICK "
		epilogArguments += "-i $FILE_NAMES "
		if self._args.ld_library_paths is not None:
			epilogArguments += ("--ld-library-paths %s" % " ".join(self._args.ld_library_paths))
		
		sepath = "se path = " + (self._args.se_path if self._args.se_path else sepathRaw)
		workdir = "workdir = " + os.path.join(self.projectPath, "workdir")
		with open(os.path.expandvars("$CMSSW_BASE/src/Artus/Configuration/data/grid-control_backend_" + self._args.batch + ".conf"), "r") as backend_file:
			backend = backend_file.read()
		self.replacingDict = dict(
				include = ("include = " + " ".join(self._args.gc_config_includes) if self._args.gc_config_includes else ""),
				epilogexecutable = "epilog executable = $CMSSW_BASE/bin/" + os.path.join(os.path.expandvars("$SCRAM_ARCH"), os.path.basename(sys.argv[0])),
				sepath = sepath,
				workdir = workdir,
				jobs = "" if not self._args.fast else "jobs = " + str(self._args.fast),
				inputfiles = "input files = \n\t" + self._configFilename,
				filesperjob = "files per job = " + str(self._args.files_per_job),
				areafiles = self._args.area_files if (self._args.area_files is not None) else "",
				walltime = "wall time = " + self._args.wall_time,
				memory = "memory = " + str(self._args.memory),
				cmdargs = "cmdargs = " + self._args.cmdargs,
				dataset = "dataset = \n\t:ListProvider:" + dbsFileBasepath,
				epilogarguments = epilogArguments,
				seoutputfiles = "se output files = *.txt *.root" if self._args.no_log_to_se else "se output files = *.root",
				backend = backend
		)

		self.modify_replacing_dict()

		self.replaceLines(gcConfigFileContent, self.replacingDict)
		for index, line in enumerate(gcConfigFileContent):
			gcConfigFileContent[index] = line.replace("$CMSSW_BASE", os.environ.get("CMSSW_BASE", ""))

		# save it
		for line in gcConfigFileContent:
			tmpGcConfigFile.write(line)
		tmpGcConfigFile.close()

		exitCode = 0
		command = "go.py " + tmpGcConfigFileBasepath
		log.info("Execute \"%s\"." % command)
		if not self._args.no_run:
			exitCode = logger.subprocessCall(command.split())
		
		log.info("Output is written to directory \"%s\"" % sepathRaw)
		log.info("\nMerge outputs in one file per nick using")
		log.info("artusMergeOutputs.py %s" % self.projectPath)
		log.info("artusMergeOutputsWithGC.py %s" % self.projectPath)

		if exitCode != 0:
			log.error("Exit with code %s.\n\n" % exitCode)
			log.info("Dump configuration:\n")
			log.info(self._configFilename)

		return exitCode
Example #26
	args = parser.parse_args()
	logger.initLogger(args)
	
	if (args.era == "2015") or (args.era == "2015new"):
		import HiggsAnalysis.KITHiggsToTauTau.plotting.configs.samples_run2_2015 as samples
	elif args.era == "2016":
		import HiggsAnalysis.KITHiggsToTauTau.plotting.configs.samples_run2_2016 as samples
		if args.lumi == parser.get_default("lumi"):
			args.lumi = samples.default_lumi/1000.0
	else:
		log.critical("Invalid era string selected: " + args.era)
		sys.exit(1)
	
	args.output_dir = os.path.abspath(os.path.expandvars(args.output_dir))
	if args.clear_output_dir and os.path.exists(args.output_dir):
		logger.subprocessCall("rm -r " + args.output_dir, shell=True)
	# prepare binning
	nbins = args.n_bins
	nbins1D = int(nbins**(1.0/len(args.quantities)))
	if nbins1D < 2:
		nbins1D = 2
		
	# initialisations for plotting
	sample_settings = samples.Samples()
	binnings_settings = binnings.BinningsDict()
	systematics_factory = systematics.SystematicsFactory()
	
	plot_configs = []
	output_files = []
	merged_output_files = []
	hadd_commands = []
Example #27
def _call_command(command):
	log.debug(command)
	logger.subprocessCall(command, shell=True)
Example #28
		datacards = smhttdatacards.SMHttDatacards(cb=cb)
		
		for model in args.models:
			model_settings = models.get(model, {})
			
			datacards_workspaces = {}
			for freeze_syst_uncs in args.freeze_syst_uncs:
				
				output_dir_base = args.output_dir
				if output_dir_base is None:
					output_dir_base = os.path.splitext(datacard)[0]
				sub_dir_base = os.path.join("projection", model, "statUnc" if freeze_syst_uncs else "totUnc")
				output_dir_base = os.path.abspath(os.path.expandvars(os.path.join(output_dir_base, sub_dir_base)))
				
				if args.clear_output_dir and os.path.exists(output_dir_base):
					logger.subprocessCall("rm -r " + output_dir_base, shell=True)
				
				# scale datacards
				datacards_cbs = {}
				datacards_poi_ranges = {}
				for lumi in args.lumis:
					output_dir = os.path.join(output_dir_base, "{:06}".format(lumi))
					if not os.path.exists(output_dir):
						os.makedirs(output_dir)
					
					scaled_datacards = smhttdatacards.SMHttDatacards(cb=datacards.cb.deep())
					
					lumi_scale_factor = lumi / args.lumi_datacards
					scaled_datacards.scale_expectation(lumi_scale_factor)
					#scaled_datacards.replace_observation_by_asimov_dataset("125")
					
Example #29
def _call_command(command):
    log.debug(command)
    logger.subprocessCall(shlex.split(command))
Example #30
    def sendToBatchSystem(self):

        # write dbs file
        dbsFileContent = tools.write_dbsfile(self._gridControlInputFiles)

        dbsFileBasename = "datasets.dbs"
        dbsFileBasepath = os.path.join(self.localProjectPath, dbsFileBasename)
        with open(dbsFileBasepath, "w") as dbsFile:
            dbsFile.write(dbsFileContent)

        gcConfigFilePath = os.path.expandvars(self._args.gc_config)
        gcConfigFile = open(gcConfigFilePath, "r")
        tmpGcConfigFileBasename = "grid-control_config.conf"
        tmpGcConfigFileBasepath = os.path.join(self.localProjectPath,
                                               tmpGcConfigFileBasename)

        # open base file and save it to a list
        tmpGcConfigFile = open(tmpGcConfigFileBasepath, "w")
        gcConfigFileContent = gcConfigFile.readlines()
        gcConfigFile.close()

        sepathRaw = os.path.join(self.projectPath, "output")

        epilogArguments = r"epilog arguments = "
        epilogArguments += r"--disable-repo-versions "
        epilogArguments += r"--log-level " + self._args.log_level + " "
        if self._args.log_to_se:
            epilogArguments += r"--log-files " + os.path.join(
                sepathRaw, "${DATASETNICK}",
                "${DATASETNICK}_job_${MY_JOBID}_log.log") + " "
        else:
            epilogArguments += r"--log-files log.log --log-stream stdout "
        epilogArguments += r"--print-envvars ROOTSYS CMSSW_BASE DATASETNICK FILE_NAMES LD_LIBRARY_PATH "
        epilogArguments += r"-c " + os.path.basename(
            self._configFilename) + " "
        epilogArguments += "--nick $DATASETNICK "
        epilogArguments += "-i $FILE_NAMES "
        if self._args.ld_library_paths is not None:
            epilogArguments += ("--ld-library-paths %s" %
                                " ".join(self._args.ld_library_paths))

        sepath = "se path = " + (self._args.se_path
                                 if self._args.se_path else sepathRaw)
        workdir = "workdir = " + os.path.join(self.localProjectPath, "workdir")
        backend = open(
            os.path.expandvars(
                "$CMSSW_BASE/src/Artus/Configuration/data/grid-control_backend_"
                + self._args.batch + ".conf"), 'r').read()
        self.replacingDict = dict(
            include=("include = " + " ".join(self._args.gc_config_includes)
                     if self._args.gc_config_includes else ""),
            epilogexecutable="epilog executable = " +
            os.path.basename(sys.argv[0]),
            sepath=sepath,
            workdir=workdir,
            jobs="" if self._args.fast is None else "jobs = " +
            str(self._args.fast),
            inputfiles="input files = \n\t" + self._configFilename + "\n\t" +
            os.path.expandvars(
                os.path.join("$CMSSW_BASE/bin/$SCRAM_ARCH",
                             os.path.basename(sys.argv[0]))),
            filesperjob="files per job = " + str(self._args.files_per_job),
            areafiles=self._args.area_files if
            (self._args.area_files != None) else "",
            walltime="wall time = " + self._args.wall_time,
            memory="memory = " + str(self._args.memory),
            cmdargs="cmdargs = " + self._args.cmdargs,
            dataset="dataset = \n\t:ListProvider:" + dbsFileBasepath,
            epilogarguments=epilogArguments,
            seoutputfiles="se output files = *.root"
            if self._args.log_to_se else "se output files = *.log *.root",
            backend=backend,
            partitionlfnmodifier="partition lfn modifier = " +
            self._args.partition_lfn_modifier if
            (self._args.partition_lfn_modifier != None) else "")

        self.modify_replacing_dict()

        self.replaceLines(gcConfigFileContent, self.replacingDict)
        for index, line in enumerate(gcConfigFileContent):
            # chain both substitutions; re-assigning from the original "line"
            # twice would discard the first replacement
            line = line.replace("$CMSSW_BASE", os.environ.get("CMSSW_BASE", ""))
            line = line.replace("$X509_USER_PROXY", os.environ.get("X509_USER_PROXY", ""))
            gcConfigFileContent[index] = line

        # save it
        for line in gcConfigFileContent:
            tmpGcConfigFile.write(line)
        tmpGcConfigFile.close()

        exitCode = 0
        command = "go.py " + tmpGcConfigFileBasepath
        log.info("Execute \"%s\"." % command)
        if not self._args.no_run:
            exitCode = logger.subprocessCall(command.split())

        log.info("Output is written to directory \"%s\"" % sepathRaw)
        log.info("\nMerge outputs in one file per nick using")
        if not self.remote_se:
            log.info("artusMergeOutputs.py %s" % self.projectPath)
        log.info(
            "artusMergeOutputsWithGC.py %s" %
            (self.localProjectPath if self.remote_se else self.projectPath))

        if exitCode != 0:
            log.error("Exit with code %s.\n\n" % exitCode)
            log.info("Dump configuration:\n")
            log.info(self._configFilename)

        return exitCode
Example #31
def _call_command(command):
	log.debug(command)
	logger.subprocessCall(command, shell=True)
    channels = " ".join(["--channel " + channel for channel in args.channels])

    plot_configs = []

    # 8TeV results
    output_dir = os.path.join(args.output_dir, "8TeV")
    if not args.plots_only:
        clear_output_dir(output_dir, args.print_only)

        # datacards
        command = "SMLegacyDatacards {channels} --output {output_dir} --asimov --asimov-mass 125".format(
            channels=channels, output_dir=output_dir
        )
        log.info(command)
        if not args.print_only:
            logger.subprocessCall(shlex.split(command))

        # limits
        do_limits(output_dir, args.print_only)

        # p-values
        do_p_values(output_dir, args.print_only)

        # cV-cF scan
        do_cv_cf_scan(output_dir, print_only=args.print_only)

        # annotations for plotting
        annotate_lumi(output_dir, lumi=-1.0, print_only=args.print_only)

    # plotting
    for json in [
Example #33
def main():

    parser = argparse.ArgumentParser(
        description="Get JEC parameter files using CMSSW tools.",
        parents=[logger.loggingParser])
    parser.add_argument("--gt-data",
                        default="FT_53_V21_AN4::All",
                        help="Global tag for data. [Default: %(default)s]")
    parser.add_argument("--gt-mc",
                        default="START53_V23::All",
                        help="Global tag for MC. [Default: %(default)s]")
    parser.add_argument(
        "--pf-jets",
        default="AK5PF",
        help="Name of PF jets collection. [Default: %(default)s]")
    parser.add_argument(
        "--calo-jets",
        default="",
        help="Name of Calo jets collection. [Default: %(default)s]")
    args = parser.parse_args()
    logger.initLogger(args)
    args = vars(args)

    python_config_template = string.Template("""
import FWCore.ParameterSet.Config as cms
process = cms.Process("jectxt")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
# define your favorite global tag
process.GlobalTag.globaltag = "$global_tag"
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.source = cms.Source("EmptySource")
process.readAK5PF    = cms.EDAnalyzer("JetCorrectorDBReader",
      payloadName    = cms.untracked.string("$pf_jets"),
      # this is used ONLY for the name of the printed txt files. You can use any name that you like,
      # but it is recommended to use the GT name that you retrieved the files from.
      globalTag      = cms.untracked.string("$global_tag_name"),
      printScreen    = cms.untracked.bool(False),
      createTextFile = cms.untracked.bool(True)
)
process.readAK5Calo = process.readAK5PF.clone(payloadName = "$calo_jets")
process.p = cms.Path(process.readAK5PF * process.readAK5Calo)
""")

    for mode in ["data", "mc"]:
        python_config = python_config_template.substitute(
            global_tag=args["gt_" + mode],
            global_tag_name=args["gt_" + mode].replace(":", "_"),
            pf_jets=args["pf_jets"],
            calo_jets=args["calo_jets"])
        if args["calo_jets"] == "":
            python_config = python_config.replace(
                "\nprocess.readAK5Calo = process.readAK5PF.clone(payloadName = \"\")",
                "")
            python_config = python_config.replace(
                "process.p = cms.Path(process.readAK5PF * process.readAK5Calo)",
                "process.p = cms.Path(process.readAK5PF)")

        python_config_file_name = None
        with tempfile.NamedTemporaryFile(prefix=mode,
                                         suffix=".py",
                                         delete=False) as python_config_file:
            python_config_file_name = python_config_file.name
            python_config_file.write(python_config)

        if python_config_file_name is not None:
            command = ["cmsRun", python_config_file_name]
            log.info("Execute \"%s\"." % " ".join(command))
            exit_code = logger.subprocessCall(command)
            log.info("Exit code: " + str(exit_code))
            os.remove(python_config_file_name)
Example #34
	def webplotting(www, output_dir, output_filenames=False,
			www_text=False, www_title="plots_archive",
			www_nodate=False, additional_output_files=False,
			save_legend=False, export_json=False, no_publish=False,
			www_no_overwrite=False, remote_subdir=None):
		# set some needed variables
		# user = tools.get_environment_variable("HARRY_REMOTE_USER")
		html_content = ""
		overview_filename = "index.html"
		date = datetime.date.today().strftime('%Y_%m_%d')
		if remote_subdir is None:
			remote_subdir = os.path.expandvars(os.path.join((date if (www == "" or not www_nodate) else ""), (www if type(www)==str else "")))
		url = os.path.expandvars(os.path.join("$HARRY_URL", remote_subdir, overview_filename))
		plots_for_gallery = [p for p in sorted(os.listdir(output_dir)) if (os.path.isfile(os.path.join(output_dir, p)) and all([not p.endswith("." + ext) for ext in ["json", "html"]]))]
		# get the html templates
		files_to_copy = []
		for galleryplot in plots_for_gallery:
			files_to_copy.append(os.path.join(output_dir, galleryplot))
		html_texts = {}
		for var in ['overview', 'description', 'plot']:
			with open(os.path.expandvars("$ARTUSPATH/HarryPlotter/data/template_webplotting_{}.html".format(var))) as htmlfile:
				html_texts[var] = string.Template(htmlfile.read())
		html_texts['description'] = html_texts['description'].substitute(url=url)
		if www_text:
			html_texts['description'] = www_text

		# loop over plots, make gallery (one entry for multiple formats)
		for plot in sorted(list(set([os.path.splitext(plot)[0] for plot in plots_for_gallery]))):
			formats = [os.path.splitext(p)[1] for p in plots_for_gallery if (plot == os.path.splitext(p)[0])]
			# use png for preview, if it exists
			image = plot + ('.png' if (plot +'.png' in plots_for_gallery) else formats[0])

			# links for the different formats
			links = ""
			for fileformat in formats:
				links +=' <a href="{}">{}</a>'.format(plot+fileformat, fileformat[1:])

			html_content += html_texts['plot'].substitute(
				title=plot,
				image=image,
				links=links,
				json=plot + ".json"
			)

		# put the html parts together and write
		with open(os.path.join(output_dir, overview_filename), "w") as overview_file:
			overview_file.write(html_texts['overview'].substitute(
				html_content=html_content,
				title=www_title,
				text=html_texts['description']
			))
		if no_publish:
			return 0

		# find out which files to copy
		if output_filenames is not False:
			files_to_copy = output_filenames
		files_to_copy += [os.path.join(output_dir, overview_filename)]

		if additional_output_files is not False:
			files_to_copy += additional_output_files
		if export_json is not False:
			files_to_copy += [export_json]
		if save_legend:
			files_to_copy += [os.path.join(output_dir, save_legend + _format) for _format in formats]

		# prepare the executed copy command
		web_plotting_copy_command = os.path.expandvars("$WEB_PLOTTING_COPY_COMMAND")

		# no overwriting
		if www_no_overwrite:
			web_plotting_ls_command = os.path.expandvars("$WEB_PLOTTING_LS_COMMAND").format(subdir=remote_subdir)
			process = subprocess.Popen(web_plotting_ls_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
			remote_files, error = process.communicate()
			if error is not None or '[ERROR]' in remote_files:
				log.error('Error occurred during web ls! Exiting. Output: %s' % (remote_files))
				# a bare "raise" without an active exception is itself an error
				raise RuntimeError(remote_files)
			remote_dir = web_plotting_ls_command.split()[-1]
			remote_files = [i[len(remote_dir):].strip('/') if i.startswith(remote_dir) else i for i in remote_files.split()]
			for i in files_to_copy:
				if 'index.html' not in i and i in remote_files:
					log.error('File %s already exists at %s. Exiting' % (i, remote_dir))
					raise RuntimeError(i)

		# create remote dir, copy files
		mkdir_command = os.path.expandvars("$WEB_PLOTTING_MKDIR_COMMAND").format(subdir=remote_subdir)
		copy_command = web_plotting_copy_command.format(source=" ".join(files_to_copy), subdir=remote_subdir)

		log.debug("Copying plots to webspace...")
		log.debug("\nIssueing mkdir command: " + mkdir_command)
		logger.subprocessCall(mkdir_command.split())
		log.debug("\nIssueing copy command: " + copy_command)
		logger.subprocessCall(copy_command.split())
		log.debug("Copied {0}".format(" ".join([f.split("/")[-1] for f in files_to_copy])))
		log.info("Website: {0}".format(url))
Example #35
	def webplotting(www, output_dir, output_filenames=False, www_text = False, www_title="plots_archive", www_nodate = False, additional_output_files=False, save_legend=False, export_json = False, no_publish=False):
		# set some needed variables
		user = tools.get_environment_variable("HARRY_REMOTE_USER")
		html_content = ""
		overview_filename = "index.html"
		date = datetime.date.today().strftime('%Y_%m_%d')
		remote_subdir = os.path.expandvars(os.path.join((date if (www == "" or not www_nodate) else ""), (www if type(www)==str else "")))
		url = os.path.expandvars(os.path.join("$HARRY_URL", remote_subdir, overview_filename))
		plots_for_gallery = [p for p in sorted(os.listdir(output_dir)) if (os.path.isfile(os.path.join(output_dir, p)) and all([not p.endswith("."+ext) for ext in ["json", "html", "root"]]))]
		# get the html templates
		files_to_copy = []
		for galleryplot in plots_for_gallery:
			files_to_copy.append(os.path.join(output_dir, galleryplot))
		html_texts = {}
		for var in ['overview', 'description', 'plot']:
			with open(os.path.expandvars("$ARTUSPATH/HarryPlotter/data/template_webplotting_{}.html".format(var))) as htmlfile:
				html_texts[var] = string.Template(htmlfile.read())
		html_texts['description'] = html_texts['description'].substitute(url=url)
		if www_text:
			html_texts['description'] = www_text

		# loop over plots, make gallery (one entry for multiple formats)
		for plot in sorted(list(set([os.path.splitext(plot)[0] for plot in plots_for_gallery]))):
			formats = [os.path.splitext(p)[1] for p in plots_for_gallery if (plot == os.path.splitext(p)[0])]
			# use png for preview, if it exists
			image = plot + ('.png' if (plot +'.png' in plots_for_gallery) else formats[0])

			# links for the different formats
			links = ""
			for fileformat in formats:
				links +=' <a href="{}">{}</a>'.format(plot+fileformat, fileformat[1:])

			html_content += html_texts['plot'].substitute(
					title=plot,
					image=image,
					links=links,
					json=plot+".json"
			)

		# put the html parts together and write
		with open(os.path.join(output_dir, overview_filename), "w") as overview_file:
			overview_file.write(html_texts['overview'].substitute(
				html_content=html_content,
				title=www_title,
				text=html_texts['description']
			))
		if no_publish:
			return 0
		# find out which files to copy
		if output_filenames is not False:
			files_to_copy = output_filenames
		files_to_copy += [os.path.join(output_dir, overview_filename)]
		if additional_output_files is not False:
			files_to_copy += additional_output_files
		if export_json is not False:
			files_to_copy += [export_json]
		if save_legend:
			files_to_copy += [os.path.join(output_dir, save_legend + _format) for _format in formats]

		# create remote dir, copy files
		mkdir_command = os.path.expandvars("$WEB_PLOTTING_MKDIR_COMMAND").format(subdir=remote_subdir)
		copy_command = os.path.expandvars("$WEB_PLOTTING_COPY_COMMAND").format(source=" ".join(files_to_copy), subdir=remote_subdir)

		log.info("Copying plots to webspace...")
		log.debug("\nIssueing mkdir command: " + mkdir_command)
		logger.subprocessCall(mkdir_command.split())
		log.debug("\nIssueing copy command: " + copy_command)
		logger.subprocessCall(copy_command.split())
		log.info("Copied {0}; see {1}".format(" ".join([f.split("/")[-1] for f in files_to_copy]), url))
Example #36
	def webplotting(www, output_dir, output_filenames=False, www_text = False, www_title="plots_archive", additional_output_files=False, save_legend=False, export_json = False, no_publish=False):
		# set some needed variables
		user = tools.get_environment_variable("HARRY_REMOTE_USER")
		html_content = ""
		overview_filename = "index.html"
		date = datetime.date.today().strftime('%Y_%m_%d')
		remote_subdir = os.path.expandvars(os.path.join(date, (www if type(www)==str else "")))
		url = os.path.expandvars(os.path.join("$HARRY_URL", remote_subdir, overview_filename))
		plots_for_gallery = [p for p in sorted(os.listdir(output_dir)) if (os.path.isfile(os.path.join(output_dir, p)) and all([not p.endswith("."+ext) for ext in ["json", "html", "root"]]))]
		# get the html templates
		files_to_copy = []
		for galleryplot in plots_for_gallery:
			files_to_copy.append(os.path.join(output_dir, galleryplot))
		html_texts = {}
		for var in ['overview', 'description', 'plot']:
			with open(os.path.expandvars("$ARTUSPATH/HarryPlotter/data/template_webplotting_{}.html".format(var))) as htmlfile:
				html_texts[var] = string.Template(htmlfile.read())
		html_texts['description'] = html_texts['description'].substitute(url=url)
		if www_text:
			html_texts['description'] = www_text

		# loop over plots, make gallery (one entry for multiple formats)
		for plot in sorted(list(set([os.path.splitext(plot)[0] for plot in plots_for_gallery]))):
			formats = [os.path.splitext(p)[1] for p in plots_for_gallery if (plot == os.path.splitext(p)[0])]
			# use png for preview, if it exists
			image = plot + ('.png' if (plot +'.png' in plots_for_gallery) else formats[0])

			# links for the different formats
			links = ""
			for fileformat in formats:
				links +=' <a href="{}">{}</a>'.format(plot+fileformat, fileformat[1:])

			html_content += html_texts['plot'].substitute(
					title=plot,
					image=image,
					links=links,
					json=plot+".json"
			)

		# put the html parts together and write
		with open(os.path.join(output_dir, overview_filename), "w") as overview_file:
			overview_file.write(html_texts['overview'].substitute(
				html_content=html_content,
				title=www_title,
				text=html_texts['description']
			))
		if no_publish:
			return 0
		# find out which files to copy
		if output_filenames is not False:
			files_to_copy = output_filenames
		files_to_copy += [os.path.join(output_dir, overview_filename)]
		if additional_output_files is not False:
			files_to_copy += additional_output_files
		if export_json is not False:
			files_to_copy += [export_json]
		if save_legend:
			files_to_copy += [os.path.join(output_dir, save_legend + _format) for _format in formats]

		# create remote dir, copy files
		mkdir_command = os.path.expandvars("$WEB_PLOTTING_MKDIR_COMMAND").format(subdir=remote_subdir)
		copy_command = os.path.expandvars("$WEB_PLOTTING_COPY_COMMAND").format(source=" ".join(files_to_copy), subdir=remote_subdir)

		log.info("Copying plots to webspace...")
		log.debug("\nIssueing mkdir command: " + mkdir_command)
		logger.subprocessCall(mkdir_command.split())
		log.debug("\nIssueing rsync command: " + copy_command)
		logger.subprocessCall(copy_command.split())
		log.info("Copied {0}; see {1}".format(" ".join([f.split("/")[-1] for f in files_to_copy]), url))
Example #37
def main():

    parser = argparse.ArgumentParser(
        description="Batch submission of multiple commands.",
        parents=[logger.loggingParser])

    parser.add_argument(
        "commands",
        help=
        "Commands to be executed on a batch system. They can also be piped into this program.",
        nargs="*",
        default=[])
    parser.add_argument(
        "-b",
        "--batch",
        default="rwthcondor",
        help="Run with grid-control and select backend. [Default: %(default)s]"
    )

    args = parser.parse_args()
    logger.initLogger(args)

    project_directory = tempfile.mkdtemp(
        prefix="batch_submission_" +
        datetime.datetime.now().strftime("%Y-%m-%d_%H-%M") + "_")

    # prepare commands
    if (len(args.commands) == 0) and (not sys.stdin.isatty()):
        args.commands.extend(sys.stdin.read().strip().split("\n"))

    template_execute_command_n = ""
    with open(
            os.path.expandvars(
                "$CMSSW_BASE/src/Artus/Utility/data/template_execute_command_n.sh"
            ), "r") as template_execute_command_n_file:
        template_execute_command_n = template_execute_command_n_file.read(
        ).strip()

    execute_command_n_filename = os.path.join(project_directory,
                                              "execute_command_n.sh")
    with open(execute_command_n_filename, "w") as execute_command_n_file:
        execute_command_n_file.write(
            string.Template(template_execute_command_n).safe_substitute(
                commands="'" + ("'\n\t'".join(args.commands)) + "'"))

    # prepare GC
    main_config = ""
    with open(
            os.path.expandvars(
                "$CMSSW_BASE/src/Artus/Utility/data/grid-control_base_config.conf"
            ), "r") as main_config_file:
        main_config = main_config_file.read()

    backend_config = ""
    with open(
            os.path.expandvars(
                "$CMSSW_BASE/src/Artus/Configuration/data/grid-control_backend_"
                + args.batch + ".conf"), "r") as backend_config_file:
        backend_config = backend_config_file.read()

    final_config_filename = os.path.join(project_directory,
                                         "grid-control.conf")
    with open(final_config_filename, "w") as final_config_file:
        final_config_file.write(
            string.Template(main_config).safe_substitute(
                command_indices=" ".join(
                    str(index) for index in range(len(args.commands))),
                backend=backend_config))

    # run
    command = "go.py " + final_config_filename
    log.info(command)
    logger.subprocessCall(shlex.split(command))
Example #38
    plot_configs = []
    event_matching_output = "eventmatching.root"
    for index, quantity in enumerate(common_quantities):
        plot_config = {}

        if args.event_matching:
            if index == 0:
                command = "eventmatching.py {input1} {input2} -t {folder1} {folder2} -f {output}".format(
                    input1=args.input_1,
                    input2=args.input_2,
                    folder1=args.folder_1,
                    folder2=args.folder_2,
                    output=event_matching_output)
                log.info(command)
                logger.subprocessCall(command, shell=True)

            plot_config["files"] = [event_matching_output]
            plot_config["folders"] = ["common1", "common2", "only1", "only2"]
            plot_config["nicks"] = ["common1", "common2", "only1", "only2"]
            plot_config["x_expressions"] = [quantity]
            plot_config["weights"] = ["(" + quantity + "> -990)"]

            plot_config.setdefault("analysis_modules", []).append("Ratio")
            plot_config["ratio_numerator_nicks"] = plot_config["nicks"][0]
            plot_config["ratio_denominator_nicks"] = plot_config["nicks"][1]

            plot_config["labels"] = [
                "common in 1", "common in 2", "only in 1", "only in 2", ""
            ]
            plot_config["legend_markers"] = ["LP", "F", "F", "F", ""]
Example #39
def profile_cpp(command, profiler, profiler_opt, output_dir=None):

	log.warning("Did you compile your code with debug flags? scram b USER_CXXFLAGS=\"-g\"")

	if not isinstance(command, basestring):
		command = " ".join(command)

	if output_dir is None:
		output_dir = tempfile.mkdtemp(prefix="profile_cpp_")

	profile_commands = None
	profile_outputs = None

	if profiler == "igprof":
		if profiler_opt == "pp":
			# call C++ executable with profiler igprof
			profile_outputs = [os.path.join(output_dir, filename) for filename in ["igprof.pp.gz", "igprof.analyse.txt", "igprof.analyse.sql3"]]

			profile_commands = [
			"igprof -d -pp -z -o " + profile_outputs[0] + " " + command,
			"execute-command.sh igprof-analyse -d -v -g " + profile_outputs[0] + " > " + profile_outputs[1],
			"execute-command.sh igprof-analyse --sqlite -d -v -g " + profile_outputs[0] + " | sqlite3 " + profile_outputs[2],
			"igprof-navigator " + profile_outputs[2],
			]
		elif profiler_opt == "mp":
			# call C++ executable with profiler igprof
			profile_outputs = [os.path.join(output_dir, filename) for filename in ["igprof.mp.gz", "igprof.analyse.txt", "igprof.analyse.sql3"]]

			profile_commands = [
			"igprof -d -mp -z -o " + profile_outputs[0] + " " + command,
			"execute-command.sh igprof-analyse -d -v -g -r MEM_TOTAL " + profile_outputs[0] + " > " + profile_outputs[1],
			"execute-command.sh igprof-analyse --sqlite -d -v -g -r MEM_TOTAL " + profile_outputs[0] + " | sqlite3 " + profile_outputs[2],
			"igprof-navigator " + profile_outputs[2],
			]
		else:
			log.info(profiler_opt + " is not a valid option")

	elif profiler == "valgrind":
		if profiler_opt == "pp":
			# call C++ executable with profiler valgrind
			profile_outputs = [os.path.join(output_dir, filename) for filename in ["valgrind.callgrind.out"]]

			profile_commands = [
			"valgrind --tool=callgrind --callgrind-out-file=" + profile_outputs[0] + " " + command,
			"callgrind_annotate --auto=yes " + profile_outputs[0]
			]
		elif profiler_opt == "mp":
			# call C++ executable with profiler valgrind
			profile_outputs = [os.path.join(output_dir, filename) for filename in ["valgrind.massif.out", "valgrind.massif.txt"]]

			profile_commands = [
			"valgrind --tool=massif --massif-out-file=" + profile_outputs[0] + " " + command,
			"ms_print " + profile_outputs[0] + " > " + profile_outputs[1]
			]
		else:
			log.info(profiler_opt + " is not a valid option")

	else:
		log.info(profiler + " is not a valid profiler")

	# profile_commands stays None for an invalid profiler/option combination
	for command in (profile_commands or []):
		log.info("Execute \"%s\"." % command)
		logger.subprocessCall(command.split())

	if profile_outputs:
		log.info("Profiling output is written to \"%s\"." % "\", \"".join(profile_outputs))
Example #40
                        default="$CMSSW_BASE/src/plots/ztt_datacards_eff/",
                        help="Output directory. [Default: %(default)s]")
    parser.add_argument(
        "--clear-output-dir",
        action="store_true",
        default=False,
        help=
        "Delete/clear output directory before running this script. [Default: %(default)s]"
    )

    args = parser.parse_args()
    logger.initLogger(args)

    args.output_dir = os.path.abspath(os.path.expandvars(args.output_dir))
    if args.clear_output_dir and os.path.exists(args.output_dir):
        logger.subprocessCall("rm -r " + args.output_dir, shell=True)

    weight_string = "(fabs(eta_2) < 1.460)"
    #weight_string = "(fabs(eta_2) < 1.460)*(decayMode_2 == 1)"
    #weight_string = "(fabs(eta_2) > 1.558)"
    #weight_string = "1.0"

    # initialisations for plotting
    if args.model == "etaufakerate":
        import HiggsAnalysis.KITHiggsToTauTau.plotting.configs.samples_run2_etaufakerate as samples
    else:
        import HiggsAnalysis.KITHiggsToTauTau.plotting.configs.samples_run2_2015 as samples

    sample_settings = samples.Samples()
    systematics_factory = systematics.SystematicsFactory()