def print_documentation(object_name):
    print "--------- %s ---------" % utils.bold(object_name)
    print ""
    classes = enumerate_all_test_classes()
    for test_class_name, test_class in classes:
        arr = object_name.split('.')
        if test_class_name == object_name:
            # get the class info
            print "%s: %s" % (utils.bold('Prereqs'), test_class.required_config)
            print "%s: %s" % (utils.bold('Collects'), test_class.collects)
            print ""
            print utils.format(test_class.__doc__)
            print ""
            print "%s:" % utils.bold('Tests')
            inst = test_class(None, {})
            for method in inst.list_tests():
                print method
            print ""
            sys.exit(0)
        elif len(arr) == 3 and ".".join(arr[:2]) == test_class_name:
            # get the method info
            print utils.format(getattr(test_class, arr[2]).__doc__)
            print ""
            sys.exit(0)
    print "The test name specified (%s) was incorrect. Please specify the full test name." % object_name
    sys.exit(0)

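# Nearly every snippet in this collection relies on a bold() helper (often imported
# as utils.bold), whose definition is not included here. A minimal sketch using ANSI
# escape codes, matching the hard-coded "\033[1m...\033[0m" escape that appears in
# boosted_softmax_regression() further below:
def bold(text):
    """Wrap text in ANSI escape codes so terminals render it in bold."""
    return '\033[1m' + str(text) + '\033[0m'
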
class Location(object):
    TYPES = {
        'black': bold('*'),
        'white': bold('o'),
        'empty': '.',
    }

    def __init__(self, type):
        if type not in self.TYPES:
            raise LocationError(
                'Type must be one of the following: {0}'.format(
                    self.TYPES.keys(),
                ))
        self._type = type

    def __eq__(self, other):
        return self._type == other._type

    def __hash__(self):
        return hash(self._type)

    def __str__(self):
        return self.TYPES[self._type]

    def __repr__(self):
        return self._type.title()

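# A minimal usage sketch for the Location class above; it assumes bold() and
# LocationError are importable from the snippet's own module (names taken on trust
# from the class body, not shown here):
black = Location('black')
empty = Location('empty')
print(black)        # a bold '*'
print(repr(empty))  # 'Empty'
try:
    Location('grey')
except LocationError as err:
    print(err)      # lists the valid types
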
def on_finish(self, **kwargs):
    output_dir = kwargs.get('output_dir', '')
    optimized = "%(optimized)s (%(input)s -> %(output)s)"
    optimized = optimized % dict(optimized=self._optimized,
                                 input=size_fmt(self._input_bytes),
                                 output=size_fmt(self._output_bytes))
    print()
    print(bold("Optimized: ") + optimized, end="\t")
    print(bold("Skipped: ") + str(self._skipped), end="\t")
    print(bold("Failed: ") + str(self._failed), end="\t")
    print("\n")
    print("Optimized files were saved to:\n%s" % output_dir)
    print("\n\n")

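# size_fmt() is referenced above but not defined in this snippet; a plausible
# minimal sketch (hypothetical implementation) of a human-readable byte formatter
# with the shape the call sites assume:
def size_fmt(num_bytes, suffix='B'):
    """Format a byte count as e.g. '1.5 MiB'."""
    size = float(num_bytes)
    for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti'):
        if abs(size) < 1024.0:
            return "%3.1f %s%s" % (size, unit, suffix)
        size /= 1024.0
    return "%3.1f Pi%s" % (size, suffix)
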
def printIntro():
    print(utils.bold('SDR Waterfall2Img, version %s\n' % getVersion()))
    print(utils.bold('Usage: python3 wf2img.py --f=frequency [--fStart=f1 --fEnd=f2] [--sr=sampleRate] [--sdr=receiver] [--imagewidth=imageWidth] [--imagefile=fileName] [--average=N] [--saveIQ=1] [--tStart=18:30] [--tLimit=120] [--batch="frequency;timeStart;timeEnd"]'))
    print("Run 'nohup <python3 wf2img.py parameters> &' to execute in the background")
    print("To combine previously saved files, use: python3 fileProcessing.py --file=fileName.jpg [--delete=true]")
    print("")

def generateGridpack(carddir, sample, **kwargs):
    """Generate a gridpack for the given sample from the cards in the given directory."""
    cards = getCards(carddir, sample)  # [os.path.basename(f) for f in glob.glob("%s/%s_*.dat"%(carddir,sample))]
    copy = kwargs.get('copy', False)
    remove = kwargs.get('remove', True)

    # COPY
    if copy:
        newdir = ensureDirectory("%s_InputCards" % sample)
        print ">>> copying cards to '%s'..." % newdir
        for card in cards:
            shutil.copy(os.path.join(carddir, card), newdir)
        carddir = newdir

    # PRINT
    print ">>> "
    print ">>> " + '-' * 100
    #print ">>> year    = %s"%year
    print ">>> sample  = '%s'" % bold(sample)
    print ">>> cards   = '%s'" % "', '".join(c.replace(sample, '*') for c in cards)
    print ">>> carddir = '%s'" % bold(carddir)

    # CLEAN (was "if os.path.join(sample):", which is always truthy)
    if os.path.exists(sample):
        print ">>> " + warning("Directory '%s' already exists! Removing..." % sample)
        rmcommand = "rm -rf %s" % sample
        print ">>> " + bold(rmcommand)
        os.system(rmcommand)

    # GENERATE
    extraopts = ""  #%()
    gencommand = "./gridpack_generation.sh %s %s" % (sample, carddir)
    gencommand = gencommand.rstrip()
    print ">>> " + bold(gencommand)
    os.system(gencommand)

    # REMOVE
    if remove:
        rmdirs = [sample, carddir] if copy else [sample]
        for dir in rmdirs:
            rmcommand = "rm -rf %s" % dir
            print ">>> " + bold(rmcommand)
            os.system(rmcommand)
    print ">>> " + '-' * 100

def main(args):
    templates = args.templates
    cardlabel = args.cardlabel
    masses = args.masses
    outdir = args.outdir

    # CREATE POINTS
    if masses:
        keys = ['MASS']
        params = [('MASS', masses)]  # { 'MASS': masses }
    else:
        keys = []
        params = []
        for param in args.params.split(':'):
            assert '=' in param, "Invalid format '%s'; no '=' for '%s'" % (args.params, param)
            param, values = param[:param.index('=')], param[param.index('=') + 1:].split(',')
            assert param not in keys, error("Key '%s' defined multiple times!" % param)
            keys.append(param)
            params.append((param, values))  # params[param] = values
    if not cardlabel:
        cardlabel = '_'.join(k[0] + '$' + k for k in keys)
    if 'OUTPUT' not in keys:
        keys.append('OUTPUT')
        params.append(('OUTPUT', ["$SAMPLE_%s" % cardlabel]))
    points = list(itertools.product(*[v for k, v in params]))

    # PRINT
    print ">>> " + '=' * 90
    print ">>> templates = %s" % ', '.join(bold(t) for t in templates)
    print ">>> cardlabel = '%s'" % cardlabel
    print ">>> masses    = %s" % masses
    print ">>> params    = %s" % ', '.join("%s: %s" % (bold(k), l) for k, l in params)
    print ">>> " + '=' * 90

    # CREATE CARDS
    for values in points:
        kwargs = {}
        for key, value in zip(keys, values):
            kwargs[key] = value
        for template in templates:
            cardname = makeCardName(template, cardlabel, outdir, **kwargs)
            makeCard(template, cardname, outdir, verbose=True, **kwargs)

def __init__(self, year, dtype='mc', verbose=True):
    assert year in [2016, 2017, 2018], "Year should be 2016, 2017 or 2018"
    assert dtype in ['mc', 'data'], "Wrong data type '%s'! It should be 'mc' or 'data'!" % dtype
    isData = dtype == 'data'
    jsonfile = "json/tau_triggers_%d.json" % year
    channels = ['etau', 'mutau', 'ditau', 'mutau_SingleMuon', 'etau_SingleElectron']
    trigdata = loadTriggerDataFromJSON(jsonfile, isData=isData, verbose=verbose)
    triggers = {}
    trigmatcher = {}
    for channel in channels:
        triggers[channel] = trigdata.combdict[channel.replace('etau_', '').replace('mutau_', '')]
        trigmatcher[channel] = TrigObjMatcher(triggers[channel])
        print ">>> %s:" % bold("'%s' trigger object matcher" % channel)
        print ">>>   '%s'" % trigmatcher[channel].path
    self.eleptmin = 25
    self.muptmin = 21
    self.tauptmin = 40
    self.channels = channels
    self.crosstrigs = [c for c in channels if 'Single' not in c]
    self.isData = isData
    self.verbose = verbose
    self.triggers = trigdata
    self.trigmatcher = trigmatcher

def action_exit():
    clear_screen()
    print("Thank you for using Delegate Helper!")
    print("Have a great day :)")
    print("\n")
    print(bold("Exited successfully!"))
    exit(0)

def estimate_average_fidelity(p, q, k, n, r, qc_runner, number_of_iterations=1):
    if VERBOSE:
        print("Running optimization with params p and q:")
        print("p =", p)
        print("q =", q)
    fidelities = []
    # We calculate the fidelity for L different S samples
    for _ in range(NUMBER_OF_CYCLES_FOR_AVERAGE_FIDELITY_CALCULATION):
        qc = get_new_qc(get_quantum_register(n + r), get_classical_register(n + r))
        # sample the state S
        init_qc = get_sample_state_qc(k)
        encode_qc = get_encode_qc(range(n), p)
        recovery_qc = get_recovery_qc(range(n + r), q)
        noisy_I = get_I(n)
        init_reg = range(k)
        encode_reg = range(n)
        recover_reg = range(n + r)
        noise_reg = range(n)
        # init the state S
        # apply noisy encoding(q)
        # apply recovery(p)
        # apply (noisy encoding)^(dagger) = decoding(q)
        # apply (S)^(dagger)
        qc.append(init_qc, init_reg)
        qc.append(encode_qc, encode_reg)
        # was 'for i in ...', which shadowed the outer loop variable
        for _ in range(number_of_iterations):
            qc.unitary(noisy_I, noise_reg, label='noisy_I')
            qc.append(recovery_qc, recover_reg)
        qc.append(encode_qc.inverse(), encode_reg)
        qc.append(init_qc.inverse(), init_reg)
        # run the circuit and get results
        # the refresh qubits should work without noise?
        fidelity = qc_runner.get_fidelity(qc)
        fidelities.append(fidelity)
    # estimate the average fidelity for q and p
    average_fidelity = sum(fidelities) / len(fidelities)
    if VERBOSE:
        print(bold(f"For current p and q average fidelity was: {average_fidelity}"))
        print("=========================================")
        print()
    return average_fidelity

def print_all_test_classes():
    print "---------- %s ---------" % utils.bold("Test List")
    classes = enumerate_all_test_classes()
    for test_class_name, test_class in classes:
        obj = test_class('nonexistent_session', {})
        for test_name in obj.list_tests():
            print "%s.%s" % (test_class_name, test_name)
    sys.exit(0)

def main(args):
    years = args.years
    pset = args.pset
    samples = args.samples
    vetoes = args.vetoes
    priority = args.priority
    force = args.force
    test = args.test
    splitting = 'EventAwareLumiBased'
    tag = ""  # "DeepTau2017v2p1"

    # SAMPLES
    if 'nanoaod' in pset.lower():
        import samples_nanoAOD
        samplesets = samples_nanoAOD.samples
    else:
        import samples_miniAOD
        samplesets = samples_miniAOD.samples

    # WARNING
    if len(years) >= 2:
        print bold(">>> Warning! More than one year was given. If you load the CMSSW configuration of different years,")
        print bold(">>> you might get an error that CMSSW configuration files cannot be loaded more than once in memory...")

    # SUBMIT
    for year in years:
        datasets = samplesets.get(year, [])
        if samples:
            datasets = filterSamplesWithPattern(datasets, samples)
        if vetoes:
            datasets = filterSamplesWithPattern(datasets, vetoes, veto=True)
        submitSampleToCRAB(pset, year, datasets, tag=tag, priority=priority,
                           test=test, force=force, split=splitting)

async def on_message(message):
    if message.author == client.user:
        return
    name = NameManager.name_in_message(message.content)
    if name is not None:
        await message.channel.send(utils.bold(InsultManager.send_insult(name)))
    for command_key in commands.commands:
        if message.content.startswith(command_key):
            await message.channel.send(
                commands.commands[command_key](message.content.partition(' ')[2]))

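# For context: discord.py dispatches a handler like on_message() once it is
# registered on a Client via the @client.event decorator. A minimal sketch of that
# wiring, assuming a BOT_TOKEN supplied by the surrounding project (hypothetical
# name); recent discord.py versions also require explicitly passing intents:
import discord

intents = discord.Intents.default()
client = discord.Client(intents=intents)

@client.event
async def on_ready():
    print(f"Logged in as {client.user}")

# client.run(BOT_TOKEN)  # blocks and starts dispatching events such as on_message
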
def remove_experiment(name):
    experiments = experiment_scheduler.load_experiments(
        cluster=None, filter_eq_dct=dict(name=name))
    if len(experiments) == 0:
        print "No experiments in database %s" % get_db_string("experiments")
        return
    experiment = experiments[0]
    table_name = experiment["table"]
    if query_yes_no("Do you really want to delete experiment %s?" % bold(name)):
        print "Deleting %s..." % name
        experiment_scheduler.delete_experiments([experiment])
        if query_yes_no("Do you want to delete corresponding jobs?"):
            jobs = job_scheduler.load_jobs(table_name)
            print "Deleting %d jobs..." % len(jobs)
            job_scheduler.delete_jobs(table_name, jobs)

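# query_yes_no() is used above but not defined in this snippet; a minimal sketch
# (hypothetical implementation) of a yes/no prompt with the shape the call sites
# assume, in the same Python 2 register as the snippet:
def query_yes_no(question):
    """Ask a yes/no question on stdin until a valid answer is given."""
    while True:
        answer = raw_input("%s [y/n] " % question).strip().lower()
        if answer in ('y', 'yes'):
            return True
        if answer in ('n', 'no'):
            return False
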
def boosted_softmax_regression(train_set, test_set, verbose, l_rate, n_classes,
                               n_epochs, batch_size, m_stop, eta, activation,
                               plot_file):
    model = Softmax(data=train_set,
                    n_classes=n_classes,
                    l_rate=l_rate,
                    n_epochs=n_epochs,
                    batch_size=batch_size,
                    activation=activation,
                    verbose=verbose)
    if verbose:
        print bold("Stop 0")
    model.fit()
    accuracies = list()
    predictions = model.predict(test_set[:, :-1])
    predictions = np.argmax(predictions, axis=1)
    accuracies.append(np.mean(test_set[:, -1] == predictions) * 100)
    if verbose:
        print bold("Stop 0") + ", Test Accuracy: % f\n" % accuracies[-1]
    for stop in range(m_stop - 1):
        if verbose:
            print bold("Stop %d" % (stop + 1))  # was a hard-coded "\033[1m...\033[0m" escape
        model.fit(boosted=True)
        predictions = model.predict(test_set[:, :-1])
        predictions = np.argmax(predictions, axis=1)
        accuracies.append(np.mean(test_set[:, -1] == predictions) * 100)
        if verbose:
            print bold("Stop %d" % (stop + 1)) + \
                ", Test Accuracy: % f\n" % accuracies[-1]
        if model.converged:
            break
    np.save(plot_file, accuracies)
    plt.plot(accuracies)
    plt.savefig("plots/boosted-sft-plot.png")
    plt.close()
    return predictions, model

def list_experiments(cluster):
    experiments = experiment_scheduler.load_experiments(cluster)
    if len(experiments) == 0:
        print "No experiments in database %s" % get_db_string("experiments")
    for experiment in experiments:
        print "Name:", bold(experiment["name"])
        print "  Id: %d" % experiment["jobman"]["id"]
        for key in ["table", "gpu"]:
            print "  %s = %s" % (key, str(experiment[key]))
        for name, cluster in experiment["clusters"].iteritems():
            print "  %s:" % name
            for key, value in sorted(cluster.iteritems()):
                print "    %s = %s" % (key, value)
        list_jobs(experiment["table"])
        print

def action_list_degrees():
    clear_screen()
    degrees = api.get_degrees()
    degs = sorted(
        [d for d in degrees if d.acronym[0] == 'M' or d.acronym[0] == 'L'],
        key=lambda d: d.acronym)
    degree_menu = SelectMenu({}, message='Degree Selection Menu')
    opt = 0
    choices = []
    for deg in degs:
        opt += 1
        choices.append(f"{opt}. {deg}")
    degree_menu.add_choices(choices)
    result = degree_menu.select()
    print(bold(result[2:]))
    change_selected_degree(degs[choices.index(result)].id)
    return list_menu.select_action()

def action_list_degree_courses(degree_id: str):
    if degree_id == "":
        return list_menu.select_action(message="First Select a Degree!", clear_before=True)
    clear_screen()
    courses = api.get_degree_courses(degree_id)
    cors = sorted([c for c in courses], key=lambda c: c.acronym)
    course_menu = SelectMenu({}, message='Course Selection Menu')
    opt = 0
    choices = []
    for cor in cors:
        opt += 1
        choices.append(f"{opt}. {cor}")
    course_menu.add_choices(choices)
    result = course_menu.select()
    print(bold(result[2:]))
    change_selected_course(cors[choices.index(result)].id)
    return list_menu.select_action()

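# action_list_degrees() and action_list_degree_courses() repeat the same numbered-menu
# pattern; a hedged refactoring sketch (hypothetical helper, not part of the original
# code) that both could delegate to:
def select_from_menu(items, message):
    """Show a numbered SelectMenu for items, echo the choice in bold, return the item."""
    menu = SelectMenu({}, message=message)
    choices = [f"{i + 1}. {item}" for i, item in enumerate(items)]
    menu.add_choices(choices)
    result = menu.select()
    print(bold(result[result.index('.') + 2:]))  # strip the "N. " prefix
    return items[choices.index(result)]
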
def submitSampleToCRAB(pset, year, samples, **kwargs):
    """Create a CRAB configuration and submit a given list of samples."""
    assert isinstance(samples, list), "Samples list should be a list or tuple! Given %s" % samples

    # USER OPTIONS
    test = kwargs.get('test', 0)
    force = kwargs.get('force', False)
    datatier = 'nanoAOD' if 'nanoaod' in pset.lower() else 'miniAOD'
    version = re.findall("(?<=AOD)v\d+", pset)
    version = version[0] if version else ""
    pluginName = 'Analysis'  # 'PrivateMC'
    splitting = kwargs.get('split', 'FileBased')  # if year==2018 or datatier=='nanoAOD' else 'Automatic'
    tag = kwargs.get('tag', "")
    instance = kwargs.get('instance', 'global')
    nevents = -1
    unitsPerJob = 1  # files per job for 'FileBased'
    eventsPerJob = kwargs.get('eventsPerJob', 10000)  # unitsPerJob for 'EventAwareLumiBased' splitting
    njobs = -1
    ncores = kwargs.get('ncores', 1)  # make sure nCores > nThreads in pset.py
    maxRunTime = kwargs.get('maxRunTime', 6 * 60)  # minutes (was 1250)
    maxMemory = kwargs.get('maxMemory', 3000)  # MB
    priority = kwargs.get('priority', 10)
    workArea = "crab_tasks"  # "crab_projects"
    outdir = '/store/user/%s/%s_%s%s' % (getUsernameFromSiteDB(), datatier, year, formatTag(tag))
    publish = True  # and False
    site = 'T2_CH_CSCS'

    # OVERRIDE
    if test > 0:
        splitting = 'FileBased'
        unitsPerJob = 1  # files per job
        njobs = int(test)
        outdir += '_test'
        publish = False
        samples = samples[:1]
        if nevents < 0:
            nevents = 2500
    if splitting == 'Automatic':
        unitsPerJob = -1
        njobs = -1
        maxRunTime = -1
    if splitting == 'EventAwareLumiBased':
        unitsPerJob = eventsPerJob
        njobs = -1

    # PRINT
    print ">>> " + '=' * 70
    print ">>> year        = %s" % year
    print ">>> pset        = '%s'" % bold(pset)
    print ">>> pluginName  = '%s'" % pluginName
    print ">>> splitting   = '%s'" % splitting
    print ">>> unitsPerJob = %s" % unitsPerJob
    print ">>> nevents     = %s" % nevents
    print ">>> tag         = '%s'" % bold(tag)
    print ">>> njobs       = %s" % njobs
    print ">>> nCores      = %s" % ncores
    print ">>> maxRunTime  = %s" % maxRunTime
    print ">>> maxMemory   = %s" % maxMemory
    print ">>> priority    = %s" % priority
    print ">>> workArea    = '%s'" % workArea
    print ">>> site        = '%s'" % site
    print ">>> outdir      = '%s'" % outdir
    print ">>> publish     = %r" % publish
    print ">>> test        = %r" % test
    print ">>> " + '=' * 70
    if len(samples) == 0:
        print ">>> No samples given..."
        print ">>> "
        return

    # CRAB CONFIGURATION
    config = crabconfig()
    config.General.workArea = workArea
    config.General.transferOutputs = True
    config.General.transferLogs = False
    config.JobType.pluginName = pluginName
    config.JobType.psetName = pset
    config.JobType.pyCfgParams = ["year=%s" % year, "nThreads=%s" % ncores]
    config.JobType.numCores = ncores
    if maxRunTime > 0:
        config.JobType.maxJobRuntimeMin = maxRunTime  # minutes
    if maxMemory > 0:
        config.JobType.maxMemoryMB = maxMemory  # MB
    config.JobType.priority = priority
    config.Data.splitting = splitting
    if unitsPerJob > 0:
        config.Data.unitsPerJob = unitsPerJob
        if njobs > 0:
            config.Data.totalUnits = unitsPerJob * njobs
    config.Site.storageSite = site
    config.Data.outLFNDirBase = outdir
    config.Data.publication = publish

    for dataset in samples:
        # INDIVIDUAL CONFIG
        request = (datatier.lower().replace('aod', '') + '_' + shortenDASPath(dataset))[:100]
        private = dataset.endswith('/USER')
        sites = getSampleSites(dataset, instance=None)
        if private:
            ignoreLocal = True
            inputDBS = "https://cmsweb.cern.ch/dbs/prod/phys03/DBSReader/"
            whitelist = getOptimalWhitelist(sites, instance=instance)
            #whitelist = ['T2_CH_*','T2_DE_*','T2_IT_*']
        else:
            ignoreLocal = False
            inputDBS = "https://cmsweb.cern.ch/dbs/prod/%s/DBSReader/" % instance
            whitelist = []
        outtag = createDatasetOutTag(dataset, tag=tag, datatier=datatier, version=version, year=year)

        # PRINT
        print ">>> " + '-' * 5 + " Submitting... " + '-' * 50
        print ">>> request     = '%s'" % bold(request)
        print ">>> dataset     = '%s'" % bold(dataset)
        print ">>> inputDBS    = '%s'" % inputDBS
        print ">>> sites       = %s" % sites
        print ">>> whitelist   = %s" % whitelist
        print ">>> ignoreLocal = %s" % ignoreLocal
        print ">>> outtag      = '%s'" % outtag
        print ">>> " + '-' * 70

        # INDIVIDUAL CONFIG
        config.General.requestName = request  # max. 100 characters
        config.Data.inputDataset = dataset
        config.Data.inputDBS = inputDBS
        #config.Data.outputPrimaryDataset = 'LQ_test'  # only for 'PrivateMC'
        config.Data.outputDatasetTag = outtag
        config.Data.ignoreLocality = ignoreLocal  # do not require running on the same site the dataset is stored on
        if whitelist:
            config.Site.whitelist = whitelist
        print str(config).rstrip('\n')
        print ">>> " + '-' * 70

        # SUBMIT
        if force:
            print ">>> Do you want to submit this job to CRAB? [y/n]? force"
            print ">>> Submitting..."
            submitCRABConfig(config)
        else:
            while True:
                submit = raw_input(">>> Do you want to submit this job to CRAB? [y/n]? ").strip().lower()
                if any(s in submit for s in ['quit', 'exit']):
                    print ">>> Exiting..."
                    exit(0)
                elif 'force' in submit:
                    submit = 'y'
                    force = True
                if 'y' in submit:
                    print ">>> Submitting..."
                    submitCRABConfig(config)
                    break
                elif 'n' in submit:
                    print ">>> Not submitting."
                    break
                else:
                    print ">>> '%s' is not a valid answer, please choose 'y' or 'n'." % submit
        print ">>> "

def get_options(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cluster", default=None, help="WRITEME")
    parser.add_argument("-v", "--verbose", action="store_true", help="WRITEME")

    subparsers = parser.add_subparsers(dest='command')
    launch_parser = subparsers.add_parser(LAUNCH)
    run_parser = subparsers.add_parser(RUN)
    monitor_parser = subparsers.add_parser(MONITOR)
    list_parser = subparsers.add_parser(LIST)
    set_parser = subparsers.add_parser(SET)
    reset_parser = subparsers.add_parser(RESET)
    remove_parser = subparsers.add_parser(REMOVE)
    plot_parser = subparsers.add_parser(PLOT)

    for subparser in [launch_parser, monitor_parser, list_parser]:
        subparser.add_argument("-c", "--cluster", default=None, help="WRITEME")

    # Launch parser arguments
    launch_parser.add_argument("-l", "--limit", type=int, default=0,
                               help="Limit the number of jobs that can be launched")
    launch_parser.add_argument("-e", "--experiment",
                               help="Only launch the given experiment name")

    # Run parser arguments
    run_parser.add_argument("main_function_path", help="WRITEME")
    run_parser.add_argument("experiment_config", help="WRITEME")
    run_parser.add_argument("job_config", help="WRITEME")
    run_parser.add_argument("-f", "--force", action="store_true", help="WRITEME")

    # Set parser arguments
    set_parser.add_argument("experiment_name", help="WRITEME")
    set_parser.add_argument("job_status",
                            choices=["pending", "running", "completed", "broken"],
                            help="WRITEME")
    set_parser.add_argument("new_status",
                            choices=["pending", "running", "completed"],
                            help="WRITEME")

    # Reset parser arguments
    reset_parser.add_argument("experiment_name", help="WRITEME")
    reset_parser.add_argument("job_status",
                              choices=["running", "completed", "broken"],
                              help="WRITEME")

    # Remove parser arguments
    remove_parser.add_argument("name", help="WRITEME")

    # Plot parser arguments
    plot_parser.add_argument("experiment_name", help="WRITEME")

    options = parser.parse_args(argv)

    if options.cluster:
        print "------------------------%s" % ("-" * len(options.cluster))
        print "Experiments for cluster %s" % bold(options.cluster)
        print "------------------------%s\n" % ("-" * len(options.cluster))
    else:
        print "-----------------------------"
        print "Experiments for %s clusters" % bold("all")
        print "-----------------------------\n"
    return options

# Universal SDR IQ/waterfall image saver.
# (c) 2017 Dmitrii ([email protected])
import logging
import optparse
import utils
import imageProcessing
import fileProcessing
import sys
from version import *

if __name__ == '__main__':
    print(utils.bold('SDR Wav2Img ' + getVersion()))
    parser = optparse.OptionParser()
    parser.add_option("--input", dest="fileInput", help="WAV file name", default="")
    parser.add_option("--output", dest="fileOutput", help="Image file name", default="")
    parser.add_option("--imagewidth", dest="imagewidth", help="image width", default=1024)
    parser.add_option("--average", dest="average", help="FFT average", default=1)
    options, args = parser.parse_args()

    fileInput = options.fileInput
    if len(fileInput) == 0:
        print("Run 'python3 wav2img.py --input=file.wav [--output=file.jpg] [--imagewidth=1024] [--average=1]'")
        sys.exit(0)
    fileOutput = options.fileOutput if len(options.fileOutput) > 0 else fileInput.replace(".wav", ".jpg")
    imageWidth = int(options.imagewidth)
    average = int(options.average)
    print("Convert {} to {}".format(fileInput, fileOutput))

import sys
import json
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from utils import get_valid_token, twitter_track, bold, format_collection_name

tokens = 'tokens/'
DB = MongoClient().twitter_experiment_1

if __name__ == '__main__':
    arg = sys.argv[1:]
    if len(arg) < 1:
        # was a bare 'raise', which only triggers "No active exception to re-raise"
        raise SystemExit('at least one keyword argument is required')
    kw = ' '.join(arg)
    api_token = get_valid_token(tokens)
    if api_token:
        collection_name = format_collection_name(kw)
        print('tracking keyword', bold(kw),
              'and storing in collection', bold(collection_name))
        for tweet in twitter_track(kw, api_token).iter_lines():
            if tweet:
                try:
                    DB[collection_name].insert(json.loads(tweet.decode()))
                except DuplicateKeyError:
                    print('NOTE: tweet already exists in collection')
    else:
        print('No valid API token available in {}.'.format(tokens))

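# format_collection_name() comes from the project's own utils module, which is not
# shown here; a plausible minimal sketch (hypothetical implementation) that turns a
# free-form keyword or query into a MongoDB-safe collection name:
import re

def format_collection_name(query):
    """Lowercase the query and collapse non-alphanumeric runs into underscores."""
    return re.sub(r'[^0-9a-z]+', '_', query.lower()).strip('_')
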
print("Output folder:", outputFolder) print("Save waterfall:", saveWaterfall) print("Save IQ:", saveIQ) print("") for index, frequency in enumerate(frequencies): # Initialize SDR device sdr.setCenterFrequency(frequency) sdr.startStream() timeStart = timesStart[index] if index < len(timesStart) else None timeEnd = timesEnd[index] if index < len(timesEnd) else None timeLimit = timesLimit[index] if index < len(timesLimit) else 9999999 # Show status print(utils.bold("Task {} of {}".format(index + 1, len(frequencies)))) print("Start time:", "-" if timeStart is None else timeStart) print("End time:", "-" if timeEnd is None else timeEnd) print("Limit in seconds:", "-" if timeLimit == 9999999 else timeLimit) print("") # Wait for the start if timeStart is not None: while True: now = datetime.datetime.now() diff = int((timeStart - now).total_seconds()) print( "{:02d}:{:02d}:{:02d}: Recording will be started after {}m {:02d}s..." .format(now.hour, now.minute, now.second, int(diff / 60), diff % 60)) time.sleep(1)
def main(args):
    carddir = args.carddir
    sample = args.sample
    years = args.years
    cardlabel = args.cardlabel
    masses = args.masses
    params = args.params
    tag = args.tag
    copy = args.copy
    remove = not args.keep
    test = args.test
    cmsswdir = "/work/areimers/CMSSW_10_2_10"
    genproddir = "genproductions/bin/MadGraph5_aMCatNLO"
    workdir = "%s/src/%s" % (cmsswdir, genproddir)
    assert os.path.isdir(cmsswdir), error("CMSSW directory '%s' does not exist!" % cmsswdir)
    assert os.path.isdir(workdir), error("Working directory '%s' does not exist!" % workdir)
    oldcarddir = carddir[:]

    # CHECK environment
    assert not os.getenv('CMSSW_BASE'), error(
        "A CMSSW environment is set. Please retry in a clean shell session.")
    assert os.getenv('CMS_PATH'), error(
        "No CMS default environment set! Please do 'source $VO_CMS_SW_DIR/cmsset_default.sh' first.")

    # CREATE POINTS
    if masses:
        keys = ['MASS']
        params = [('MASS', masses)]  # { 'MASS': masses }
    else:
        keys = []
        params = []
        if args.params:
            for param in args.params.split(':'):
                assert '=' in param, "Invalid format '%s'; no '=' for '%s'" % (args.params, param)
                param, values = param[:param.index('=')], param[param.index('=') + 1:].split(',')
                assert param not in keys, error("Key '%s' defined multiple times!" % param)
                keys.append(param)
                params.append((param, values))  # params[param] = values
    if not cardlabel:
        cardlabel = '_'.join(k[0] + '$' + k for k in keys)
    if 'OUTPUT' not in keys:
        keys.append('OUTPUT')
        params.append(('OUTPUT', ["$SAMPLE_%s" % cardlabel]))
    if params:
        points = list(itertools.product(*[v for k, v in params]))
        pattern = os.path.join(carddir, "%s_template*.dat" % sample)
        templates = glob.glob(pattern)
        assert templates, error("Did not find any template cards '%s' in %s!" %
                                (os.path.basename(pattern), carddir))
    else:
        points = []
        templates = []

    # PRINT
    print ">>> " + '=' * 90
    print ">>> cmsswdir   = '%s'" % cmsswdir
    print ">>> genproddir = '%s'" % genproddir
    print ">>> workdir    = '%s'" % workdir
    print ">>> sample     = '%s'" % sample
    print ">>> carddir    = '%s'" % bold(carddir)
    print ">>> params     = %s" % ', '.join("%s: %s" % (bold(k), l) for k, l in params)
    print ">>> templates  = '%s'" % "', '".join(bold(os.path.basename(t)) for t in templates)
    print ">>> " + '=' * 90

    # GENERATE
    if points:
        samplenames = []
        for values in points:
            kwargs = {}
            for key, value in zip(keys, values):
                kwargs[key] = value
            for template in templates:
                cardname = makeCardName(template, cardlabel, **kwargs)
                makeCard(template, cardname, **kwargs)
            samplenames.append("%s_%s" % (sample, makeCardLabel(cardlabel, **kwargs)))
        carddir = os.path.relpath(carddir, workdir)
        os.chdir(workdir)
        for samplename in samplenames:
            generateGridpack(carddir, samplename, remove=remove, copy=copy)
    else:
        carddir = os.path.relpath(carddir, workdir)
        os.chdir(workdir)
        generateGridpack(carddir, sample, remove=remove, copy=copy)

# Universal SDR waterfall image saver.
# (c) 2017 Dmitrii ([email protected])
from wf2img import SDR
import wf2img
import utils

if __name__ == '__main__':
    print(utils.bold("SDR Waterfall2Img version " + wf2img.getVersion()))
    sdr = SDR()

    # List all connected SoapySDR devices
    devices = sdr.listDevices()
    print("Receivers found:", len(devices))
    print("---")
    for d in devices:
        sdr.initDevice(driverName=d['driver'])
        print("Name:", d['driver'])
        print("Description:", d['label'])
        print("Gains:", sdr.getGains())
        print("Sample rates:")
        print(sdr.getSampleRates())
        print("")

frame = hists[0]
for ibin in xrange(1, frame.GetXaxis().GetNbins() + 1):
    xbin = frame.GetBinLowEdge(ibin)
    frame.GetXaxis().SetBinLabel(ibin, str(int(xbin)))
plotHists(hists, xtitle, plotname, header, ctexts, otext=otext)

# CUTFLOW
cutflow = file.Get("cutflow_%s" % channel)
cutflow.SetTitle(trigger)
cutflow.GetXaxis().SetRange(1, 8)
pair = cutflow.GetBinContent(5)
match = cutflow.GetBinContent(6)
if pair:
    eff = match / pair
    error = sqrt(eff * (1. - eff) / pair) * 100.0
    # print the matching efficiency as a percentage (was 100.0*(match-pair)/pair,
    # which is negative and inconsistent with the binomial error computed for eff)
    print ">>> %s pair selection -> trigger-matching = %d/%d = %s" % (
        channel, match, pair, bold("%.2f +- %.2f%%" % (100.0 * eff, error)))
else:
    print ">>> %s pair selection -> trigger-matching = %d/%d ..." % (channel, match, pair)
if cutflow.GetBinContent(1) > 0:
    cutflow.Scale(100. / cutflow.GetBinContent(1))
else:
    print "Warning! Cutflow '%s' is empty!" % cutflow.GetName()
cutflows.append(cutflow)

# PLOT CUTFLOW
print ">>> plotting cutflows"
header = "Channel"
plotname = "%s/cutflow_%s" % (outdir, postfix)
plotHists(cutflows, "", plotname, header, logy=True, otext=otext, y1=0.8)
file.Close()

def on_start(self):
    print("\n%s %45s %40s\n" % (bold("FILE"), bold("STATUS"), bold("RATIO")))

def loadTriggerDataFromJSON(filename, channel=None, isData=True, verbose=False):
    """Help function to load trigger path and object information from a JSON file.
    The JSON format is as follows:
      'year'       -> year
      'filterbits' -> object type ('Electron', 'Muon', 'Tau', ...)
                        -> shorthand for filter patterns in nanoAOD
                             -> bits (powers of 2)
      'hltcombs'   -> data type ('data' or 'mc')
                        -> tau trigger type (e.g. 'etau', 'mutau', 'ditau', 'SingleMuon', ...)
                             -> list of recommended HLT paths
      'hltpaths'   -> HLT path ("HLT_*")
                        -> 'runrange':   in case this path was only available in some data runs (optional)
                        -> 'filter':     last filter associated with this trigger path ("hlt*")
                        -> object type ('Electron', 'Muon', 'Tau', ...)
                             -> 'ptmin':      offline cut on pt
                             -> 'etamax':     offline cut on eta (optional)
                             -> 'filterbits': list of shorthands for filter patterns
    Returns a named tuple 'TriggerData' with attributes
      trigdict = dict of trigger path -> 'Trigger' object
      combdict = dict of channel      -> list of combined triggers ('Trigger' object)
    """
    if verbose:
        print ">>> loadTriggerDataFromJSON: loading '%s'" % filename
    datatype = 'data' if isData else 'mc'
    channel_ = channel
    triggers = []
    combdict = {}
    trigdict = {}

    # OPEN JSON
    with open(filename, 'r') as file:
        data = yaml.safe_load(file)
    for key in ['filterbits', 'hltpaths']:
        assert key in data, "Did not find '%s' key in JSON file '%s'" % (key, filename)

    # FILTER BIT DICTIONARY: object type -> filterbit shortname -> bit
    bitdict = data['filterbits']

    # HLT PATHS with corresponding filter bits, pt, eta cut
    for path, trigobjdict in data['hltpaths'].iteritems():
        runrange = trigobjdict.get('runrange', None) if isData else None
        filters = []
        for obj in objects:  # ensure order
            if obj not in trigobjdict:
                continue
            if obj not in bitdict:
                raise KeyError("Did not find '%s' in filter bit dictionary! Please check the JSON file '%s' with bitdict = %s" % (obj, filename, bitdict))
            ptmin = trigobjdict[obj].get('ptmin', 0.0)
            etamax = trigobjdict[obj].get('etamax', 6.0)
            filterbits = trigobjdict[obj]['filterbits']
            filter = TriggerFilter(obj, filterbits, ptmin, etamax)
            filter.setbits(bitdict[obj])
            filters.append(filter)
        assert len(filters) > 0, "Did not find any valid filters for '%s' in %s" % (path, trigobjdict)
        filters.sort(key=lambda f: (objects.index(f.type), -f.ptmin))  # order by 1) object type, 2) ptmin
        trigger = Trigger(path, filters, runrange=runrange)
        triggers.append(trigger)
        trigdict[path] = trigger

    # COMBINATIONS OF HLT PATHS
    if 'hltcombs' in data:
        if channel_:
            assert channel_ in data['hltcombs'][datatype], "Did not find channel '%s' in JSON file! Available: '%s'" % (channel_, "', '".join(data['hltcombs'][datatype].keys()))
        for channel, paths in data['hltcombs'][datatype].iteritems():
            if channel_ and channel != channel_:
                continue
            #combtrigs = [trigdict[p] for p in paths]
            combdict[channel] = [trigdict[p] for p in paths]  # TriggerCombination(combtrigs)

    # PRINT
    triggers.sort(key=lambda t: t.path)
    if verbose:
        print ">>> %s:" % bold("triggers & filters")
        for trigger in triggers:
            if channel_ and trigger not in combdict[channel_]:
                continue
            print ">>>   %s" % trigger.path
            for filter in trigger.filters:
                print ">>>     %-9s %r" % (filter.type + ':', filter.name)  # "bits=%s"%filter.bits
        print ">>> %s:" % bold("trigger combinations for %s" % datatype)
        for channel, triglist in combdict.iteritems():
            if channel_ and channel != channel_:
                continue
            print ">>>   %s" % channel
            for trigger in triglist:
                path = "'%s'" % trigger.path
                if trigger.runrange:
                    path += ", %d <= run <= %d" % (trigger.runrange[0], trigger.runrange[1])
                print ">>>     " + path
    return TriggerData(trigdict, combdict)

tokens = 'tokens/'
DB = MongoClient().twitter_experiment_1

def loop(query, token, collection, oldmax=None):
    tweets, newmax = twitter_search(query, token, max_id=oldmax)
    if oldmax:
        print('storing', len(tweets), 'tweets since', oldmax)
    for t in tweets:
        try:
            collection.insert(t)
        except DuplicateKeyError:
            print('NOTE: tweet already exists in collection')
    if not oldmax or newmax < oldmax:
        print('running with max_id=', newmax)
        loop(query, token, collection, newmax)

if __name__ == '__main__':
    arg = sys.argv[1:]
    if len(arg) < 1:
        # was a bare 'raise', which only triggers "No active exception to re-raise"
        raise SystemExit('at least one query argument is required')
    query = ' '.join(arg)
    api_token = get_valid_token(tokens)
    if api_token:
        collection_name = format_collection_name(query)
        print('running for query', bold(query),
              'and storing in collection', bold(collection_name))
        loop(query, api_token, DB[collection_name])
    else:
        print('No valid API token available in {}.'.format(tokens))

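# Note: loop() above recurses once per results page, so a long backfill can hit
# Python's default recursion limit (around 1000 frames). An equivalent iterative
# sketch, assuming the same twitter_search() helper:
def loop_iterative(query, token, collection, oldmax=None):
    while True:
        tweets, newmax = twitter_search(query, token, max_id=oldmax)
        if oldmax:
            print('storing', len(tweets), 'tweets since', oldmax)
        for t in tweets:
            try:
                collection.insert(t)
            except DuplicateKeyError:
                print('NOTE: tweet already exists in collection')
        if oldmax and newmax >= oldmax:
            break  # no older page to fetch; same stop condition as the recursion
        print('running with max_id=', newmax)
        oldmax = newmax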