from FinalStateAnalysis.Utilities.prettytable import PrettyTable


def makeTable(users):
    """Build a table containing the overlap between the specified users"""
    tablehead = list(users)
    tablehead.insert(0, 'Overlaps')
    x = PrettyTable(tablehead)
    for user in users:
        userOL = [user]
        for user2 in users:
            # userSets is a module-level dict mapping a user name to a set of
            # items; each cell is the size of the intersection of two sets.
            n = userSets[user].intersection(userSets[user2])
            userOL.append(str(len(n)))
        x.add_row(userOL)
    return x
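# Hedged usage sketch (not from the original source): makeTable() relies on a
# module-level dict ``userSets`` that maps each user name to a set of items
# (for example, file names). The users and sets below are invented purely for
# illustration.
userSets = {
    'alice': set(['fileA', 'fileB', 'fileC']),
    'bob': set(['fileB', 'fileC']),
}
print makeTable(['alice', 'bob'])
# Each cell shows how many items the row user and column user have in common.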
import sys
import fnmatch
import argparse

from FinalStateAnalysis.Utilities.prettytable import PrettyTable


def query_cli(datadefs, argv=None):
    """Command line interface for querying a dict of dataset definitions"""
    if argv is None:
        argv = sys.argv
    parser = argparse.ArgumentParser(description='Query datasets')

    query_group = parser.add_argument_group(
        title='query', description="Query parameters (ORed)")
    query_group.add_argument(
        '--name', required=False, type=str,
        help='"Nice" dataset name (ex: Zjets_M50)')
    query_group.add_argument(
        '--dataset', required=False, type=str,
        help='DBS dataset name (ex: '
        '/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/*)')

    output_group = parser.add_argument_group(
        title='output', description="Output parameters")
    output_group.add_argument(
        '--columns', nargs='+', help='Output columns',
        default=['name', 'datasetpath'],
        choices=['name', 'datasetpath', 'pu', 'x_sec', 'lumi_mask',
                 'firstRun', 'lastRun'])
    output_group.add_argument(
        '--sort', default='name', help='Column to sort by')
    output_group.add_argument(
        '--noborder', action='store_false', default=True,
        help='Hide the table border')

    args = parser.parse_args(argv[1:])

    if not args.name and not args.dataset:
        print "Must specify either --name or --dataset. Did you forget to quote a '*'?"
        sys.exit(1)

    table = PrettyTable(args.columns)
    for col in args.columns:
        table.set_field_align(col, 'l')  # left-align every output column

    for key in sorted(datadefs.keys()):
        value = datadefs[key]
        # A dataset matches if either pattern matches (query parameters are ORed)
        matched = False
        if args.name and fnmatch.fnmatch(key, args.name):
            matched = True
        if args.dataset and fnmatch.fnmatch(value['datasetpath'], args.dataset):
            matched = True
        if matched:
            row = []
            for column in args.columns:
                if column == 'name':
                    row.append(key)
                else:
                    row.append(value.get(column, '-'))
            table.add_row(row)

    # printt() is the print method of the old bundled PrettyTable API
    table.printt(sortby=args.sort, border=(args.noborder))
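# Hedged usage sketch (not from the original source): query_cli() takes a dict
# of dataset definitions keyed by their "nice" names. The definition below is
# invented for illustration; real entries carry many more fields.
example_datadefs = {
    'Zjets_M50': {
        'datasetpath': '/DYJetsToLL_M-50_TuneZ2Star_8TeV-madgraph-tarball/fake-v1/AODSIM',
        'x_sec': 3503.71,
    },
}
# argv[0] is skipped by parse_args(argv[1:]), so any program name works here.
query_cli(example_datadefs,
          argv=['query', '--name', 'Zjets*', '--columns', 'name', 'x_sec'])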
if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('file', help='AOD file') parser.add_argument('--wrt', default=None, help='Original to compare to') args = parser.parse_args() file_info = query_file(args.file) total = sum(x[2] for x in file_info['products']) table = PrettyTable(['Branch', 'Uncompress', 'Compressed', 'Percent']) for branch, unc, comp in file_info['products']: percent = 100. * comp / total table.add_row([branch, unc, comp, '%0.f%%' % percent]) print "Branches" print table print "Events: %i" % file_info['events'] print "Total size: %i kb" % (file_info['size'] / 1000.) print "=> %0.0f kb/evt" % (file_info['size'] / 1000. / file_info['events']) if args.wrt: wrt_info = query_file(args.wrt) print "" print "===SOURCE FILE===" print "Events: %i" % wrt_info['events'] print "Total size: %i kb" % (wrt_info['size'] / 1000.) print "=> %0.0f kb/evt" % (
import json
import argparse

from FinalStateAnalysis.Utilities.prettytable import PrettyTable

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('tuple_info',
                        help='JSON file with list of published PAT tuples')
    parser.add_argument('output', help='Output .txt file')
    args = parser.parse_args()

    x = PrettyTable(["AOD DBS", "PAT DBS", "Files", "Events"])
    x.set_field_align("AOD DBS", "l")  # Left align the dataset name columns
    x.set_field_align("PAT DBS", "l")
    x.set_padding_width(1)  # One space between column edges and contents (the default)

    with open(args.tuple_info, 'r') as input:
        input_dict = json.load(input)
        for key in sorted(input_dict.keys()):
            val = input_dict[key]
            x.add_row([
                val['parent'],
                key,
                val['nfiles'],
                val['nevents'],
            ])

    with open(args.output, 'w') as output:
        output.write(str(x) + '\n')
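# Hedged sketch (not part of the original source) of the tuple_info JSON layout
# this script expects, inferred from the keys read above. The dataset names,
# file count, event count, and output file name are all invented.
import json
example = {
    "/DYJetsToLL_M-50/fake-pat-tuple-v1/USER": {
        "parent": "/DYJetsToLL_M-50/fake-gen-sim-v1/AODSIM",
        "nfiles": 42,
        "nevents": 123456,
    },
}
with open('tuple_info.json', 'w') as handle:
    json.dump(example, handle, indent=2)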