def _main():
    parser = ScriptOptions(
        usage=
        '%s [OPTIONS] <data path> <dataset name> <pattern (*.root) / files>')
    parser.add_text(None,
                    'p',
                    'path',
                    dest='dataset',
                    default='.',
                    help='Path to dataset files')
    parser.add_bool(None,
                    'r',
                    'recurse',
                    dest='source recurse',
                    default=False,
                    help='Recurse into subdirectories if supported')
    add_dataset_list_options(parser)
    options = parser.script_parse(
        arg_keys=['dataset', 'dataset name pattern', 'filename filter'])

    def _conditional_set(target, cond, value):
        if options.config_dict.get(
                cond) and not options.config_dict.get(target):
            options.config_dict[target] = value

    _conditional_set('dataset name pattern', 'delimeter dataset key',
                     '/PRIVATE/@DELIMETER_DS@')
    _conditional_set('block name pattern', 'delimeter block key',
                     '@DELIMETER_B@')
    discover_dataset('ScanProvider', options.config_dict)
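
The _conditional_set helper above implements a small conditional-default pattern: a config key is only filled in when a trigger key is present and the target key is still unset. A minimal standalone sketch of the same behavior on a plain dict (the key names are taken from the example; the dict itself is illustrative):

def conditional_set(config, target, cond, value):
    # fill in the target key only if the trigger key is set
    # and the target was not given explicitly
    if config.get(cond) and not config.get(target):
        config[target] = value

config = {'delimeter dataset key': 'MY_DS'}
conditional_set(config, 'dataset name pattern',
                'delimeter dataset key', '/PRIVATE/@DELIMETER_DS@')
print(config['dataset name pattern'])  # -> /PRIVATE/@DELIMETER_DS@
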
def _main():
	parser = ScriptOptions()
	parser.add_text(None, 's', 'se', default=None,
		help='Resolve LFN on CMS SE into PFN')
	parser.add_text(None, ' ', 'se-prot', default='srmv2',
		help='Name of default SE protocol [Default:%s]')
	parser.add_text(None, ' ', 'lfn', default='/store/user/<hypernews_name>',
		help='Name of default LFN [Default:%s]')
	options = parser.script_parse()

	if options.opts.se:
		return _lookup_pfn(options)
Example #3
def _main():
	parser = ScriptOptions(usage='%s [OPTIONS] <BasePlugin>')
	parser.add_bool(None, 'a', 'show_all', default=False, help='Show plugins without user alias')
	parser.add_bool(None, 'p', 'parents', default=False, help='Show plugin parents')
	parser.add_bool(None, 'c', 'children', default=False, help='Show plugin children')
	options = parser.script_parse()
	if len(options.args) != 1:
		parser.exit_with_usage()
	pname = options.args[0]
	if options.opts.parents:
		def _get_cls_info(cls):
			return {'Name': cls.__name__, 'Alias': str.join(', ', lidfilter(cls.get_class_name_list()[1:]))}
		display_plugin_list(lmap(_get_cls_info, Plugin.get_class(pname).iter_class_bases()),
			show_all=True, sort_key=None, title='Parents of plugin %r' % pname)
	else:
		sort_key = 'Name'
		if options.opts.children:
			sort_key = 'Inherit'
		display_plugin_list(get_plugin_list(pname, inherit_prefix=options.opts.children),
			show_all=options.opts.children or options.opts.show_all,
			sort_key=sort_key, title='Available plugins of type %r' % pname)
Example #4
def _main():
    parser = ScriptOptions(usage='%s [OPTIONS] <DBS dataset path>')
    parser.add_text(None,
                    '',
                    'producer',
                    default='SimpleNickNameProducer',
                    help='Name of the nickname producer')
    parser.add_bool(None,
                    'L',
                    'nick-list',
                    default=False,
                    help='List available nickname producer classes')
    options = parser.script_parse()

    if options.opts.nick_list:
        display_plugin_list_for('NickNameProducer',
                                title='Available nickname producer classes')
    if not options.args:
        parser.exit_with_usage()

    dataset_path = options.args[0]
    if ('*' in dataset_path) or os.path.exists(dataset_path):
        dataset_provider = 'DBS3Provider'
        if os.path.exists(dataset_path):
            dataset_provider = 'ListProvider'
        provider = Plugin.create_instance(dataset_provider, gc_create_config(),
                                          'dataset', dataset_path)
        dataset_path_list = provider.get_dataset_name_list()
    else:
        dataset_path_list = [dataset_path]

    nn_prod = Plugin.get_class('NickNameProducer').create_instance(
        options.opts.producer, gc_create_config(), 'dataset')
    ConsoleTable.create([(1, 'Dataset'), (0, 'Nickname')],
                        lmap(
                            lambda ds: {
                                0: nn_prod.get_name('', ds, None),
                                1: ds
                            }, dataset_path_list), 'll')
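
The ConsoleTable call above builds one row dict per dataset, keyed by column id (0 for the nickname, 1 for the dataset path), with the column order given separately as [(1, 'Dataset'), (0, 'Nickname')]. Assuming grid-control's lmap is the usual list(map(...)) shim, the row construction is equivalent to this plain-Python sketch (fake_nick is a hypothetical stand-in for nn_prod.get_name('', ds, None)):

def fake_nick(dataset_path):
    # hypothetical nickname rule - derive a short name from the first path segment
    return dataset_path.strip('/').split('/')[0].lower()

dataset_path_list = ['/MinBias/Winter10/GEN-SIM', '/QCD_Pt30/Winter10/GEN-SIM']
rows = [{0: fake_nick(ds), 1: ds} for ds in dataset_path_list]
# rows is what lmap(lambda ds: {0: ..., 1: ds}, dataset_path_list) produces above
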
def _main():
	parser = ScriptOptions(usage='%s [OPTIONS] <data path> <dataset name> <pattern (*.root) / files>')
	parser.add_text(None, 'p', 'path', dest='dataset', default='.',
		help='Path to dataset files')
	parser.add_bool(None, 'r', 'recurse', dest='source recurse', default=False,
		help='Recurse into subdirectories if supported')
	add_dataset_list_options(parser)
	options = parser.script_parse(arg_keys=['dataset', 'dataset name pattern', 'filename filter'])

	def _conditional_set(target, cond, value):
		if options.config_dict.get(cond) and not options.config_dict.get(target):
			options.config_dict[target] = value

	_conditional_set('dataset name pattern', 'delimeter dataset key', '/PRIVATE/@DELIMETER_DS@')
	_conditional_set('block name pattern', 'delimeter block key', '@DELIMETER_B@')
	discover_dataset('ScanProvider', options.config_dict)
Example #6
def _main():
    parser = ScriptOptions()
    parser.add_text(None,
                    None,
                    'url',
                    default='http://pccmsdqm04.cern.ch/runregistry/xmlrpc',
                    help='URL to runregistry [Default:%s]')
    parser.add_text(
        None,
        None,
        'run',
        default='Collisions10',
        help=
        'Specify run era that will be queried for the lumi json file [Default:%s]'
    )
    options = parser.script_parse()
    server_proxy_cls = resolve_fun('xmlrpc.client:ServerProxy',
                                   'xmlrpclib:ServerProxy')
    server = server_proxy_cls(options.opts.url).DataExporter
    data = server.export('RUNLUMISECTION', 'GLOBAL', 'json',
                         {'groupName': options.opts.run})
    logging.getLogger('script').info(json.dumps(data))
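
resolve_fun papers over the Python 2/3 module rename: ServerProxy lives in xmlrpc.client on Python 3 but in xmlrpclib on Python 2. A minimal sketch of such a resolver, assuming it simply returns the first 'module:attribute' spec that imports (the real grid-control helper may differ in detail):

import importlib

def resolve_fun(*spec_list):
    # return the first attribute that can be resolved from a 'module:attr' spec
    for spec in spec_list:
        module_name, attr_name = spec.split(':')
        try:
            return getattr(importlib.import_module(module_name), attr_name)
        except (ImportError, AttributeError):
            continue
    raise ImportError('unable to resolve any of %r' % (spec_list,))

ServerProxy = resolve_fun('xmlrpc.client:ServerProxy', 'xmlrpclib:ServerProxy')
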
def _main():
	parser = ScriptOptions(usage='%s [OPTIONS] <config file / work directory>')
	parser.add_text(None, 'J', 'job-selector', dest='external job selector', default='',
		help='Specify which jobs to process')
	parser.add_text(None, 'i', 'info-scanner',
		help='Specify which info scanner to run')
	parser.add_text(None, 'm', 'event-mode', dest='mode', default='CMSSW-Out',
		help='Specify how to determine events - available: [CMSSW-Out], CMSSW-In, DataMod')
	parser.add_text(None, 'l', 'lfn', dest='lfn marker', default='/store/',
		help='Assume everything starting with marker to be a logical file name')
	parser.add_bool(None, 'c', 'config', dest='include config infos', default=False,
		help='CMSSW specific: Add configuration data to metadata')
	parser.add_bool(None, 'p', 'parents', dest='include parent infos', default=False,
		help='CMSSW specific: Add parent infos to metadata')
	add_dataset_list_options(parser)
	options = parser.script_parse(arg_keys=['dataset'])

	# Positional parameters override options
	if not options.args:
		parser.exit_with_usage()
	tmp = {'cmssw-out': 'CMSSW_EVENTS_WRITE', 'cmssw-in': 'CMSSW_EVENTS_READ', 'datamod': 'MAX_EVENTS'}
	if options.opts.info_scanner:
		options.config_dict['scanner'] = options.opts.info_scanner.replace(',', ' ')
	options.config_dict['events key'] = tmp.get(options.config_dict['mode'].lower(), '')
	sys.exit(discover_dataset('GCProvider', options.config_dict))
def _parse_cmd_line():
    help_msg = '\n\nDEFAULT: The default is to download the SE files and check them with MD5 hashes.'
    help_msg += '\n * In case all files are transferred successfully, the job is marked'
    help_msg += '\n   as already downloaded, so that the files are not copied again.'
    help_msg += '\n * Failed transfer attempts will mark the job as failed, so that it'
    help_msg += '\n   can be resubmitted.\n'
    parser = ScriptOptions(usage='%s [OPTIONS] <config file>' + help_msg)

    def _add_bool_opt(group,
                      short_pair,
                      option_base,
                      help_base,
                      default=False,
                      option_prefix_pair=('', 'no'),
                      help_prefix_pair=('', 'do not '),
                      dest=None):
        def _create_help(idx):
            help_def = ''
            if (default and (idx == 0)) or ((not default) and (idx == 1)):
                help_def = ' [Default]'
            return help_prefix_pair[idx] + help_base + help_def

        def _create_opt(idx):
            return str.join(
                '-', option_prefix_pair[idx].split() + option_base.split())

        parser.add_flag(group,
                        short_pair or '  ', (_create_opt(0), _create_opt(1)),
                        default=default,
                        dest=dest,
                        help_pair=(_create_help(0), _create_help(1)))

    _add_bool_opt(
        None,
        'l ',
        'loop',
        default=False,
        help_base='loop over jobs until all files are successfully processed')
    _add_bool_opt(None,
                  'L ',
                  'infinite',
                  default=False,
                  help_base='process jobs in an infinite loop')
    _add_bool_opt(None,
                  None,
                  'shuffle',
                  default=False,
                  help_base='shuffle job processing order')
    parser.add_text(None,
                    'J',
                    'job-selector',
                    default=None,
                    help='specify the job selector')
    parser.add_text(
        None,
        'T',
        'token',
        default='VomsProxy',
        help=
        'specify the access token used to determine ability to download / delete'
        + ' - VomsProxy or TrivialAccessToken')
    parser.add_list(None,
                    'S',
                    'select-se',
                    default=None,
                    help='specify the SE paths to process')
    parser.add_bool(None,
                    'd',
                    'delete',
                    default=False,
                    help='perform file deletions')
    parser.add_bool(None,
                    'R',
                    'hide-results',
                    default=False,
                    help='specify if the transfer overview should be hidden')
    parser.add_text(
        None,
        't',
        'threads',
        default=0,
        help=
        'how many jobs should be processed in parallel [Default: no multithreading]'
    )
    parser.add_text(
        None,
        None,
        'slowdown',
        default=2,
        help='specify delay between processing jobs [Default: 2 sec]')

    parser.section('jobs', 'Job state / flag handling')
    _add_bool_opt('jobs',
                  None,
                  'job-success',
                  default=True,
                  help_base='only select successful jobs')
    _add_bool_opt('jobs',
                  None,
                  'mark-rm',
                  default=False,
                  option_prefix_pair=('ignore', 'use'),
                  dest='mark_ignore_rm',
                  help_base='mark about successfully removed jobs',
                  help_prefix_pair=('ignore ', 'use '))
    _add_bool_opt('jobs',
                  None,
                  'mark-dl',
                  default=True,
                  help_base='mark successfully downloaded jobs as such')
    _add_bool_opt('jobs',
                  None,
                  'mark-dl',
                  default=False,
                  option_prefix_pair=('ignore', 'use'),
                  dest='mark_ignore_dl',
                  help_base='mark about successfully downloaded jobs',
                  help_prefix_pair=('ignore ', 'use '))
    _add_bool_opt('jobs',
                  None,
                  'mark-fail',
                  default=True,
                  help_base='mark jobs failing verification as such')
    _add_bool_opt('jobs',
                  None,
                  'mark-empty-fail',
                  default=False,
                  help_base='mark jobs without any files as failed')

    parser.section('down', 'Download options')
    _add_bool_opt('down',
                  'v ',
                  'verify-md5',
                  default=True,
                  help_base='MD5 verification of SE files',
                  help_prefix_pair=('enable ', 'disable '))
    _add_bool_opt('down',
                  None,
                  '',
                  default=False,
                  option_prefix_pair=('skip-existing', 'overwrite'),
                  dest='skip_existing',
                  help_base='files which are already on local disk',
                  help_prefix_pair=('skip ', 'overwrite '))
    parser.add_text('down',
                    'o',
                    'output',
                    default=None,
                    help='specify the local output directory')
    parser.add_text('down',
                    'O',
                    'tmp-dir',
                    default='/tmp',
                    help='specify the local tmp directory')
    parser.add_text(
        'down',
        'r',
        'retry',
        help='how often should a transfer be attempted [Default: 0]')

    parser.section('file', 'Local / SE file handling during download')
    option_help_base_list = [
        ('local-ok', 'files of successful jobs in local directory'),
        ('local-fail', 'files of failed jobs in local directory'),
        ('se-ok', 'files of successful jobs on SE'),
        ('se-fail', 'files of failed jobs on the SE'),
    ]
    for (option, help_base) in option_help_base_list:
        _add_bool_opt('file',
                      None,
                      option,
                      default=False,
                      option_prefix_pair=('rm', 'keep'),
                      help_base=help_base,
                      help_prefix_pair=('remove ', 'keep '))

    parser.section('short_delete', 'Shortcuts for delete options')
    parser.add_fset(
        'short_delete',
        'D',
        'just-delete',
        help='Delete files from SE and local area - shorthand for:'.ljust(100)
        + '%s',
        flag_set='--delete --use-mark-rm --ignore-mark-dl ' +
        '--rm-se-fail --rm-local-fail --rm-se-ok --rm-local-ok')

    parser.section('short_down', 'Shortcuts for download options')
    parser.add_fset(
        'short_down',
        'm',
        'move',
        help='Move files from SE - shorthand for:'.ljust(100) + '%s',
        flag_set='--verify-md5 --overwrite --mark-dl --use-mark-dl --mark-fail '
        + '--rm-se-fail --rm-local-fail --rm-se-ok --keep-local-ok')
    parser.add_fset(
        'short_down',
        'c',
        'copy',
        help='Copy files from SE - shorthand for:'.ljust(100) + '%s',
        flag_set='--verify-md5 --overwrite --mark-dl --use-mark-dl --mark-fail '
        + '--rm-se-fail --rm-local-fail --keep-se-ok --keep-local-ok')
    parser.add_fset(
        'short_down',
        'j',
        'just-copy',
        help='Just copy files from SE - shorthand for:'.ljust(100) + '%s',
        flag_set=
        '--verify-md5 --skip-existing --no-mark-dl --ignore-mark-dl --no-mark-fail '
        + '--keep-se-fail --keep-local-fail --keep-se-ok --keep-local-ok')
    parser.add_fset(
        'short_down',
        's',
        'smart-copy',
        help='Copy correct files from SE, but remember already downloaded ' +
        'files and delete corrupt files - shorthand for: '.ljust(100) + '%s',
        flag_set='--verify-md5 --mark-dl --mark-fail --rm-se-fail ' +
        '--rm-local-fail --keep-se-ok --keep-local-ok')
    parser.add_fset(
        'short_down',
        'V',
        'just-verify',
        help='Just verify files on SE - shorthand for:'.ljust(100) + '%s',
        flag_set='--verify-md5 --no-mark-dl --keep-se-fail ' +
        '--rm-local-fail --keep-se-ok --rm-local-ok --ignore-mark-dl')

    options = parser.script_parse(verbose_short=None)
    if len(options.args) != 1:  # we need exactly one positional argument (config file)
        parser.exit_with_usage(msg='Config file not specified!')
    options.opts.threads = int(options.opts.threads)
    return options
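
The _add_bool_opt helper above derives paired long options by joining a prefix with the option base; because str.split() drops empty strings, an empty prefix or an empty base simply disappears from the joined name. A few illustrative cases, replicating _create_opt outside the parser:

def create_opt(option_prefix_pair, option_base, idx):
    # same joining rule as _create_opt above
    return str.join('-', option_prefix_pair[idx].split() + option_base.split())

assert create_opt(('', 'no'), 'verify-md5', 1) == 'no-verify-md5'        # --verify-md5 / --no-verify-md5
assert create_opt(('rm', 'keep'), 'local-ok', 0) == 'rm-local-ok'        # --rm-local-ok / --keep-local-ok
assert create_opt(('skip-existing', 'overwrite'), '', 1) == 'overwrite'  # empty base: the prefixes stand alone
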
Example #9
def _main():
    signal.signal(signal.SIGINT, handle_abort_interrupt)

    parser = ScriptOptions()
    parser.section('expr', 'Manipulate lumi filter expressions',
                   '%s <lumi filter expression>')
    parser.add_bool('expr',
                    'G',
                    'gc',
                    default=False,
                    help='Output grid-control compatible lumi expression')
    parser.add_bool('expr',
                    'J',
                    'json',
                    default=False,
                    help='Output JSON file with lumi expression')
    parser.add_bool('expr',
                    'F',
                    'full',
                    default=False,
                    help='Output JSON file with full expression')

    parser.section('calc',
                   'Options which allow luminosity related calculations',
                   '%s <config file> [<job selector>]')
    parser.add_text('calc',
                    'O',
                    'output-dir',
                    default=None,
                    help='Set output directory (default: work directory)')
    parser.add_bool(
        'calc',
        'g',
        'job-gc',
        default=False,
        help=
        'Output grid-control compatible lumi expression for processed lumi sections'
    )
    parser.add_bool('calc',
                    'j',
                    'job-json',
                    default=False,
                    help='Output JSON file with processed lumi sections')
    parser.add_bool('calc',
                    'e',
                    'job-events',
                    default=False,
                    help='Get number of events processed')
    parser.add_bool(
        'calc',
        'p',
        'parameterized',
        default=False,
        help=
        'Use output file name to categorize output (useful for parameterized tasks)'
    )
    parser.add_text(
        'calc',
        ' ',
        'replace',
        default='job_%d_',
        help='Pattern to replace for parameterized jobs (default: job_%%d_)')
    options = parser.script_parse()

    if options.opts.gc or options.opts.json or options.opts.full:
        if not options.args:
            options.parser.exit_with_usage(options.parser.usage('expr'))
        return convert_lumi_expr(options.opts, options.args)

    if options.opts.job_json or options.opts.job_gc or options.opts.job_events:
        if not options.args:
            options.parser.exit_with_usage(options.parser.usage('calc'))
        script_obj = get_script_object_cmdline(options.args, only_success=True)
        work_dn = script_obj.config.get_work_path()
        reader = None
        try:
            reader = DataSplitter.load_partitions(
                os.path.join(work_dn, 'datamap.tar'))
        except Exception:
            clear_current_exception()
        jobnum_list = sorted(
            script_obj.job_db.get_job_list(ClassSelector(JobClass.SUCCESS)))
        return lumi_calc(options.opts, work_dn, jobnum_list, reader)
Example #10
def _parse_cmd_line():
    parser = ScriptOptions()
    parser.section('back', 'Backend debugging', '%s ...')
    parser.add_text('back',
                    '',
                    'backend-list',
                    default='',
                    help='Specify backend discovery plugin')

    parser.section('part', 'Dataset Partition debugging',
                   '%s <path to partition file> ...')
    parser.add_bool('part',
                    '',
                    'partition-list',
                    default=False,
                    help='List all dataset partitions')
    parser.add_bool('part',
                    '',
                    'partition-list-invalid',
                    default=False,
                    help='List invalidated dataset partitions')
    parser.add_text('part',
                    '',
                    'partition-key-list',
                    default='',
                    help='Select dataset partition information to display')

    parser.section('jobs', 'Jobs debugging',
                   '%s <config file / job file> ... ')
    parser.add_text('jobs',
                    '',
                    'job-selector',
                    default='',
                    help='Display jobs matching selector')
    parser.add_bool('jobs',
                    '',
                    'job-reset-attempts',
                    default=False,
                    help='Reset the attempt counter')
    parser.add_text('jobs',
                    '',
                    'job-force-state',
                    default='',
                    help='Force new job state')
    parser.add_text('jobs',
                    '',
                    'job-show-jdl',
                    default='',
                    help='Show JDL file if available')

    parser.section('data', 'Dataset debugging',
                   '%s <dataset file> <dataset file> ...')
    parser.add_bool('data',
                    '',
                    'dataset-show-diff',
                    default=False,
                    help='Show difference between datasets')
    parser.add_bool('data',
                    '',
                    'dataset-show-removed',
                    default=False,
                    help='Find removed dataset blocks')

    parser.add_text(None,
                    'd',
                    'logfile-decode',
                    default='',
                    help='Decode log files')
    options = parser.script_parse()

    # Parse partition key list
    if options.opts.partition_key_list in ('', 'all'):
        partition_key_list = DataSplitter.enum_value_list
    else:
        partition_key_list = lmap(DataSplitter.str2enum,
                                  options.opts.partition_key_list.split(','))
    if None in partition_key_list:
        logging.warning('Available keys: %r', DataSplitter.enum_name_list)
    options.opts.partition_key_list = partition_key_list

    return options
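
DataSplitter.str2enum evidently maps a partition key name to its enum value and yields None for unknown names - that is what the 'None in partition_key_list' check catches. A standalone sketch of the same lookup-and-warn pattern with hypothetical key names (DataSplitter's real enum list differs):

enum_name_list = ['Dataset', 'BlockName', 'FileList', 'NEntries']  # hypothetical keys
enum_value_list = list(range(len(enum_name_list)))
str2enum = dict(zip(enum_name_list, enum_value_list)).get  # unknown names map to None

partition_key_list = list(map(str2enum, 'Dataset,NEntries,Bogus'.split(',')))
if None in partition_key_list:  # at least one key did not resolve
    print('Available keys: %r' % enum_name_list)
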
Example #11
def _parse_cmd_line():
    parser = ScriptOptions(usage='%s [OPTIONS] <parameter definition>')
    parser.add_accu(None,
                    'c',
                    'collapse',
                    default=0,
                    help='Do not collapse dataset infos in display')
    parser.add_bool(None,
                    'a',
                    'active',
                    default=False,
                    help='Show activity state')
    parser.add_bool(None,
                    'd',
                    'disabled',
                    default=False,
                    help='Show disabled parameter sets')
    parser.add_bool(None,
                    'l',
                    'list-parameters',
                    default=False,
                    help='Display parameter list')
    parser.add_bool(None,
                    'L',
                    'show-sources',
                    default=False,
                    help='Show parameter sources')
    parser.add_bool(None,
                    't',
                    'untracked',
                    default=False,
                    help='Display untracked variables')
    parser.add_bool(None,
                    'T',
                    'persistent',
                    default=False,
                    help='Work with persistent parameters')
    parser.add_list(None,
                    'p',
                    'parameter',
                    default=[],
                    help='Specify parameters')
    parser.add_text(
        None,
        'D',
        'dataset',
        default='',
        help='Add dataset splitting (use "True" to simulate a dataset)')
    parser.add_text(
        None,
        'j',
        'job',
        default=None,
        help='Select job to display (used for unbounded parameter spaces)')
    parser.add_text(None,
                    'F',
                    'factory',
                    default=None,
                    help='Select parameter source factory')
    parser.add_text(None,
                    'o',
                    'output',
                    default='',
                    help='Show only specified parameters')
    parser.add_text(None,
                    'S',
                    'save',
                    default='',
                    help='Saves information to specified file')
    parser.add_text(None,
                    'V',
                    'visible',
                    default='',
                    help='Set visible variables')
    options = parser.script_parse()
    if len(options.args) != 1:
        parser.exit_with_usage()
    return options
Example #12
def _main():
    parser = ScriptOptions(usage='%s [OPTIONS] <BasePlugin>')
    parser.add_bool(None,
                    'a',
                    'show_all',
                    default=False,
                    help='Show plugins without user alias')
    parser.add_bool(None,
                    'p',
                    'parents',
                    default=False,
                    help='Show plugin parents')
    parser.add_bool(None,
                    'c',
                    'children',
                    default=False,
                    help='Show plugin children')
    options = parser.script_parse()
    if len(options.args) != 1:
        parser.exit_with_usage()
    pname = options.args[0]
    if options.opts.parents:

        def _get_cls_info(cls):
            return {
                'Name': cls.__name__,
                'Alias': str.join(', ',
                                  lidfilter(cls.get_class_name_list()[1:]))
            }

        display_plugin_list(lmap(_get_cls_info,
                                 Plugin.get_class(pname).iter_class_bases()),
                            show_all=True,
                            sort_key=None,
                            title='Parents of plugin %r' % pname)
    else:
        sort_key = 'Name'
        if options.opts.children:
            sort_key = 'Inherit'
        display_plugin_list(
            get_plugin_list(pname, inherit_prefix=options.opts.children),
            show_all=options.opts.children or options.opts.show_all,
            sort_key=sort_key,
            title='Available plugins of type %r' % pname)
def _parse_cmd_line():
	parser = ScriptOptions(usage='%s [OPTIONS] <config file / work directory>')
	parser.section('disc', 'Discovery options - ignored in case dbs input file is specified')
	# options that are used as config settings for InfoScanners
	parser.add_text('disc', 'n', 'dataset-name-pattern', default='',
		help='Specify dbs path name - Example: DataSet_@NICK@_@VAR@')
	parser.add_text('disc', 'H', 'dataset-hash-keys', default='',
		help='Include additional variables in dataset hash calculation')
	parser.add_text('disc', 'J', 'source-job-selector', default='',
		help='Specify dataset(s) to process')
	parser.add_bool('disc', 'p', 'no-parents', default=False,
		help='Do not add parent information to DBS')
	parser.add_bool('disc', 'm', 'merge-parents', default=False,
		help='Merge output files from different parent blocks ' +
			'into a single block [Default: Keep boundaries]')
	parser.add_text('disc', 'P', 'parent-source', default='',
		help='Override parent information source - to bootstrap a reprocessing on local files')
	# options directly used by this script
	parser.add_text('disc', 'T', 'datatype', default=None,
		help='Supply dataset type in case cmssw report did not specify it - valid values: "mc" or "data"')
	parser.add_bool('disc', 'j', 'jobhash', default=False,
		help='Use hash of all config files in job for dataset key calculation')
	parser.add_bool('disc', 'u', 'unique-cfg', default=False,
		help='Circumvent edmConfigHash collisions so each dataset ' +
			'is stored with unique config information')
	parser.add_text('disc', 'G', 'globaltag', default='crab2_tag',
		help='Specify global tag')

	parser.section('proc', 'Processing mode')
	parser.add_bool('proc', 'd', 'discovery', default=False,
		help='Enable discovery mode - just collect file information and exit')
	parser.add_text('proc', ' ', 'tempdir', default='',
		help='Override temp directory')
	parser.add_bool('proc', 'i', 'no-import', default=True, dest='do_import',
		help='Disable import of new datasets into target DBS instance - ' +
			'only temporary json files are created')
	parser.add_bool('proc', 'I', 'incremental', default=False,
		help='Skip import of existing files - Warning: this destroys coherent block structure!')
	parser.add_bool('proc', 'o', 'open-blocks', default=True, dest='do_close_blocks',
		help='Keep blocks open for addition of further files [Default: Close blocks]')

	parser.section('dbsi', 'DBS instance handling')
	parser.add_text('dbsi', 't', 'target-instance', default='https://cmsweb.cern.ch/dbs/prod/phys03',
		help='Specify target dbs instance url')
	parser.add_text('dbsi', 's', 'source-instance', default='https://cmsweb.cern.ch/dbs/prod/global',
		help='Specify source dbs instance url(s), where parent datasets are taken from')

	parser.add_text(None, 'F', 'input-file', default=None,
		help='Specify dbs input file to use instead of scanning job output')
	parser.add_bool(None, 'c', 'continue-migration', default=False,
		help='Continue an already started migration')

	return parser.script_parse()
Example #14
def _parse_cmd_line():
    usage = '%s [OPTIONS] <DBS dataset path> | <dataset cache file> ...' % sys.argv[0]
    parser = ScriptOptions(usage)
    parser.add_bool(None,
                    'N',
                    'list-dataset-names',
                    default=False,
                    help='Show list of all dataset names in query / file')
    parser.add_bool(None,
                    'l',
                    'list-datasets',
                    default=False,
                    help='Show list of all datasets in query / file')
    parser.add_bool(None,
                    'b',
                    'list-blocks',
                    default=False,
                    help='Show list of blocks of the dataset(s)')
    parser.add_bool(None,
                    'f',
                    'list-files',
                    default=False,
                    help='Show list of all files grouped according to blocks')
    parser.add_bool(None,
                    's',
                    'list-storage',
                    default=False,
                    help='Show list of locations where data is stored')
    parser.add_bool(None,
                    'm',
                    'list-metadata',
                    default=False,
                    help='Get metadata information of dataset files')
    parser.add_bool(None,
                    'M',
                    'list-metadata-common',
                    default=False,
                    help='Get common metadata information of dataset blocks')
    parser.add_bool(None,
                    'Q',
                    'metadata',
                    default=False,
                    help='Force retrieval of dataset metadata')
    parser.add_bool(None,
                    'O',
                    'ordered',
                    default=False,
                    help='Sort dataset blocks and files')
    parser.add_text(None,
                    'p',
                    'provider',
                    default='',
                    help='Specify dataset provider [Default:<autoselect>]')
    parser.add_text(
        None,
        'C',
        'settings',
        default='',
        help='Specify config file as source of detailed dataset settings')
    parser.add_text(None,
                    'S',
                    'save',
                    default='',
                    help='Saves dataset information to specified file')
    parser.add_bool(
        None,
        'c',
        'config-entry',
        default=False,
        dest='list_config_entry',
        help='Gives config file entries to run over given dataset(s)')
    parser.add_bool(
        None,
        'n',
        'config-nick',
        default=False,
        help='Use dataset path to derive nickname in case it is undefined')
    parser.add_text(
        None,
        'T',
        'threads',
        default=None,
        help='Specify maximum number of threads used during dataset retrieval')
    parser.add_text(None,
                    'L',
                    'location',
                    default='hostname',
                    help='Format of location information')
    parser.add_text(None,
                    'F',
                    'location-filter',
                    default='',
                    help='Specify location filter')
    options = parser.script_parse()
    # we need exactly one positional argument (dataset path)
    if not options.args:
        parser.exit_with_usage()
    return options
Example #15
def _parse_cmd_line():
	parser = ScriptOptions()
	parser.section('back', 'Backend debugging', '%s ...')
	parser.add_text('back', '', 'backend-list', default='',
		help='Specify backend discovery plugin')

	parser.section('part', 'Dataset Partition debugging', '%s <path to partition file> ...')
	parser.add_bool('part', '', 'partition-list', default=False,
		help='List all dataset partitions')
	parser.add_bool('part', '', 'partition-list-invalid', default=False,
		help='List invalidated dataset partitions')
	parser.add_text('part', '', 'partition-key-list', default='',
		help='Select dataset partition information to display')

	parser.section('jobs', 'Jobs debugging', '%s <config file / job file> ... ')
	parser.add_text('jobs', '', 'job-selector', default='',
		help='Display jobs matching selector')
	parser.add_bool('jobs', '', 'job-reset-attempts', default=False,
		help='Reset the attempt counter')
	parser.add_text('jobs', '', 'job-force-state', default='',
		help='Force new job state')
	parser.add_text('jobs', '', 'job-show-jdl', default='',
		help='Show JDL file if available')

	parser.section('data', 'Dataset debugging', '%s <dataset file> <dataset file> ...')
	parser.add_bool('data', '', 'dataset-show-diff', default=False,
		help='Show difference between datasets')
	parser.add_bool('data', '', 'dataset-show-removed', default=False,
		help='Find removed dataset blocks')

	parser.add_text(None, 'd', 'logfile-decode', default='',
		help='Decode log files')
	options = parser.script_parse()

	# Parse partition key list
	if options.opts.partition_key_list in ('', 'all'):
		partition_key_list = DataSplitter.enum_value_list
	else:
		partition_key_list = lmap(DataSplitter.str2enum, options.opts.partition_key_list.split(','))
	if None in partition_key_list:
		logging.warning('Available keys: %r', DataSplitter.enum_name_list)
	options.opts.partition_key_list = partition_key_list

	return options
def _parse_cmd_line():
    parser = ScriptOptions(usage='%s [OPTIONS] <config file / work directory>')
    parser.section(
        'disc',
        'Discovery options - ignored in case dbs input file is specified')
    # options that are used as config settings for InfoScanners
    parser.add_text(
        'disc',
        'n',
        'dataset-name-pattern',
        default='',
        help='Specify dbs path name - Example: DataSet_@NICK@_@VAR@')
    parser.add_text(
        'disc',
        'H',
        'dataset-hash-keys',
        default='',
        help='Include additional variables in dataset hash calculation')
    parser.add_text('disc',
                    'J',
                    'source-job-selector',
                    default='',
                    help='Specify dataset(s) to process')
    parser.add_bool('disc',
                    'p',
                    'no-parents',
                    default=False,
                    help='Do not add parent information to DBS')
    parser.add_bool('disc',
                    'm',
                    'merge-parents',
                    default=False,
                    help='Merge output files from different parent blocks ' +
                    'into a single block [Default: Keep boundaries]')
    parser.add_text(
        'disc',
        'P',
        'parent-source',
        default='',
        help=
        'Override parent information source - to bootstrap a reprocessing on local files'
    )
    # options directly used by this script
    parser.add_text(
        'disc',
        'T',
        'datatype',
        default=None,
        help=
        'Supply dataset type in case cmssw report did not specify it - valid values: "mc" or "data"'
    )
    parser.add_bool(
        'disc',
        'j',
        'jobhash',
        default=False,
        help='Use hash of all config files in job for dataset key calculation')
    parser.add_bool(
        'disc',
        'u',
        'unique-cfg',
        default=False,
        help='Circumvent edmConfigHash collisions so each dataset ' +
        'is stored with unique config information')
    parser.add_text('disc',
                    'G',
                    'globaltag',
                    default='crab2_tag',
                    help='Specify global tag')

    parser.section('proc', 'Processing mode')
    parser.add_bool(
        'proc',
        'd',
        'discovery',
        default=False,
        help='Enable discovery mode - just collect file information and exit')
    parser.add_text('proc',
                    ' ',
                    'tempdir',
                    default='',
                    help='Override temp directory')
    parser.add_bool(
        'proc',
        'i',
        'no-import',
        default=True,
        dest='do_import',
        help='Disable import of new datasets into target DBS instance - ' +
        'only temporary json files are created')
    parser.add_bool(
        'proc',
        'I',
        'incremental',
        default=False,
        help=
        'Skip import of existing files - Warning: this destroys coherent block structure!'
    )
    parser.add_bool(
        'proc',
        'o',
        'open-blocks',
        default=True,
        dest='do_close_blocks',
        help=
        'Keep blocks open for addition of further files [Default: Close blocks]'
    )

    parser.section('dbsi', 'DBS instance handling')
    parser.add_text('dbsi',
                    't',
                    'target-instance',
                    default='https://cmsweb.cern.ch/dbs/prod/phys03',
                    help='Specify target dbs instance url')
    parser.add_text(
        'dbsi',
        's',
        'source-instance',
        default='https://cmsweb.cern.ch/dbs/prod/global',
        help=
        'Specify source dbs instance url(s), where parent datasets are taken from'
    )

    parser.add_text(
        None,
        'F',
        'input-file',
        default=None,
        help='Specify dbs input file to use instead of scanning job output')
    parser.add_bool(None,
                    'c',
                    'continue-migration',
                    default=False,
                    help='Continue an already started migration')

    return parser.script_parse()
Example #17
def _main():
	parser = ScriptOptions(usage='%s [OPTIONS] <config file>')
	parser.add_bool(None, 'L', 'report-list', default=False,
		help='List available report classes')
	parser.add_bool(None, 'T', 'use-task', default=False,
		help='Forward task information to report')
	parser.add_text(None, 'R', 'report', default='modern')
	parser.add_text(None, 'J', 'job-selector', default=None)
	options = parser.script_parse()

	if options.opts.report_list:
		display_plugin_list_for('Report', title='Available report classes')

	if len(options.args) != 1:
		parser.exit_with_usage()

	script_obj = get_script_object(config_file=options.args[0],
		job_selector_str=options.opts.job_selector, require_task=options.opts.use_task)
	report = script_obj.new_config.get_composited_plugin('transient report',
		options.opts.report, 'MultiReport', cls='Report',
		pargs=(script_obj.job_db, script_obj.task))
	report.show_report(script_obj.job_db, script_obj.job_db.get_job_list())
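
get_composited_plugin apparently wraps one or more report plugins behind a single Report interface, with 'MultiReport' as the composite used when several names are given. A rough sketch of the composite idea (an assumption about MultiReport's behavior, not grid-control's actual class):

class MultiReport:
    # forwards show_report to every wrapped report instance
    def __init__(self, report_list):
        self._report_list = report_list

    def show_report(self, job_db, jobnum_list):
        for report in self._report_list:
            report.show_report(job_db, jobnum_list)
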
def _parse_cmd_line():
	help_msg = '\n\nDEFAULT: The default is to download the SE files and check them with MD5 hashes.'
	help_msg += '\n * In case all files are transferred successfully, the job is marked'
	help_msg += '\n   as already downloaded, so that the files are not copied again.'
	help_msg += '\n * Failed transfer attempts will mark the job as failed, so that it'
	help_msg += '\n   can be resubmitted.\n'
	parser = ScriptOptions(usage='%s [OPTIONS] <config file>' + help_msg)

	def _add_bool_opt(group, short_pair, option_base, help_base, default=False,
			option_prefix_pair=('', 'no'), help_prefix_pair=('', 'do not '), dest=None):
		def _create_help(idx):
			help_def = ''
			if (default and (idx == 0)) or ((not default) and (idx == 1)):
				help_def = ' [Default]'
			return help_prefix_pair[idx] + help_base + help_def

		def _create_opt(idx):
			return str.join('-', option_prefix_pair[idx].split() + option_base.split())

		parser.add_flag(group, short_pair or '  ', (_create_opt(0), _create_opt(1)),
			default=default, dest=dest, help_pair=(_create_help(0), _create_help(1)))

	_add_bool_opt(None, 'l ', 'loop', default=False,
		help_base='loop over jobs until all files are successfully processed')
	_add_bool_opt(None, 'L ', 'infinite', default=False,
		help_base='process jobs in an infinite loop')
	_add_bool_opt(None, None, 'shuffle', default=False,
		help_base='shuffle job processing order')
	parser.add_text(None, 'J', 'job-selector', default=None,
		help='specify the job selector')
	parser.add_text(None, 'T', 'token', default='VomsProxy',
		help='specify the access token used to determine ability to download / delete' +
			' - VomsProxy or TrivialAccessToken')
	parser.add_list(None, 'S', 'select-se', default=None,
		help='specify the SE paths to process')
	parser.add_bool(None, 'd', 'delete', default=False,
		help='perform file deletions')
	parser.add_bool(None, 'R', 'hide-results', default=False,
		help='specify if the transfer overview should be hidden')
	parser.add_text(None, 't', 'threads', default=0,
		help='how many jobs should be processed in parallel [Default: no multithreading]')
	parser.add_text(None, None, 'slowdown', default=2,
		help='specify delay between processing jobs [Default: 2 sec]')

	parser.section('jobs', 'Job state / flag handling')
	_add_bool_opt('jobs', None, 'job-success', default=True,
		help_base='only select successful jobs')
	_add_bool_opt('jobs', None, 'mark-rm', default=False,
		option_prefix_pair=('ignore', 'use'), dest='mark_ignore_rm',
		help_base='mark about successfully removed jobs', help_prefix_pair=('ignore ', 'use '))
	_add_bool_opt('jobs', None, 'mark-dl', default=True,
		help_base='mark successfully downloaded jobs as such')
	_add_bool_opt('jobs', None, 'mark-dl', default=False,
		option_prefix_pair=('ignore', 'use'), dest='mark_ignore_dl',
		help_base='mark about successfully downloaded jobs', help_prefix_pair=('ignore ', 'use '))
	_add_bool_opt('jobs', None, 'mark-fail', default=True,
		help_base='mark jobs failing verification as such')
	_add_bool_opt('jobs', None, 'mark-empty-fail', default=False,
		help_base='mark jobs without any files as failed')

	parser.section('down', 'Download options')
	_add_bool_opt('down', 'v ', 'verify-md5', default=True,
		help_base='MD5 verification of SE files', help_prefix_pair=('enable ', 'disable '))
	_add_bool_opt('down', None, '', default=False,
		option_prefix_pair=('skip-existing', 'overwrite'), dest='skip_existing',
		help_base='files which are already on local disk', help_prefix_pair=('skip ', 'overwrite '))
	parser.add_text('down', 'o', 'output', default=None,
		help='specify the local output directory')
	parser.add_text('down', 'O', 'tmp-dir', default='/tmp',
		help='specify the local tmp directory')
	parser.add_text('down', 'r', 'retry',
		help='how often should a transfer be attempted [Default: 0]')

	parser.section('file', 'Local / SE file handling during download')
	option_help_base_list = [
		('local-ok', 'files of successful jobs in local directory'),
		('local-fail', 'files of failed jobs in local directory'),
		('se-ok', 'files of successful jobs on SE'),
		('se-fail', 'files of failed jobs on the SE'),
	]
	for (option, help_base) in option_help_base_list:
		_add_bool_opt('file', None, option, default=False, option_prefix_pair=('rm', 'keep'),
			help_base=help_base, help_prefix_pair=('remove ', 'keep '))

	parser.section('short_delete', 'Shortcuts for delete options')
	parser.add_fset('short_delete', 'D', 'just-delete',
		help='Delete files from SE and local area - shorthand for:'.ljust(100) + '%s',
		flag_set='--delete --use-mark-rm --ignore-mark-dl ' +
			'--rm-se-fail --rm-local-fail --rm-se-ok --rm-local-ok')

	parser.section('short_down', 'Shortcuts for download options')
	parser.add_fset('short_down', 'm', 'move',
		help='Move files from SE - shorthand for:'.ljust(100) + '%s',
		flag_set='--verify-md5 --overwrite --mark-dl --use-mark-dl --mark-fail ' +
			'--rm-se-fail --rm-local-fail --rm-se-ok --keep-local-ok')
	parser.add_fset('short_down', 'c', 'copy',
		help='Copy files from SE - shorthand for:'.ljust(100) + '%s',
		flag_set='--verify-md5 --overwrite --mark-dl --use-mark-dl --mark-fail ' +
			'--rm-se-fail --rm-local-fail --keep-se-ok --keep-local-ok')
	parser.add_fset('short_down', 'j', 'just-copy',
		help='Just copy files from SE - shorthand for:'.ljust(100) + '%s',
		flag_set='--verify-md5 --skip-existing --no-mark-dl --ignore-mark-dl --no-mark-fail ' +
			'--keep-se-fail --keep-local-fail --keep-se-ok --keep-local-ok')
	parser.add_fset('short_down', 's', 'smart-copy',
		help='Copy correct files from SE, but remember already downloaded ' +
			'files and delete corrupt files - shorthand for: '.ljust(100) + '%s',
		flag_set='--verify-md5 --mark-dl --mark-fail --rm-se-fail ' +
			'--rm-local-fail --keep-se-ok --keep-local-ok')
	parser.add_fset('short_down', 'V', 'just-verify',
		help='Just verify files on SE - shorthand for:'.ljust(100) + '%s',
		flag_set='--verify-md5 --no-mark-dl --keep-se-fail ' +
			'--rm-local-fail --keep-se-ok --rm-local-ok --ignore-mark-dl')

	options = parser.script_parse(verbose_short=None)
	if len(options.args) != 1:  # we need exactly one positional argument (config file)
		parser.exit_with_usage(msg='Config file not specified!')
	options.opts.threads = int(options.opts.threads)
	return options
Example #19
def _main():
	signal.signal(signal.SIGINT, handle_abort_interrupt)

	parser = ScriptOptions()
	parser.section('expr', 'Manipulate lumi filter expressions', '%s <lumi filter expression>')
	parser.add_bool('expr', 'G', 'gc', default=False,
		help='Output grid-control compatible lumi expression')
	parser.add_bool('expr', 'J', 'json', default=False,
		help='Output JSON file with lumi expression')
	parser.add_bool('expr', 'F', 'full', default=False,
		help='Output JSON file with full expression')

	parser.section('calc', 'Options which allow luminosity related calculations',
		'%s <config file> [<job selector>]')
	parser.add_text('calc', 'O', 'output-dir', default=None,
		help='Set output directory (default: work directory)')
	parser.add_bool('calc', 'g', 'job-gc', default=False,
		help='Output grid-control compatible lumi expression for processed lumi sections')
	parser.add_bool('calc', 'j', 'job-json', default=False,
		help='Output JSON file with processed lumi sections')
	parser.add_bool('calc', 'e', 'job-events', default=False,
		help='Get number of events processed')
	parser.add_bool('calc', 'p', 'parameterized', default=False,
		help='Use output file name to categorize output (useful for parameterized tasks)')
	parser.add_text('calc', ' ', 'replace', default='job_%d_',
		help='Pattern to replace for parameterized jobs (default: job_%%d_)')
	options = parser.script_parse()

	if options.opts.gc or options.opts.json or options.opts.full:
		if not options.args:
			options.parser.exit_with_usage(options.parser.usage('expr'))
		return convert_lumi_expr(options.opts, options.args)

	if options.opts.job_json or options.opts.job_gc or options.opts.job_events:
		if not options.args:
			options.parser.exit_with_usage(options.parser.usage('calc'))
		script_obj = get_script_object_cmdline(options.args, only_success=True)
		work_dn = script_obj.config.get_work_path()
		reader = None
		try:
			reader = DataSplitter.load_partitions(os.path.join(work_dn, 'datamap.tar'))
		except Exception:
			clear_current_exception()
		jobnum_list = sorted(script_obj.job_db.get_job_list(ClassSelector(JobClass.SUCCESS)))
		return lumi_calc(options.opts, work_dn, jobnum_list, reader)
Example #20
def _parse_cmd_line():
	usage = '%s [OPTIONS] <DBS dataset path> | <dataset cache file> ...' % sys.argv[0]
	parser = ScriptOptions(usage)
	parser.add_bool(None, 'N', 'list-dataset-names', default=False,
		help='Show list of all dataset names in query / file')
	parser.add_bool(None, 'l', 'list-datasets', default=False,
		help='Show list of all datasets in query / file')
	parser.add_bool(None, 'b', 'list-blocks', default=False,
		help='Show list of blocks of the dataset(s)')
	parser.add_bool(None, 'f', 'list-files', default=False,
		help='Show list of all files grouped according to blocks')
	parser.add_bool(None, 's', 'list-storage', default=False,
		help='Show list of locations where data is stored')
	parser.add_bool(None, 'm', 'list-metadata', default=False,
		help='Get metadata information of dataset files')
	parser.add_bool(None, 'M', 'list-metadata-common', default=False,
		help='Get common metadata information of dataset blocks')
	parser.add_bool(None, 'Q', 'metadata', default=False,
		help='Force retrieval of dataset metadata')
	parser.add_bool(None, 'O', 'ordered', default=False,
		help='Sort dataset blocks and files')
	parser.add_text(None, 'p', 'provider', default='',
		help='Specify dataset provider [Default:<autoselect>]')
	parser.add_text(None, 'C', 'settings', default='',
		help='Specify config file as source of detailed dataset settings')
	parser.add_text(None, 'S', 'save', default='',
		help='Saves dataset information to specified file')
	parser.add_bool(None, 'c', 'config-entry', default=False, dest='list_config_entry',
		help='Gives config file entries to run over given dataset(s)')
	parser.add_bool(None, 'n', 'config-nick', default=False,
		help='Use dataset path to derive nickname in case it is undefined')
	parser.add_bool(None, 'T', 'disable-threaded', default=False,
		help='Disable multi-threaded dataset retrieval')
	parser.add_text(None, 'L', 'location', default='hostname',
		help='Format of location information')
	parser.add_text(None, 'F', 'location-filter', default='',
		help='Specify location filter')
	options = parser.script_parse()
	# we need exactly one positional argument (dataset path)
	if not options.args:
		parser.exit_with_usage()
	return options
Example #21
def _parse_cmd_line():
	parser = ScriptOptions(usage='%s [OPTIONS] <parameter definition>')
	parser.add_accu(None, 'c', 'collapse', default=0,
		help='Do not collapse dataset infos in display')
	parser.add_bool(None, 'a', 'active', default=False,
		help='Show activity state')
	parser.add_bool(None, 'd', 'disabled', default=False,
		help='Show disabled parameter sets')
	parser.add_bool(None, 'l', 'list-parameters', default=False,
		help='Display parameter list')
	parser.add_bool(None, 'L', 'show-sources', default=False,
		help='Show parameter sources')
	parser.add_bool(None, 't', 'untracked', default=False,
		help='Display untracked variables')
	parser.add_bool(None, 'T', 'persistent', default=False,
		help='Work with persistent parameters')
	parser.add_list(None, 'p', 'parameter', default=[],
		help='Specify parameters')
	parser.add_text(None, 'D', 'dataset', default='',
		help='Add dataset splitting (use "True" to simulate a dataset)')
	parser.add_text(None, 'j', 'job', default=None,
		help='Select job to display (used for unbounded parameter spaces)')
	parser.add_text(None, 'F', 'factory', default=None,
		help='Select parameter source factory')
	parser.add_text(None, 'o', 'output', default='',
		help='Show only specified parameters')
	parser.add_text(None, 'S', 'save', default='',
		help='Saves information to specified file')
	parser.add_text(None, 'V', 'visible', default='',
		help='Set visible variables')
	options = parser.script_parse()
	if len(options.args) != 1:
		parser.exit_with_usage()
	return options