Code Example #1
File: access_cms.py  Project: tolange/grid-control
def _get_cms_cert(config):
	config = config.change_view(set_sections=['cms', 'access', 'proxy'])
	try:
		access = AccessToken.create_instance('VomsAccessToken', config, 'cms-proxy')
	except Exception:
		if os.environ.get('X509_USER_PROXY'):
			return os.environ['X509_USER_PROXY']
		raise CMSAuthenticationException('Unable to find grid environment')
	can_submit = ignore_exception(Exception, False, access.can_submit, 5 * 60, True)
	if not can_submit:
		logging.getLogger('access.cms').warning('The grid proxy has expired or is invalid!')
		role = config.get_list('new proxy roles', '', on_change=None)
		timeout = config.get_time('new proxy timeout', 10, on_change=None)
		lifetime = config.get_time('new proxy lifetime', 192 * 60, on_change=None)
		# password in variable name removes it from debug log
		password = getpass.getpass('Please enter proxy password: ')
		try:
			# resolve_install_path (assumed here to be the grid-control utility) locates voms-proxy-init
			proxy_init_exec = resolve_install_path('voms-proxy-init')
			proc = LocalProcess(proxy_init_exec, '--voms', str.join(':', ['cms'] + role),
				'--valid', '%d:%d' % (lifetime / 60, lifetime % 60), logging=False)
			if password:
				proc.stdin.write(password + '\n')
				proc.stdin.close()
			proc.get_output(timeout=timeout)
		except Exception:
			raise CMSAuthenticationException('Unable to create new grid proxy')
		access = AccessToken.create_instance('VomsAccessToken', config, 'cms-proxy')  # new instance
		can_submit = ignore_exception(Exception, False, access.can_submit, 5 * 60, True)
		if not can_submit:
			raise CMSAuthenticationException('Newly created grid proxy is also invalid')
	return access.get_auth_fn_list()[0]
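For context, a minimal caller sketch (not part of the project); it assumes only that config is an existing grid-control configuration object:

# Hypothetical caller of the helper above
proxy_fn = _get_cms_cert(config)  # path to a usable grid proxy, or CMSAuthenticationException is raised
print('Using grid proxy file: %s' % proxy_fn)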
Code Example #2
	def __init__(self, config):
		config.set('jobs', 'monitor', 'dashboard', override=False)
		config.set('grid', 'sites', '-samtest -cmsprodhi', append=True)

		site_db = SiteDB()
		token = AccessToken.create_instance('VomsProxy', create_config(), 'token')
		self._hn_name = site_db.dn_to_username(token.get_fq_user_name())
		if not self._hn_name:
			raise ConfigError('Unable to map grid certificate to hn name!')
Code Example #3
    def __init__(self, config):
        config.set('jobs', 'monitor', 'dashboard', override=False)
        config.set('grid', 'sites', '-samtest -cmsprodhi', append=True)

        site_db = CRIC()
        token = AccessToken.create_instance('VomsProxy', create_config(),
                                            'token')
        self._hn_name = site_db.dn_to_username(token.get_fq_user_name())
        if not self._hn_name:
            raise ConfigError('Unable to map grid certificate to hn name!')
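Code Example #3 is otherwise identical to Code Example #2; the only change is that the certificate DN is mapped to a username via CRIC() instead of SiteDB(), presumably reflecting the CMS migration from SiteDB to CRIC.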
Code Example #4
    def __init__(self, config, datasource_name, dataset_expr,
                 dataset_nick=None, dataset_proc=None):
        dataset_config = config.change_view(
            default_on_change=TriggerResync(['datasets', 'parameters']))
        self._lumi_filter = dataset_config.get_lookup(
            ['lumi filter', '%s lumi filter' % datasource_name],
            default={}, parser=parse_lumi_filter, strfun=str_lumi)
        if not self._lumi_filter.empty():
            config.set('%s processor' % datasource_name, 'LumiDataProcessor', '+=')
        DataProvider.__init__(self, config, datasource_name, dataset_expr,
                              dataset_nick, dataset_proc)
        # LumiDataProcessor instantiated in DataProcessor.__init__ will set lumi metadata as well
        self._lumi_query = dataset_config.get_bool(
            ['lumi metadata', '%s lumi metadata' % datasource_name],
            default=not self._lumi_filter.empty())
        config.set('phedex sites matcher mode', 'ShellStyleMatcher', '?=')
        # PhEDEx blacklist: T1_*_Disk nodes allow user jobs - other T1 nodes don't!
        self._phedex_filter = dataset_config.get_filter(
            'phedex sites', '-* T1_*_Disk T2_* T3_*',
            default_matcher='BlackWhiteMatcher', default_filter='StrictListFilter')
        self._only_complete = dataset_config.get_bool('only complete sites', True)
        self._only_valid = dataset_config.get_bool('only valid', True)
        self._allow_phedex = dataset_config.get_bool('allow phedex', True)
        self._location_format = dataset_config.get_enum(
            'location format', CMSLocationFormat, CMSLocationFormat.hostname)
        self._sitedb = CRIC()
        token = AccessToken.create_instance('VomsProxy', create_config(), 'token')
        self._rucio = Client(
            account=self._sitedb.dn_to_username(token.get_fq_user_name()))
        dataset_expr_parts = split_opt(dataset_expr, '@#')
        (self._dataset_path, self._dataset_instance,
         self._dataset_block_selector) = dataset_expr_parts
        instance_default = dataset_config.get('dbs instance', '')
        self._dataset_instance = self._dataset_instance or instance_default
        if not self._dataset_instance:
            self._dataset_instance = 'prod/global'
        elif '/' not in self._dataset_instance:
            self._dataset_instance = 'prod/%s' % self._dataset_instance
        self._dataset_block_selector = self._dataset_block_selector or 'all'
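The tail of Code Example #4 implies a dataset expression of the form '<dataset path>[@<dbs instance>][#<block selector>]'. The helper below is a minimal stand-in for split_opt(dataset_expr, '@#'), written only to illustrate that assumed format; it is not part of grid-control:

def split_dataset_expr(dataset_expr):
    # Assumed behaviour: '/A/B/C@prod/phys03#block1' -> ('/A/B/C', 'prod/phys03', 'block1'),
    # with missing parts returned as empty strings.
    path_and_instance, _, block_selector = dataset_expr.partition('#')
    dataset_path, _, dbs_instance = path_and_instance.partition('@')
    return dataset_path, dbs_instance, block_selector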
Code Example #5
def process_all(opts, args):
    # Init everything in each loop to pick up changes
    script_obj = get_script_object(args[0],
                                   opts.job_selector,
                                   only_success=False)
    token = AccessToken.create_instance(opts.token, script_obj.new_config,
                                        'token')
    work_dn = script_obj.config.get_work_path()
    if process_all.first:
        logging.getLogger().addHandler(
            ProcessArchiveHandler(os.path.join(work_dn, 'error.tar')))
        process_all.first = False

    # Create SE output dir
    if not opts.output:
        opts.output = os.path.join(work_dn, 'se_output')
    if '://' not in opts.output:
        opts.output = 'file:///%s' % os.path.abspath(opts.output)

    job_db = script_obj.job_db
    jobnum_list = job_db.get_job_list()
    status_mon = StatusMonitor(len(jobnum_list))
    if opts.shuffle:
        random.shuffle(jobnum_list)
    else:
        jobnum_list.sort()

    if opts.threads:
        activity = Activity('Processing jobs')
        pool = GCThreadPool(opts.threads)
        for jobnum in jobnum_list:
            pool.start_daemon('Processing job %d' % jobnum, process_job, opts,
                              work_dn, status_mon, job_db, token, jobnum)
        pool.wait_and_drop()
        activity.finish()
    else:
        progress = ProgressActivity('Processing job', max(jobnum_list) + 1)
        for jobnum in jobnum_list:
            progress.update_progress(jobnum)
            process_job(opts, work_dn, status_mon, job_db, token, jobnum)
        progress.finish()

    # Print overview
    if not opts.hide_results:
        status_mon.show_results()
    return status_mon.is_finished()
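process_all() reads the function attribute process_all.first before ever setting it, so a caller has to initialise that attribute before the first invocation. A hypothetical driver sketch follows (opts and args are assumed to come from the script's own option parsing; the loop and interval options are placeholders, not options defined above):

import time

process_all.first = True  # must be set before the first call (see the attribute check above)
while True:
    finished = process_all(opts, args)
    if finished or not getattr(opts, 'loop', False):  # 'loop' is a placeholder option
        break
    time.sleep(getattr(opts, 'interval', 60))  # 'interval' is a placeholder option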