Example #1
0
	def __init__(self, config, name):
		self._sandbox_helper = SandboxHelper(config)
		self._error_log_fn = config.get_work_path('error.tar')
		cancel_executor = CancelAndPurgeJobs(config, CondorCancelJobs(config),
				LocalPurgeJobs(config, self._sandbox_helper))
		BasicWMS.__init__(self, config, name,
			check_executor=CheckJobsMissingState(config, CondorCheckJobs(config)),
			cancel_executor=cancel_executor)
		self._task_id = config.get('task id', md5_hex(str(time.time())), persistent=True)  # FIXME
		# finalize config state by reading values or setting to defaults
		# load keys for condor pool ClassAds
		self._jdl_writer = CondorJDLWriter(config)
		self._universe = config.get('universe', 'vanilla', on_change=None)
		self._pool_req_dict = config.get_dict('poolArgs req', {})[0]
		self._pool_work_dn = None
		self._proc_factory = None
		(self._submit_exec, self._transfer_exec) = (None, None)
		# prepare interfaces for local/remote/ssh pool access
		self._remote_type = config.get_enum('remote Type', PoolType, PoolType.LOCAL)
		self._init_pool_interface(config)
		# Sandbox base path where individual job data is stored, staged and returned to
		self._sandbox_dn = config.get_path('sandbox path',
			config.get_work_path('sandbox'), must_exist=False)
		# broker for selecting sites - FIXME: this looks wrong... pool != site
		self._pool_host_list = config.get_list(['poolhostlist', 'pool host list'], [])
		self._broker_site = config.get_plugin('site broker', 'UserBroker', cls=Broker,
			bind_kwargs={'tags': [self]}, pargs=('sites', 'sites', lambda: self._pool_host_list))
		self._wall_time_mode = config.get_enum('wall time mode', WallTimeMode, WallTimeMode.ignore,
			subset=[WallTimeMode.hard, WallTimeMode.ignore])
		self._blacklist_nodes = config.get_list(['blacklist nodes'], [], on_change=None)
		self._user_requirements = config.get('user requirements', '', on_change=None)
Example #2
0
    def __init__(self, config, name):
        cancel_executor = CancelAndPurgeJobs(config, CREAMCancelJobs(config),
                                             CREAMPurgeJobs(config))
        GridWMS.__init__(
            self,
            config,
            name,
            submit_exec=resolve_install_path('glite-ce-job-submit'),
            output_exec=resolve_install_path('glite-ce-job-output'),
            check_executor=CREAMCheckJobs(config),
            cancel_executor=ChunkedExecutor(config, 'cancel', cancel_executor))

        self._delegate_exec = resolve_install_path('glite-ce-delegate-proxy')
        self._use_delegate = config.get_bool('try delegate',
                                             True,
                                             on_change=None)
        self._chunk_size = config.get_int('job chunk size', 10, on_change=None)
        self._submit_args_dict.update({
            '-r': self._ce,
            '--config-vo': self._config_fn
        })
        self._output_regex = (r'.*For JobID \[(?P<rawId>\S+)\] output will be stored'
                              r' in the dir (?P<output_dn>.*)$')

        if self._use_delegate is False:
            self._submit_args_dict['-a'] = ' '
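
A minimal standalone sketch (not part of the class above) of how the named groups in _output_regex pick the job ID and output directory out of a glite-ce-job-output line; the sample line itself is assumed for illustration:

import re

# Sample log line, assumed for illustration; the real glite-ce-job-output
# wording may differ slightly.
line = ('2024-05-01 12:00:00 INFO - For JobID '
        '[https://cream-ce.example.org:8443/CREAM123456789] '
        'output will be stored in the dir /home/user/jobOutput/CREAM123456789')
output_regex = (r'.*For JobID \[(?P<rawId>\S+)\] output will be stored'
                r' in the dir (?P<output_dn>.*)$')
match = re.match(output_regex, line)
if match:
    print(match.group('rawId'))      # https://cream-ce.example.org:8443/CREAM123456789
    print(match.group('output_dn'))  # /home/user/jobOutput/CREAM123456789
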
Example #3
0
    def __init__(self, config, name):
        cancel_executor = CancelAndPurgeJobs(config, CREAMCancelJobs(config),
                                             CREAMPurgeJobs(config))
        GridWMS.__init__(
            self,
            config,
            name,
            submit_exec=resolve_install_path('glite-ce-job-submit'),
            output_exec=resolve_install_path('glite-ce-job-output'),
            check_executor=CREAMCheckJobs(config),
            cancel_executor=ChunkedExecutor(config, 'cancel', cancel_executor))

        self._log.info("CreamWMS.__init__")
        self._delegate_exec = resolve_install_path('glite-ce-delegate-proxy')
        self._use_delegate = config.get_bool('try delegate',
                                             True,
                                             on_change=None)
        self._chunk_size = config.get_int('job chunk size', 10, on_change=None)
        self._submit_args_dict.update({
            '-r': self._ce,
            '--config-vo': self._config_fn
        })
        self._output_regex = (r'.*For JobID \[(?P<rawId>\S+)\] output will be stored'
                              r' in the dir (?P<output_dn>.*)$')

        self._end_of_proxy_lifetime = None
        self._set_proxy_lifetime()
        #if self._use_delegate is False:
        #	self._submit_args_dict['-a'] = ' '

        self._lock_filename = os.path.join(os.path.expanduser("~"),
                                           ".gcFileLock")
        self._delegated_proxy_filename = None
        self._delegated_proxy_lock = os.path.join(os.path.expanduser("~"),
                                                  ".gcDelegatedProxyLock")
Example #4
0
    def __init__(self, config, name):
        cancelExecutor = CancelAndPurgeJobs(config, CREAM_CancelJobs(config),
                                            CREAM_PurgeJobs(config))
        GridWMS.__init__(self,
                         config,
                         name,
                         checkExecutor=CREAM_CheckJobs(config),
                         cancelExecutor=ChunkedExecutor(
                             config, 'cancel', cancelExecutor))

        self._nJobsPerChunk = config.getInt('job chunk size',
                                            10,
                                            onChange=None)

        self._submitExec = utils.resolveInstallPath('glite-ce-job-submit')
        self._outputExec = utils.resolveInstallPath('glite-ce-job-output')
        self._submitParams.update({
            '-r': self._ce,
            '--config-vo': self._configVO
        })

        self._outputRegex = r'.*For JobID \[(?P<rawId>\S+)\] output will be stored in the dir (?P<outputDir>.*)$'

        self._useDelegate = False
        if self._useDelegate is False:
            self._submitParams.update({'-a': ' '})
Example #5
0
	def __init__(self, config, name, submitExec, checkExecutor, cancelExecutor, nodesFinder = None, queuesFinder = None):
		config.set('broker', 'RandomBroker')
		config.setInt('wait idle', 20)
		config.setInt('wait work', 5)
		self.submitExec = submitExec
		self._sandbox_helper = SandboxHelper(config)
		BasicWMS.__init__(self, config, name, checkExecutor = checkExecutor,
			cancelExecutor = CancelAndPurgeJobs(config, cancelExecutor, LocalPurgeJobs(config, self._sandbox_helper)))

		def getNodes():
			if nodesFinder:
				return lmap(lambda x: x['name'], nodesFinder.discover())

		self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
			inherit = True, tags = [self], pargs = ('sites', 'sites', getNodes))

		def getQueues():
			if queuesFinder:
				result = {}
				for entry in queuesFinder.discover():
					result[entry.pop('name')] = entry
				return result

		self.brokerQueue = config.getPlugin('queue broker', 'UserBroker', cls = Broker,
			inherit = True, tags = [self], pargs = ('queue', 'queues', getQueues))

		self.scratchPath = config.getList('scratch path', ['TMPDIR', '/tmp'], onChange = True)
		self.submitOpts = config.get('submit options', '', onChange = None)
		self.memory = config.getInt('memory', -1, onChange = None)
Example #6
0
    def __init__(self,
                 config,
                 name,
                 submit_exec,
                 check_executor,
                 cancel_executor,
                 nodes_finder=None,
                 queues_finder=None):
        config.set('broker', 'RandomBroker')
        config.set_int('wait idle', 20)
        config.set_int('wait work', 5)
        self._submit_exec = submit_exec
        self._sandbox_helper = SandboxHelper(config)
        BasicWMS.__init__(self,
                          config,
                          name,
                          check_executor=check_executor,
                          cancel_executor=CancelAndPurgeJobs(
                              config, cancel_executor,
                              LocalPurgeJobs(config, self._sandbox_helper)))

        def _get_nodes_list():
            if nodes_finder:
                return lmap(lambda x: x['name'], nodes_finder.discover())

        self._broker_site = config.get_plugin('site broker',
                                              'UserBroker',
                                              cls=Broker,
                                              bind_kwargs={
                                                  'inherit': True,
                                                  'tags': [self]
                                              },
                                              pargs=('sites', 'sites',
                                                     _get_nodes_list))

        def _get_queues_list():
            if queues_finder:
                result = {}
                for entry in queues_finder.discover():
                    result[entry.pop('name')] = entry
                return result

        self._broker_queue = config.get_plugin('queue broker',
                                               'UserBroker',
                                               cls=Broker,
                                               bind_kwargs={
                                                   'inherit': True,
                                                   'tags': [self]
                                               },
                                               pargs=('queue', 'queues',
                                                      _get_queues_list))

        self._scratch_path = config.get_list('scratch path',
                                             ['TMPDIR', '/tmp'],
                                             on_change=True)
        self._submit_opt_list = shlex.split(
            config.get('submit options', '', on_change=None))
        self._memory = config.get_int('memory', -1, on_change=None)
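
For clarity, a standalone sketch (using made-up queue entries instead of a real queues_finder) of the name-keyed dictionary that _get_queues_list builds from queues_finder.discover():

# Made-up discover() result; each entry carries a 'name' plus arbitrary attributes.
entries = [
    {'name': 'short', 'walltime': 3600},
    {'name': 'long', 'walltime': 86400},
]
result = {}
for entry in entries:
    # pop() removes the key used for indexing, so only the remaining attributes stay as the value
    result[entry.pop('name')] = entry
print(result)  # {'short': {'walltime': 3600}, 'long': {'walltime': 86400}}
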
Example #7
0
	def __init__(self, config, name):
		self._sandbox_helper = SandboxHelper(config)
		BasicWMS.__init__(self, config, name,
			checkExecutor = CheckJobsMissingState(config, Condor_CheckJobs(config)),
			cancelExecutor = CancelAndPurgeJobs(config, Condor_CancelJobs(config), LocalPurgeJobs(config, self._sandbox_helper)))
		# special debug out/messages/annotations - may have noticeable effect on storage and performance!
		debugLogFN = config.get('debugLog', '')
		self.debug = False
		if debugLogFN:
			self.debug = open(debugLogFN, 'a')
		######
		try:
			random_task_id = md5(str(time.time())).hexdigest()
		except TypeError:
			random_task_id = md5(str(time.time()).encode()).hexdigest()
		self.taskID = config.get('task id', random_task_id, persistent = True) # FIXME!
		self.debugOut("""

		#############################
		Initialized Condor/GlideInWMS
		#############################
		Config: %s
		taskID: %s
		Name:   %s
		#############################

		""" % (config.getConfigName(), self.taskID, name))
		# finalize config state by reading values or setting to defaults
		self.settings={
			'jdl': {
				'Universe' : config.get('Universe', 'vanilla'),
				'NotifyEmail' : config.get('NotifyEmail', ''),
				'ClassAdData' : config.getList('ClassAdData',[]),
				'JDLData' : config.getList('JDLData',[])
				},
			'pool' : {
				'hosts' : config.getList('PoolHostList',[])
				}
			}
		# prepare interfaces for local/remote/ssh pool access
		self._initPoolInterfaces(config)
		# load keys for condor pool ClassAds
		self.poolReqs  = config.getDict('poolArgs req', {})[0]
		self.poolQuery = config.getDict('poolArgs query', {})[0]
		# Sandbox base path where individual job data is stored, staged and returned to
		self.sandPath = config.getPath('sandbox path', config.getWorkPath('sandbox'), mustExist = False)
		# history query is faster with split files - check if and how this is used
		# default condor_history command works WITHOUT explicitly specified file
		self.historyFile = None
		if self.remoteType == PoolType.LOCAL and getoutput( self.configValExec + ' ENABLE_HISTORY_ROTATION').lower() == 'true':
			self.historyFile = getoutput( self.configValExec + ' HISTORY')
			if not os.path.isfile(self.historyFile):
				self.historyFile = None
		# broker for selecting Sites
		self.brokerSite = config.getPlugin('site broker', 'UserBroker', cls = Broker,
			tags = [self], pargs = ('sites', 'sites', self.getSites))
		self.debugFlush()
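
As a rough standalone illustration of the history-file detection above, assuming a local pool where condor_config_val is on the PATH (the class itself goes through self.configValExec instead):

import os
from subprocess import getoutput

# ENABLE_HISTORY_ROTATION and HISTORY are standard condor_config_val settings;
# only keep the history file if rotation is enabled and the file actually exists.
history_file = None
if getoutput('condor_config_val ENABLE_HISTORY_ROTATION').lower() == 'true':
    history_file = getoutput('condor_config_val HISTORY')
    if not os.path.isfile(history_file):
        history_file = None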