class BaseJobFileTarAdaptor(object):
	def __init__(self, path):
		activity = Activity('Reading dataset partition file')
		self._lock = GCLock()
		self._fmt = utils.DictFormat()
		self._tar = tarfile.open(path, 'r:')
		(self._cacheKey, self._cacheTar) = (None, None)

		metadata = self._fmt.parse(self._tar.extractfile('Metadata').readlines(), keyParser = {None: str})
		self.maxJobs = metadata.pop('MaxJobs')
		self.classname = metadata.pop('ClassName')
		self.metadata = {'dataset': dict(ifilter(lambda k_v: not k_v[0].startswith('['), metadata.items()))}
		for (k, v) in ifilter(lambda k_v: k_v[0].startswith('['), metadata.items()):
			self.metadata.setdefault('dataset %s' % k.split(']')[0].lstrip('['), {})[k.split(']')[1].strip()] = v
		activity.finish()

		self._parserMap = { None: str, DataSplitter.NEntries: int, DataSplitter.Skipped: int,
			DataSplitter.DatasetID: int, DataSplitter.Invalid: parseBool,
			DataSplitter.Locations: lambda x: parseList(x, ','),
			DataSplitter.MetadataHeader: parseJSON,
			DataSplitter.Metadata: lambda x: parseJSON(x.strip("'")) }

	def __getitem__(self, key):
		if key >= self.maxJobs:
			raise IndexError('Invalid dataset partition %s' % repr(key))
		try:
			self._lock.acquire()
			return self._getPartition(key)
		finally:
			self._lock.release()
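
The __getitem__ above guards partition access with an explicit acquire/try/finally/release around the lock. Later examples use a with_lock helper for the same pattern; a minimal sketch of such a helper, assuming only that the lock exposes acquire() and release() (an illustrative re-implementation, not the project's own code):

def with_lock(lock, fun, *args, **kwargs):
	# Acquire the lock, run the callable, and always release the lock afterwards
	lock.acquire()
	try:
		return fun(*args, **kwargs)
	finally:
		lock.release()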
Example #2
 def __init__(self, config, workflow):
     config.set('report', 'BasicReport BarReport')
     (self._stored_stdout, self._stored_stderr) = (sys.stdout, sys.stderr)
     GUI.__init__(self, config, workflow)
     self._reportHeight = 0
     self._statusHeight = 1
     self._old_message = None
     self._lock = GCLock(threading.RLock())  # drawing lock
     self._last_report = 0
     self._old_size = None
class StatusMonitor(object):
    def __init__(self, num_jobs):
        self._result = {}
        self._lock = GCLock()
        self._num_jobs = num_jobs

    def is_finished(self):
        num_success = sum(
            imap(lambda jds: self._result.get(jds, 0), [
                JobDownloadStatus.JOB_OK, JobDownloadStatus.JOB_ALREADY,
                JobDownloadStatus.JOB_INCOMPLETE
            ]))
        return num_success == self._num_jobs

    def register_file_result(self, jobnum, fi_idx, msg, status, proc=None):
        if proc:
            log.log_process(proc, msg=log_intro(jobnum, fi_idx) + msg)
        else:
            log.info(log_intro(jobnum, fi_idx) + msg)
        self._register_result(status)
        return status  # returned file status is actually used later

    def register_job_result(self, jobnum, msg, status):
        log.info(log_intro(jobnum) + msg)
        self._register_result(status)

    def show_results(self):
        def _iter_download_results(cls):
            marker = False
            for stat in sorted(self._result, key=self._result.get):
                if self._result[stat] and (stat in cls.enum_value_list):
                    yield {0: cls.enum2str(stat), 1: self._result[stat]}
                    marker = True
            if marker:
                yield '='
            yield {0: 'Total', 1: self._num_jobs}

        if self._result:
            ConsoleTable.create([(0, 'Status'), (1, '')],
                                _iter_download_results(JobDownloadStatus),
                                title='Job status overview')
            ConsoleTable.create([(0, 'Status'), (1, '')],
                                _iter_download_results(FileDownloadStatus),
                                title='File status overview')

    def _register_result(self, status):
        self._lock.acquire()
        try:
            self._result[status] = self._result.get(status, 0) + 1
        finally:
            self._lock.release()
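
A hedged usage sketch for the monitor above: the register/is_finished/show_results calls come from the snippet, while the job list and the download_job_output helper are hypothetical stand-ins for the surrounding download loop:

# Illustrative driver loop (job_list and download_job_output are assumed, not part of the original)
status_mon = StatusMonitor(num_jobs=len(job_list))
for jobnum in job_list:
    status = download_job_output(jobnum)  # assumed to return a JobDownloadStatus value
    status_mon.register_job_result(jobnum, 'job output processed', status)
if status_mon.is_finished():
    status_mon.show_results()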
Example #4
 def __init__(self, config, workflow):
     GUI.__init__(self, config, workflow)
     install_console_reset()
     self._console_lock = GCLock(threading.RLock())  # terminal output lock
     self._exc = ExceptionCollector()
     (self._redraw_thread, self._redraw_shutdown) = (None, False)
     (self._redraw_event,
      self._immediate_redraw_event) = (GCEvent(rlock=True),
                                       GCEvent(rlock=True))
     self._redraw_interval = config.get_float('gui redraw interval',
                                              0.1,
                                              on_change=None)
     self._redraw_delay = config.get_float('gui redraw delay',
                                           0.05,
                                           on_change=None)
     element = config.get_composited_plugin('gui element',
                                            'report activity log',
                                            'MultiGUIElement',
                                            cls=GUIElement,
                                            on_change=None,
                                            bind_kwargs={'inherit': True},
                                            pargs=(workflow,
                                                   self._redraw_event,
                                                   sys.stdout))
     self._element = FrameGUIElement(config, 'gui', workflow,
                                     self._redraw_event, sys.stdout,
                                     self._immediate_redraw_event, element)
Example #5
class GCStreamHandler(logging.Handler):
	def __init__(self):
		logging.Handler.__init__(self)
		self._lock = GCLock(threading.RLock())

	def get_stream(self):
		raise AbstractError

	def emit(self, record):
		self._lock.acquire()
		try:
			stream = self.get_stream()
			stream.write(self.format(record) + '\n')
			stream.flush()
		finally:
			self._lock.release()
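
get_stream raises AbstractError, so a concrete subclass has to supply the target stream. A minimal sketch of such a subclass (the class name and the choice of sys.stderr are assumptions for illustration):

import sys

class StderrLogHandler(GCStreamHandler):
	def get_stream(self):
		# Hypothetical override: emit every record to the current sys.stderr
		return sys.stderr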
Example #6
class GCStreamHandler(logging.Handler):
    def __init__(self):
        logging.Handler.__init__(self)
        self._lock = GCLock(threading.RLock())

    def get_stream(self):
        raise AbstractError

    def emit(self, record):
        self._lock.acquire()
        try:
            stream = self.get_stream()
            stream.write(self.format(record) + '\n')
            stream.flush()
        finally:
            self._lock.release()
class StatusMonitor(object):
	def __init__(self, num_jobs):
		self._result = {}
		self._lock = GCLock()
		self._num_jobs = num_jobs

	def is_finished(self):
		num_success = sum(imap(lambda jds: self._result.get(jds, 0), [
			JobDownloadStatus.JOB_OK, JobDownloadStatus.JOB_ALREADY, JobDownloadStatus.JOB_INCOMPLETE]))
		return num_success == self._num_jobs

	def register_file_result(self, jobnum, fi_idx, msg, status, proc=None):
		if proc:
			log.log_process(proc, msg=log_intro(jobnum, fi_idx) + msg)
		else:
			log.info(log_intro(jobnum, fi_idx) + msg)
		self._register_result(status)
		return status  # returned file status is actually used later

	def register_job_result(self, jobnum, msg, status):
		log.info(log_intro(jobnum) + msg)
		self._register_result(status)

	def show_results(self):
		def _iter_download_results(cls):
			marker = False
			for stat in sorted(self._result, key=self._result.get):
				if self._result[stat] and (stat in cls.enum_value_list):
					yield {0: cls.enum2str(stat), 1: self._result[stat]}
					marker = True
			if marker:
				yield '='
			yield {0: 'Total', 1: self._num_jobs}

		if self._result:
			ConsoleTable.create([(0, 'Status'), (1, '')],
				_iter_download_results(JobDownloadStatus), title='Job status overview')
			ConsoleTable.create([(0, 'Status'), (1, '')],
				_iter_download_results(FileDownloadStatus), title='File status overview')

	def _register_result(self, status):
		self._lock.acquire()
		try:
			self._result[status] = self._result.get(status, 0) + 1
		finally:
			self._lock.release()
Example #8
	def __init__(self, config, workflow):
		config.set('report', 'BasicReport BarReport')
		(self._stored_stdout, self._stored_stderr) = (sys.stdout, sys.stderr)
		GUI.__init__(self, config, workflow)
		self._reportHeight = 0
		self._statusHeight = 1
		self._old_message = None
		self._lock = GCLock(threading.RLock()) # drawing lock
		self._last_report = 0
		self._old_size = None
Example #9
	def __init__(self, path):
		activity = utils.ActivityLog('Reading dataset partition file')
		self._lock = GCLock()
		self._fmt = utils.DictFormat()
		self._tar = tarfile.open(path, 'r:')
		(self._cacheKey, self._cacheTar) = (None, None)

		metadata = self._fmt.parse(self._tar.extractfile('Metadata').readlines(), keyParser = {None: str})
		self.maxJobs = metadata.pop('MaxJobs')
		self.classname = metadata.pop('ClassName')
		self.metadata = {'dataset': dict(ifilter(lambda k_v: not k_v[0].startswith('['), metadata.items()))}
		for (k, v) in ifilter(lambda k_v: k_v[0].startswith('['), metadata.items()):
			self.metadata.setdefault('dataset %s' % k.split(']')[0].lstrip('['), {})[k.split(']')[1].strip()] = v
		activity.finish()

		self._parserMap = { None: str, DataSplitter.NEntries: int, DataSplitter.Skipped: int,
			DataSplitter.DatasetID: int, DataSplitter.Invalid: parseBool,
			DataSplitter.Locations: lambda x: parseList(x, ','),
			DataSplitter.MetadataHeader: parseJSON,
			DataSplitter.Metadata: lambda x: parseJSON(x.strip("'")) }
class GCStreamHandler(logging.Handler):
	global_instances = []
	global_lock = GCLock()

	# In contrast to StreamHandler, this logging handler doesn't keep a stream copy
	def __init__(self):
		logging.Handler.__init__(self)
		self.lock = GCLock(threading.RLock())  # default-allocated lock is sometimes non-reentrant
		self.global_lock = None
		GCStreamHandler.global_instances.append(self)

	def __del__(self):
		GCStreamHandler.global_instances.remove(self)

	def emit(self, record):  # locking done by handle
		stream = self.get_stream()
		stream.write(self.format(record) + '\n')
		stream.flush()

	def get_stream(self):
		raise AbstractError

	def handle(self, record):
		filter_result = self.filter(record)
		if filter_result:
			with_lock(self.global_lock or self.lock, self.emit, record)
		return filter_result

	def pop_std_stream(cls):
		def _pop_std_stream(handler_cls):
			ignore_exception(AttributeError, None, lambda stream: stream.disable(), handler_cls.stream[-1])
			handler_cls.stream.pop()
			ignore_exception(AttributeError, None, lambda stream: stream.enable(), handler_cls.stream[-1])
		_pop_std_stream(StdoutStreamHandler)
		_pop_std_stream(StderrStreamHandler)
	pop_std_stream = classmethod(pop_std_stream)

	def push_std_stream(cls, stream_stdout, stream_stderr):
		def _push_std_stream(handler_cls, user_stream):
			ignore_exception(AttributeError, None, lambda stream: stream.disable(), handler_cls.stream[-1])
			handler_cls.stream.append(user_stream)
			ignore_exception(AttributeError, None, lambda stream: stream.enable(), handler_cls.stream[-1])
		_push_std_stream(StdoutStreamHandler, stream_stdout)
		_push_std_stream(StderrStreamHandler, stream_stderr)
	push_std_stream = classmethod(push_std_stream)

	def set_global_lock(cls, lock=None):
		GCStreamHandler.global_lock.acquire()
		for instance in GCStreamHandler.global_instances:
			instance.acquire()
			instance.global_lock = lock
			instance.release()
		GCStreamHandler.global_lock.release()
	set_global_lock = classmethod(set_global_lock)
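
A short sketch of how the class-level helpers above might be combined: install a shared lock so all handlers serialize their output, temporarily push user-supplied streams, and restore everything afterwards (the two stream objects are hypothetical placeholders):

# Illustrative only - my_stdout_stream / my_stderr_stream are assumed stream-like objects
GCStreamHandler.set_global_lock(GCLock(threading.RLock()))
GCStreamHandler.push_std_stream(my_stdout_stream, my_stderr_stream)
try:
	logging.getLogger().info('handler output now goes to the user streams')
finally:
	GCStreamHandler.pop_std_stream()
	GCStreamHandler.set_global_lock()  # back to per-instance locks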
Example #11
def download_monitored(jobNum, output, fileIdx, checkPath, sourcePath, targetPath):
	copyAbortLock = GCLock()
	monitorLock = GCLock()
	monitorLock.acquire()
	monitor = start_thread('Download monitor %s' % jobNum, transfer_monitor, output, fileIdx, checkPath, monitorLock, copyAbortLock)
	result = -1
	procCP = se_copy(sourcePath, targetPath, tmp = checkPath)
	while True:
		if not copyAbortLock.acquire(False):
			monitor.join()
			break
		copyAbortLock.release()
		result = procCP.status(timeout = 0)
		if result is not None:
			monitorLock.release()
			monitor.join()
			break
		time.sleep(0.02)

	if result != 0:
		output.error('Unable to copy file from SE!')
		log.critical('%s\n%s\n', procCP.stdout.read(timeout = 0), procCP.stderr.read(timeout = 0))
		return False
	return True
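
In download_monitored, copyAbortLock doubles as an abort flag: the monitor thread grabs it to signal that the copy should stop, and the loop checks this with a non-blocking acquire(False). A self-contained sketch of that handshake using plain threading primitives (independent of the grid-control helpers shown here):

import threading
import time

abort_lock = threading.Lock()

def monitor(delay):
	# Hypothetical monitor: after some work, grab the lock to request an abort
	time.sleep(delay)
	abort_lock.acquire()

threading.Thread(target=monitor, args=(0.1,)).start()
while True:
	if not abort_lock.acquire(False):  # someone else holds the lock -> abort requested
		print('copy aborted by monitor')
		break
	abort_lock.release()
	time.sleep(0.02)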
Example #12
class LocalPurgeJobs(CancelJobs):
    purge_lock = GCLock()

    def __init__(self, config, sandbox_helper):
        CancelJobs.__init__(self, config)
        self._sandbox_helper = sandbox_helper

    def execute(self, wms_id_list,
                wms_name):  # yields list of purged (wms_id,)
        activity = Activity('waiting for jobs to finish')
        time.sleep(5)
        for wms_id in wms_id_list:
            path = self._sandbox_helper.get_sandbox('WMSID.%s.%s' %
                                                    (wms_name, wms_id))
            if path is None:
                self._log.warning('Sandbox for job %r could not be found',
                                  wms_id)
                continue
            with_lock(LocalPurgeJobs.purge_lock, _purge_directory, self._log,
                      path, wms_id)
            yield (wms_id, )
        activity.finish()
Example #13
class MultiActivityMonitor(ActivityMonitor):
    alias_list = ['multi_stream']
    global_lock = GCLock(threading.RLock())

    def __init__(self, config, stream, register_callback=False):
        ActivityMonitor.__init__(self, config, stream, register_callback)
        self._fold = config.get_float('activity fold fraction',
                                      0.5,
                                      on_change=None)
        install_console_reset()

    def write(self, value=''):
        with_lock(MultiActivityMonitor.global_lock, self._write, value)

    def _format_activity(self, width, level, activity, activity_list):
        msg = activity.get_msg(truncate=width - 5 - 2 * level)
        return '  ' * level + ANSI.color_grayscale(1 - level /
                                                   5.) + msg + ANSI.reset

    def _write(self, value):
        max_yx = Console.getmaxyx()
        max_x = min(max_yx[1], self._msg_len_max)
        value = value.replace('\n', ANSI.erase_line + '\n' + ANSI.erase_line)
        self._stream.write(value + '\n' + ANSI.wrap_off)
        activity_list = list(Activity.root.get_children())
        max_depth = int(max_yx[0] * self._fold)
        while len(activity_list) > int(max_yx[0] * self._fold):
            activity_list = lfilter(
                lambda activity: activity.depth - 1 < max_depth, activity_list)
            max_depth -= 1
        for activity in activity_list:
            msg = self._format_activity(max_x, activity.depth - 1, activity,
                                        activity_list)
            self._stream.write(ANSI.erase_line + msg + '\n')
        self._stream.write(ANSI.erase_down +
                           ANSI.move_up(len(activity_list) + 1) + ANSI.wrap_on)
        self._stream.flush()
Example #14
 def __init__(self):
     logging.Handler.__init__(self)
     self._lock = GCLock(threading.RLock())
Example #15
def download_monitored(jobNum, output, fileIdx, checkPath, sourcePath,
                       targetPath):
    copyAbortLock = GCLock()
    monitorLock = GCLock()
    monitorLock.acquire()
    monitor = start_thread('Download monitor %s' % jobNum, transfer_monitor,
                           output, fileIdx, checkPath, monitorLock,
                           copyAbortLock)
    result = -1
    procCP = se_copy(sourcePath, targetPath, tmp=checkPath)
    while True:
        if not copyAbortLock.acquire(False):
            monitor.join()
            break
        copyAbortLock.release()
        result = procCP.status(timeout=0)
        if result is not None:
            monitorLock.release()
            monitor.join()
            break
        time.sleep(0.02)

    if result != 0:
        output.error('Unable to copy file from SE!')
        log.critical('%s\n%s\n', procCP.stdout.read(timeout=0),
                     procCP.stderr.read(timeout=0))
        return False
    return True
Example #16
class Activity(object):
    lock = GCLock()
    counter = 0
    running_by_thread_name = {}
    callbacks = []

    def __init__(self,
                 msg=None,
                 level=logging.INFO,
                 name=None,
                 parent=None,
                 fmt='%(msg)s...',
                 log=False,
                 logger=None
                 ):  # log == None - only at start/finish; log == True - always
        (self._level, self._msg_dict, self._fmt) = (level, {'msg': ''}, fmt)
        (self.name, self._parent, self._children) = (name, None, [])
        (self._log, self._logger) = (log, logger or logging.getLogger())
        if (self._log is not False) and msg:
            self._logger.log(level, msg)
        self._current_thread_name = get_thread_name()

        with_lock(Activity.lock, self._add_activity, parent)
        self.depth = len(list(self.get_parents()))

        if self._parent:
            self._parent.add_child(self)
        self.update(msg)

    def __del__(self):
        self.finish()

    def __repr__(self):
        parent_name = None
        if self._parent:
            parent_name = self._parent.name
        return '%s(name: %r, msg_dict: %r, lvl: %s, depth: %d, parent: %s)' % (
            self.__class__.__name__, self.name, self._msg_dict, self._level,
            self.depth, parent_name)

    def add_child(self, value):
        self._children.append(value)

    def finish(self):
        for child in list(self._children):
            child.finish()
        if self._parent:
            self._parent.remove_child(self)
        running_list = Activity.running_by_thread_name.get(
            self._current_thread_name, [])
        if self in running_list:
            running_list.remove(self)
            if self._log is not False:
                self._logger.log(self._level, self.get_msg() + ' finished')

    def get_children(self):
        for child in self._children:
            yield child
            for subchild in child.get_children():
                yield subchild

    def get_msg(self, truncate=None, last=35):
        msg = (self._fmt % self._msg_dict).strip()
        if (truncate is not None) and (len(msg) > truncate):
            msg = msg[:truncate - last - 3] + '...' + msg[-last:]
        return msg

    def get_parents(self):
        if self._parent is not None:
            for parent in self._parent.get_parents():
                yield parent
            yield self._parent

    def remove_child(self, value):
        if value in self._children:
            self._children.remove(value)

    def update(self, msg):
        self._set_msg(msg=msg)

    def _add_activity(self, parent):
        Activity.counter += 1
        # search parent:
        self._cleanup_running()  # cleanup list of running activities
        for parent_candidate in self._iter_possible_parents(
                self._current_thread_name):
            if (parent is None) or (parent == parent_candidate.name):
                self._parent = parent_candidate
                break
        if (parent is not None) and (self._parent is None):
            raise APIError('Invalid parent given!')
        # set this activity as topmost activity in the current thread
        Activity.running_by_thread_name.setdefault(self._current_thread_name,
                                                   []).append(self)

    def _cleanup_running(self):
        # clean running activity list
        running_thread_names = set(imap(get_thread_name,
                                        threading.enumerate()))
        for thread_name in list(Activity.running_by_thread_name):
            if thread_name not in running_thread_names:
                finished_activities = Activity.running_by_thread_name.get(
                    thread_name, [])
                while finished_activities:
                    finished_activities[-1].finish()
                Activity.running_by_thread_name.pop(thread_name, None)

    def _iter_possible_parents(self, current_thread_name):
        # yield activities in current and parent threads
        stack = list(
            Activity.running_by_thread_name.get(current_thread_name, []))
        stack.reverse()  # in reverse order of creation
        for item in stack:
            yield item
        if '-' in current_thread_name:
            for item in self._iter_possible_parents(
                    rsplit(current_thread_name, '-', 1)[0]):
                yield item

    def _set_msg(self, **kwargs):
        self._msg_dict.update(
            filter_dict(kwargs, value_filter=lambda value: value is not None))
        for callback in Activity.callbacks:
            callback()
        if self._log:
            self._logger.log(self._level, self.get_msg())
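
The Activity class above is what earlier snippets wrap around long-running steps. A minimal usage sketch built from the calls it exposes (the message strings are illustrative):

activity = Activity('Reading dataset partition file')
sub_activity = Activity('Parsing metadata')  # becomes a child of the enclosing activity in this thread
sub_activity.finish()
activity.update('Reading dataset partition file (almost done)')
activity.finish()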
class StatusMonitor(object):
	def __init__(self, num_jobs):
		self._result = {}
		self._lock = GCLock()
		self._num_jobs = num_jobs
Example #18
class AFSAccessToken(RefreshableAccessToken):
    alias_list = ['afs', 'AFSProxy', 'KerberosAccessToken']
    env_lock = GCLock()

    def __init__(self, config, name):
        RefreshableAccessToken.__init__(self, config, name)
        self._kinit_exec = resolve_install_path('kinit')
        self._klist_exec = resolve_install_path('klist')
        self._aklog_exec = resolve_install_path('aklog')
        self._cache = None
        self._map_auth_name2fn = dict(
            imap(lambda name: (name, config.get_work_path('proxy.%s' % name)),
                 ['KRB5CCNAME', 'KRBTKFILE']))
        self._auth_fn_list = []
        with_lock(AFSAccessToken.env_lock, self._backup_tickets, config)
        self._tickets = config.get_list('tickets', [], on_change=None)

    def get_auth_fn_list(self):
        return self._auth_fn_list

    def get_fq_user_name(self):
        return self._get_principal()

    def get_group(self):
        return self._get_principal().split('@')[1]

    def get_user_name(self):
        return self._get_principal().split('@')[0]

    def _backup_tickets(self, config):
        import stat, shutil
        for name in self._map_auth_name2fn:  # store kerberos files in work directory for persistency
            if name in os.environ:
                fn = os.environ[name].replace('FILE:', '')
                if fn != self._map_auth_name2fn[name]:
                    shutil.copyfile(fn, self._map_auth_name2fn[name])
                os.chmod(self._map_auth_name2fn[name],
                         stat.S_IRUSR | stat.S_IWUSR)
                os.environ[name] = self._map_auth_name2fn[name]
                self._auth_fn_list.append(os.environ[name])

    def _get_principal(self):
        info = self._parse_tickets()
        return info.get('default principal', info.get('principal'))

    def _get_timeleft(self, cached):
        info = self._parse_tickets(cached)['tickets']
        time_current = time.time()
        time_end = None
        for ticket in info:
            if (self._tickets and (ticket not in self._tickets)) or not ticket:
                continue
            if time_end is None:
                time_end = info[ticket]
            time_end = min(info[ticket], time_end)
        time_end = time_end or 0
        return time_end - time_current

    def _parse_tickets(self, cached=True):
        # Return cached results if requested
        if cached and self._cache:
            return self._cache
        # Call klist and parse results
        proc = LocalProcess(self._klist_exec)
        self._cache = {}
        try:
            for line in proc.stdout.iter(timeout=10):
                if line.count('@') and (line.count(':') > 1):
                    issued_expires, principal = rsplit(line, '  ', 1)
                    issued_expires = issued_expires.replace('/', ' ').split()
                    assert len(issued_expires) % 2 == 0
                    issued_str = str.join(
                        ' ', issued_expires[:int(len(issued_expires) / 2)])
                    expires_str = str.join(
                        ' ', issued_expires[int(len(issued_expires) / 2):])

                    if expires_str.count(' ') == 3:
                        if len(expires_str.split()[2]) == 2:
                            expires = _parse_date(expires_str,
                                                  '%m %d %y %H:%M:%S')
                        else:
                            expires = _parse_date(expires_str,
                                                  '%m %d %Y %H:%M:%S')
                    elif expires_str.count(' ') == 2:  # year information is missing
                        cur_year = int(time.strftime('%Y'))
                        expires = _parse_date(expires_str + ' %d' % cur_year,
                                              '%b %d %H:%M:%S %Y')
                        issued = _parse_date(issued_str + ' %d' % cur_year,
                                             '%b %d %H:%M:%S %Y')
                        if expires < issued:  # wraparound at new year
                            expires = _parse_date(
                                expires_str + ' %d' % (cur_year + 1),
                                '%b %d %H:%M:%S %Y')
                    self._cache.setdefault('tickets', {})[principal] = expires
                elif line.count(':') == 1:
                    (key, value) = lmap(str.strip, line.split(':', 1))
                    self._cache[key.lower()] = value
        except Exception:
            raise AccessTokenError(
                'Unable to parse kerberos ticket information!')
        proc.status_raise(timeout=0)
        return self._cache

    def _refresh_access_token(self):
        timeleft_before = str_time_long(self._get_timeleft(cached=False))
        LocalProcess(self._kinit_exec, '-R').finish(timeout=10)
        LocalProcess(self._aklog_exec).finish(timeout=10)
        timeleft_after = str_time_long(self._get_timeleft(cached=False))
        self._log.log(logging.INFO2,
                      'Time left for access token "%s" changed from %s to %s',
                      self.get_object_name(), timeleft_before, timeleft_after)
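
_parse_date is referenced by _parse_tickets but not shown in this snippet; a plausible sketch, assuming it merely converts a formatted timestamp string into seconds since the epoch:

import time

def _parse_date(value, fmt):
    # Assumed helper: turn a klist timestamp into epoch seconds
    return time.mktime(time.strptime(value, fmt))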
Example #19
    def status(self, timeout, terminate=False):
        self._event_finished.wait(timeout, 'process to finish')
        if self._status is False:
            return 'OS_ABORT'
        elif self._status is not None:  # return either signal name or exit code
            if os.WIFSIGNALED(self._status):
                return self._signal_dict.get(os.WTERMSIG(self._status),
                                             'SIG_UNKNOWN')
            elif os.WIFEXITED(self._status):
                return os.WEXITSTATUS(self._status)
        if terminate:
            return self.terminate(timeout=1)

    def terminate(self, timeout):
        status = self.status(timeout=0)
        if status is not None:
            return status
        self.kill(signal.SIGTERM)
        result = self.status(timeout, terminate=False)
        if result is not None:
            return result
        self.kill(signal.SIGKILL)
        return self.status(timeout, terminate=False)

    def kill(self, sig=signal.SIGTERM):
        if not self._event_finished.is_set():
            try:
                os.kill(self._pid, sig)
            except OSError:
                if sys.exc_info()[1].errno != errno.ESRCH:  # errno.ESRCH: no such process (already dead)
                    raise


LocalProcess.fdCreationLock = GCLock()
Example #20
class LocalProcess(Process):
    fd_creation_lock = GCLock()

    def __init__(self, cmd, *args, **kwargs):
        self._signal_dict = {}
        for attr in dir(signal):
            if attr.startswith('SIG') and ('_' not in attr):
                self._signal_dict[getattr(signal, attr)] = attr
        terminal = kwargs.pop('term', 'vt100')
        (self._status, self._runtime, self._pid) = (None, None, None)
        Process.__init__(self, cmd, *args, **kwargs)
        if terminal is not None:
            self._env_dict['TERM'] = terminal

    def kill(self, sig=signal.SIGTERM):
        if not self._event_finished.is_set():
            try:
                os.kill(self._pid, sig)
            except OSError:
                if get_current_exception().errno != errno.ESRCH:  # errno.ESRCH: no such process (already dead)
                    raise
                clear_current_exception()

    def status(self, timeout, terminate=False):
        self._event_finished.wait(timeout, 'process to finish')
        if self._status is False:
            return 'OS_ABORT'
        elif self._status is not None:  # return either signal name or exit code
            if os.WIFSIGNALED(self._status):
                return self._signal_dict.get(os.WTERMSIG(self._status),
                                             'SIG_UNKNOWN')
            elif os.WIFEXITED(self._status):
                return os.WEXITSTATUS(self._status)
        if terminate:
            return self.terminate(timeout=1)

    def terminate(self, timeout):
        status = self.status(timeout=0)
        if status is not None:
            return status
        self.kill(signal.SIGTERM)
        result = self.status(timeout, terminate=False)
        if result is not None:
            return result
        self.kill(signal.SIGKILL)
        return self.status(timeout, terminate=False)

    def _handle_input(cls, fd_write, buffer, event_shutdown):
        local_buffer = ''
        while not event_shutdown.is_set():
            if local_buffer:  # local buffer has leftover bytes from last write - just poll for more
                local_buffer = buffer.get(timeout=0, default='')
            else:  # empty local buffer - wait for data to process
                local_buffer = buffer.get(timeout=1, default='')
            if local_buffer:
                _wait_fd(fd_write_list=[fd_write])
                if not event_shutdown.is_set():
                    written = ignore_exception(OSError, 0, os.write, fd_write,
                                               str2bytes(local_buffer))
                    local_buffer = local_buffer[written:]

    _handle_input = classmethod(_handle_input)

    def _handle_output(cls, fd_read, buffer, event_shutdown):
        def _read_to_buffer():
            while True:
                try:
                    tmp = bytes2str(os.read(fd_read, 32 * 1024))
                except OSError:
                    break
                if not tmp:
                    break
                buffer.put(tmp)

        while not event_shutdown.is_set():
            _wait_fd(fd_read_list=[fd_read])
            _read_to_buffer()
        _read_to_buffer()  # Final readout after process finished

    _handle_output = classmethod(_handle_output)

    def _interact_with_child(self, pid, fd_parent_stdin, fd_parent_stdout,
                             fd_parent_stderr):
        thread_in = self._start_watcher('stdin', False, pid,
                                        self._handle_input, fd_parent_stdin,
                                        self._buffer_stdin,
                                        self._event_shutdown)
        thread_out = self._start_watcher('stdout', False, pid,
                                         self._handle_output, fd_parent_stdout,
                                         self._buffer_stdout,
                                         self._event_shutdown)
        thread_err = self._start_watcher('stderr', False, pid,
                                         self._handle_output, fd_parent_stderr,
                                         self._buffer_stderr,
                                         self._event_shutdown)
        while self._status is None:
            # blocking (with spurious wakeups!) - OSError=unable to wait for child - status=False => OS_ABORT
            (result_pid, status) = ignore_exception(OSError, (pid, False),
                                                    os.waitpid, pid, 0)
            if result_pid == pid:
                self._status = status
        self._time_finished = time.time()
        self._event_shutdown.set()  # start shutdown of handlers and wait for it to finish
        self._buffer_stdin.finish()  # wakeup process input handler
        thread_in.join()
        thread_out.join()
        thread_err.join()
        for fd_open in set(
            [fd_parent_stdin, fd_parent_stdout, fd_parent_stderr]):
            os.close(fd_open)  # fd_parent_stdin == fd_parent_stdout for pty
        self._buffer_stdout.finish()  # wakeup pending output buffer waits
        self._buffer_stderr.finish()
        self._event_finished.set()

    def _setup_terminal(self, fd_terminal):
        attr = termios.tcgetattr(fd_terminal)
        attr[1] = attr[1] & ~termios.ONLCR  # disable \n -> \r\n
        attr[3] = attr[3] & ~termios.ECHO  # disable terminal echo
        attr[3] = attr[3] | termios.ICANON  # enable canonical mode
        attr[3] = attr[3] | termios.ISIG  # enable signals
        self.stdin.EOF = bytes2str(
            termios.tcgetattr(fd_terminal)[6][termios.VEOF])
        termios.tcsetattr(fd_terminal, termios.TCSANOW, attr)

    def _start(self):
        (self._status, self._runtime, self._pid) = (None, None, None)
        # Setup of file descriptors - stdin / stdout via pty, stderr via pipe
        LocalProcess.fd_creation_lock.acquire()
        try:
            # terminal is used for stdin / stdout
            fd_parent_terminal, fd_child_terminal = os.openpty()
            fd_parent_stdin, fd_child_stdin = (fd_parent_terminal,
                                               fd_child_terminal)
            fd_parent_stdout, fd_child_stdout = (fd_parent_terminal,
                                                 fd_child_terminal)
            fd_parent_stderr, fd_child_stderr = os.pipe()  # Returns (r, w) FDs
        finally:
            LocalProcess.fd_creation_lock.release()

        self._setup_terminal(fd_parent_terminal)
        for fd_setup in [fd_parent_stdout, fd_parent_stderr
                         ]:  # non-blocking operation on stdout/stderr
            fcntl.fcntl(fd_setup, fcntl.F_SETFL,
                        os.O_NONBLOCK | fcntl.fcntl(fd_setup, fcntl.F_GETFL))

        pid = os.fork()
        self._time_started = time.time()
        self._time_finished = None
        fd_map = {0: fd_child_stdin, 1: fd_child_stdout, 2: fd_child_stderr}
        if pid == 0:  # We are in the child process - redirect streams and exec external program
            from grid_control.utils.process_child import run_command
            run_command(self._cmd, [self._cmd] + self._args, fd_map,
                        self._env_dict)

        else:  # Still in the parent process - setup threads to communicate with external program
            os.close(fd_child_terminal)
            os.close(fd_child_stderr)
            self._pid = pid
            self._start_watcher('interact', True, pid,
                                self._interact_with_child, pid,
                                fd_parent_stdin, fd_parent_stdout,
                                fd_parent_stderr)

    def _start_watcher(self, desc, daemon, pid, *args):
        desc += ' (%d:%r)' % (pid, [self._cmd] + self._args)
        if daemon:
            return start_daemon(desc, *args)
        return start_thread(desc, *args)
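
A brief usage sketch mirroring how the AFSAccessToken example drives LocalProcess; the command path is an example, and stdout.iter/status are the calls used elsewhere in these snippets (stdout itself is assumed to come from the Process base class):

# Illustrative usage - '/usr/bin/klist' is just an example command
proc = LocalProcess('/usr/bin/klist')
for line in proc.stdout.iter(timeout=10):
    print(line.rstrip())
exit_code = proc.status(timeout=0)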
Example #21
 def __init__(self):
     logging.Handler.__init__(self)
     self.lock = GCLock(threading.RLock())  # default-allocated lock is sometimes non-reentrant
     self.global_lock = None
     GCStreamHandler.global_instances.append(self)
Example #22
class ANSIGUI(GUI):
	def __init__(self, config, workflow):
		config.set('report', 'BasicReport BarReport')
		(self._stored_stdout, self._stored_stderr) = (sys.stdout, sys.stderr)
		GUI.__init__(self, config, workflow)
		self._reportHeight = 0
		self._statusHeight = 1
		self._old_message = None
		self._lock = GCLock(threading.RLock()) # drawing lock
		self._last_report = 0
		self._old_size = None

	def _draw(self, fun):
		new_size = self._console.getmaxyx()
		if self._old_size != new_size:
			self._old_size = new_size
			self._schedule_update_layout()
		self._lock.acquire()
		self._console.hideCursor()
		self._console.savePos()
		try:
			fun()
		finally:
			self._console.loadPos()
			self._console.showCursor()
			self._lock.release()

	# Event handling for resizing
	def _update_layout(self):
		(sizey, sizex) = self._console.getmaxyx()
		self._old_size = (sizey, sizex)
		self._reportHeight = self._report.getHeight()
		self._console.erase()
		self._console.setscrreg(min(self._reportHeight + self._statusHeight + 1, sizey), sizey)
		utils.printTabular.wraplen = sizex - 5
		self._update_all()

	def _schedule_update_layout(self, sig = None, frame = None):
		start_thread('update layout', self._draw, self._update_layout) # using new thread to ensure RLock is free

	def _wait(self, timeout):
		oldHandler = signal.signal(signal.SIGWINCH, self._schedule_update_layout)
		result = utils.wait(timeout)
		signal.signal(signal.SIGWINCH, oldHandler)
		return result

	def _update_report(self):
		if time.time() - self._last_report < 1:
			return
		self._last_report = time.time()
		self._console.move(0, 0)
		self._new_stdout.logged = False
		self._report.display()
		self._new_stdout.logged = True

	def _update_status(self):
		activity_message = None
		for activity in Activity.root.get_children():
			activity_message = activity.getMessage() + '...'
			if len(activity_message) > 75:
				activity_message = activity_message[:37] + '...' + activity_message[-35:]

		self._console.move(self._reportHeight + 1, 0)
		self._new_stdout.logged = False
		if self._old_message:
			self._stored_stdout.write(self._old_message.center(65) + '\r')
			self._stored_stdout.flush()
		self._old_message = activity_message
		if activity_message:
			self._stored_stdout.write('%s' % activity_message.center(65))
			self._stored_stdout.flush()
		self._new_stdout.logged = True

	def _update_log(self):
		self._console.move(self._reportHeight + 2, 0)
		self._console.eraseDown()
		self._new_stdout.dump()

	def _update_all(self):
		self._last_report = 0
		self._update_report()
		self._update_status()
		self._update_log()

	def _schedule_update_report_status(self):
		self._draw(self._update_report)
		self._draw(self._update_status)

	def displayWorkflow(self):
		if not sys.stdout.isatty():
			return self._workflow.process(self._wait)

		self._console = Console(sys.stdout)
		self._new_stdout = GUIStream(sys.stdout, self._console, self._lock)
		self._new_stderr = GUIStream(sys.stderr, self._console, self._lock)
		Activity.callbacks.append(self._schedule_update_report_status)
		try:
			# Main cycle - GUI mode
			(sys.stdout, sys.stderr) = (self._new_stdout, self._new_stderr)
			self._console.erase()
			self._schedule_update_layout()
			self._workflow.process(self._wait)
		finally:
			(sys.stdout, sys.stderr) = (self._stored_stdout, self._stored_stderr)
			self._console.setscrreg()
			self._console.erase()
			self._update_all()
Example #23
	def __init__(self):
		logging.Handler.__init__(self)
		self._lock = GCLock(threading.RLock())
class StatusMonitor(object):
    def __init__(self, num_jobs):
        self._result = {}
        self._lock = GCLock()
        self._num_jobs = num_jobs
Example #25
    def finish(self):
        for child in list(self._children):
            child.finish()
        if self._parent:
            self._parent.remove_child(self)
        running_list = Activity.running_by_thread_name.get(
            self._current_thread_name, [])
        if self in running_list:
            running_list.remove(self)

    def __del__(self):
        self.finish()

    def get_parents(self):
        if self._parent is not None:
            for parent in self._parent.get_parents():
                yield parent
            yield self._parent

    def get_children(self):
        for child in self._children:
            yield child
            for subchild in child.get_children():
                yield subchild


Activity.lock = GCLock()
Activity.counter = 0
Activity.running_by_thread_name = {}
Activity.callbacks = []
Activity.root = Activity('Running grid-control', name='root')
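
Activity.root, created above, is the anchor that the GUI snippets walk to render the currently running activities; a brief traversal sketch using the get_children generator together with the depth and get_msg members from the fuller Activity listing above:

for activity in Activity.root.get_children():
    print('  ' * (activity.depth - 1) + activity.get_msg())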
Example #26
class ANSIGUI(GUI):
    def __init__(self, config, workflow):
        config.set('report', 'BasicReport BarReport')
        (self._stored_stdout, self._stored_stderr) = (sys.stdout, sys.stderr)
        GUI.__init__(self, config, workflow)
        self._reportHeight = 0
        self._statusHeight = 1
        self._old_message = None
        self._lock = GCLock(threading.RLock())  # drawing lock
        self._last_report = 0
        self._old_size = None

    def _draw(self, fun):
        new_size = self._console.getmaxyx()
        if self._old_size != new_size:
            self._old_size = new_size
            self._schedule_update_layout()
        self._lock.acquire()
        self._console.hideCursor()
        self._console.savePos()
        try:
            fun()
        finally:
            self._console.loadPos()
            self._console.showCursor()
            self._lock.release()

    # Event handling for resizing
    def _update_layout(self):
        (sizey, sizex) = self._console.getmaxyx()
        self._old_size = (sizey, sizex)
        self._reportHeight = self._report.getHeight()
        self._console.erase()
        self._console.setscrreg(
            min(self._reportHeight + self._statusHeight + 1, sizey), sizey)
        utils.printTabular.wraplen = sizex - 5
        self._update_all()

    def _schedule_update_layout(self, sig=None, frame=None):
        start_thread(
            'update layout', self._draw,
            self._update_layout)  # using new thread to ensure RLock is free

    def _wait(self, timeout):
        oldHandler = signal.signal(signal.SIGWINCH,
                                   self._schedule_update_layout)
        result = utils.wait(timeout)
        signal.signal(signal.SIGWINCH, oldHandler)
        return result

    def _update_report(self):
        if time.time() - self._last_report < 1:
            return
        self._last_report = time.time()
        self._console.move(0, 0)
        self._new_stdout.logged = False
        self._report.display()
        self._new_stdout.logged = True

    def _update_status(self):
        activity_message = None
        for activity in Activity.root.get_children():
            activity_message = activity.getMessage() + '...'
            if len(activity_message) > 75:
                activity_message = activity_message[:37] + '...' + activity_message[-35:]

        self._console.move(self._reportHeight + 1, 0)
        self._new_stdout.logged = False
        if self._old_message:
            self._stored_stdout.write(self._old_message.center(65) + '\r')
            self._stored_stdout.flush()
        self._old_message = activity_message
        if activity_message:
            self._stored_stdout.write('%s' % activity_message.center(65))
            self._stored_stdout.flush()
        self._new_stdout.logged = True

    def _update_log(self):
        self._console.move(self._reportHeight + 2, 0)
        self._console.eraseDown()
        self._new_stdout.dump()

    def _update_all(self):
        self._last_report = 0
        self._update_report()
        self._update_status()
        self._update_log()

    def _schedule_update_report_status(self):
        self._draw(self._update_report)
        self._draw(self._update_status)

    def displayWorkflow(self):
        if not sys.stdout.isatty():
            return self._workflow.process(self._wait)

        self._console = Console(sys.stdout)
        self._new_stdout = GUIStream(sys.stdout, self._console, self._lock)
        self._new_stderr = GUIStream(sys.stderr, self._console, self._lock)
        Activity.callbacks.append(self._schedule_update_report_status)
        try:
            # Main cycle - GUI mode
            (sys.stdout, sys.stderr) = (self._new_stdout, self._new_stderr)
            self._console.erase()
            self._schedule_update_layout()
            self._workflow.process(self._wait)
        finally:
            (sys.stdout, sys.stderr) = (self._stored_stdout,
                                        self._stored_stderr)
            self._console.setscrreg()
            self._console.erase()
            self._update_all()