Example #1
def tok2tree(value, precedence):
	value = list(value)
	errorStr = str.join('', map(str, value))
	tokens = iter(value)
	token = next(tokens, None)
	tokStack = []
	opStack = []

	def clearOPStack(opList):
		# reduce pending operators from opList into (operator, operand list) nodes
		while len(opStack) and (opStack[-1][0] in opList):
			operator = opStack.pop()
			tmp = []
			# a merged operator of length n (eg. ',,') collects n + 1 operands
			for x in range(len(operator) + 1):
				tmp.append(tokStack.pop())
			tmp.reverse()
			tokStack.append((operator[0], tmp))

	def collectNestedTokens(tokens, left, right, errMsg):
		level = 1
		token = next(tokens, None)
		while token:
			if token == left:
				level += 1
			elif token == right:
				level -= 1
				if level == 0:
					break
			yield token
			token = next(tokens, None)
		if level != 0:
			raise ConfigError(errMsg)

	while token:
		if token == '(':
			tmp = list(collectNestedTokens(tokens, '(', ')', "Parenthesis error: " + errorStr))
			tokStack.append(tok2tree(tmp, precedence))
		elif token == '<':
			tmp = list(collectNestedTokens(tokens, '<', '>', "Parenthesis error: " + errorStr))
			tokStack.append(('ref', tmp))
		elif token == '[':
			tmp = list(collectNestedTokens(tokens, '[', ']', "Parenthesis error: " + errorStr))
			tokStack.append(('lookup', [tokStack.pop(), tok2tree(tmp, precedence)]))
		elif token in precedence:
			clearOPStack(precedence[token])
			if opStack and opStack[-1].startswith(token):
				opStack[-1] = opStack[-1] + token  # merge repeated operators into one n-ary entry
			else:
				opStack.append(token)
		else:
			tokStack.append(token)
		token = next(tokens, None)

	clearOPStack(precedence.keys())
	assert(len(tokStack) == 1)
	return tokStack[0]
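A minimal usage sketch for the snippet above; the precedence map is an assumption (each value lists the operators that bind more tightly and must be reduced before the key operator is pushed):

# Hypothetical call - assumes tok2tree from above is in scope
precedence = {'*': '', '+': '*', ',': '*+'}
print(tok2tree(['a', '*', 'b', '+', 'c'], precedence))
# prints: ('+', [('*', ['a', 'b']), 'c'])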
Example #2
def _token_list2token_tree(value):
	token_list = list(value)
	error_template = str.join('', imap(str, token_list))
	token_iter = iter(token_list)
	token = next(token_iter, None)
	token_stack = []
	operator_stack = []
	add_operator = False

	def _collect_nested_tokens(token_iter, left, right, error_msg):
		level = 1
		token = next(token_iter, None)
		while token:
			if token == left:
				level += 1
			elif token == right:
				level -= 1
				if level == 0:
					break
			yield token
			token = next(token_iter, None)
		if level != 0:
			raise ParameterError(error_msg)

	while token:
		if add_operator and (token not in ['*', '+', ',', '[']):
			operator_stack.append('*')
		if token == '(':  # replace tokens between ( ) with evaluated tree
			tmp = list(_collect_nested_tokens(token_iter, '(', ')', 'Parenthesis error: ' + error_template))
			token_stack.append(_token_list2token_tree(tmp))
		elif token == '<':  # forward raw tokens between < >
			tmp = list(_collect_nested_tokens(token_iter, '<', '>', 'Parenthesis error: ' + error_template))
			token_stack.append(('<>', tmp))
		elif token == '{':  # forward raw tokens between { }
			tmp = list(_collect_nested_tokens(token_iter, '{', '}', 'Parenthesis error: ' + error_template))
			token_stack.append(('{}', tmp))
		elif token == '[':  # pack the token tree before [] together with the tokens inside []
			tmp = list(_collect_nested_tokens(token_iter, '[', ']', 'Parenthesis error: ' + error_template))
			token_stack.append(('[]', [token_stack.pop(), _token_list2token_tree(tmp)]))
		elif token == ',':
			_eval_operators('*+', token_stack, operator_stack)
			operator_stack.append(token)
		elif token == '+':
			_eval_operators('*', token_stack, operator_stack)
			operator_stack.append(token)
		elif token == '*':
			operator_stack.append(token)
		else:
			token_stack.append(token)
		add_operator = (token not in ['*', '+', ','])
		token = next(token_iter, None)
	_eval_operators('*+,', token_stack, operator_stack)

	if len(token_stack) != 1:
		raise ParameterError('Invalid stack state detected: %r %r' % (token_stack, operator_stack))
	return token_stack[0]
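The _eval_operators helper and python_compat's imap are not part of this snippet. A minimal sketch of stand-ins, assuming the helper folds one pending binary operator at a time into (operator, [lhs, rhs]) nodes (the real grid-control helper may group operands differently):

imap = map  # stand-in for python_compat.imap on Python 3

def _eval_operators(operator_chars, token_stack, operator_stack):
	# assumed behaviour: fold pending operators into (operator, [lhs, rhs]) nodes
	while operator_stack and (operator_stack[-1] in operator_chars):
		operator = operator_stack.pop()
		rhs = token_stack.pop()
		lhs = token_stack.pop()
		token_stack.append((operator, [lhs, rhs]))

print(_token_list2token_tree(['a', '*', 'b', '+', 'c']))
# prints: ('+', [('*', ['a', 'b']), 'c'])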
Example #3
def tok2inlinetok(tokens, operatorList):
	token = next(tokens, None)
	lastTokenExpr = None
	while token:
		# insert '*' between two expressions - but not between "<expr> ["
		if lastTokenExpr and token not in (['[', ']', ')', '>', '}'] + operatorList):
			yield '*'
		yield token
		lastTokenExpr = token not in (['[', '(', '<', '{'] + operatorList)
		token = next(tokens, None)
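A short sketch of the implicit-multiplication behaviour (the token values are made up): a '*' is inserted between two adjacent expressions, while explicit operators pass through unchanged.

print(list(tok2inlinetok(iter(['a', 'b', '+', 'c']), ['+'])))
# prints: ['a', '*', 'b', '+', 'c']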
Example #4
	def _parse(self, proc):
		status_iter = proc.stdout.iter(self._timeout)
		next(status_iter)  # skip the header line
		tmpHead = [CheckInfo.WMSID, 'user', CheckInfo.RAW_STATUS, CheckInfo.QUEUE, 'from', CheckInfo.WN, 'job_name']
		for line in ifilter(identity, status_iter):
			try:
				tmp = line.split()
				job_info = dict(izip(tmpHead, tmp[:7]))
				job_info['submit_time'] = str.join(' ', tmp[7:10])
				yield job_info
			except Exception:
				raise BackendError('Error reading job info:\n%s' % line)
Example #5
		def getReorderIterator(mainIter, altIter): # alt source is used if main source contains invalid entries
			for (jobNum, splitInfo, procMode) in mainIter:
				if splitInfo.get(DataSplitter.Invalid, False) or (procMode == ResyncMode.disable):
					extInfo = next(altIter, None)
					while extInfo and extInfo[1].get(DataSplitter.Invalid, False):
						extInfo = next(altIter, None)
					if extInfo:
						yield (jobNum, extInfo[1], ResyncMode.complete) # Overwrite invalid partitions
						continue
				yield (jobNum, splitInfo, procMode)
			for extInfo in altIter:
				yield (None, extInfo[1], ResyncMode.ignore)
Example #6
def merge(lines):
	my_iter = iter(lines)
	while True:
		value = next(my_iter, None)
		if value is None:
			break
		if not isinstance(value, str):
			yield value
		elif value.lstrip().startswith('def ') or value.lstrip().startswith('class '):
			next_value = next(my_iter, None)
			assert next_value is not None
			yield (value, next_value)
		else:
			yield value
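A quick sketch of how merge pairs each def/class line with the line that follows it (the sample lines are made up):

print(list(merge(['import os', 'def f():', '\treturn 1'])))
# prints: ['import os', ('def f():', '\treturn 1')]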
Example #7
	def collectNestedTokens(tokens, left, right, errMsg):
		level = 1
		token = next(tokens, None)
		while token:
			if token == left:
				level += 1
			elif token == right:
				level -= 1
				if level == 0:
					break
			yield token
			token = next(tokens, None)
		if level != 0:
			raise ConfigError(errMsg)
Example #8
	def parseStatus(self, status):
		next(status)  # skip the header line
		tmpHead = ['id', 'user', 'status', 'queue', 'from', 'dest_host', 'job_name']
		for jobline in itertools.ifilter(lambda x: x != '', status):
			try:
				tmp = jobline.split()
				jobinfo = dict(zip(tmpHead, tmp[:7]))
				jobinfo['submit_time'] = str.join(' ', tmp[7:10])
				jobinfo['dest'] = 'N/A'
				if jobinfo['dest_host'] != '-':
					jobinfo['dest'] = '%s/%s' % (jobinfo['dest_host'], jobinfo['queue'])
				yield jobinfo
			except Exception:
				raise RethrowError('Error reading job info:\n%s' % jobline)
Example #9
def DiffLists(oldList, newList, keyFun, changedFkt, isSorted = False):
	(listAdded, listMissing, listChanged) = ([], [], [])
	if not isSorted:
		(newList, oldList) = (sorted(newList, key = keyFun), sorted(oldList, key = keyFun))
	(newIter, oldIter) = (iter(newList), iter(oldList))
	(new, old) = (next(newIter, None), next(oldIter, None))
	while True:
		if (new is None) or (old is None):
			break
		keyNew = keyFun(new)
		keyOld = keyFun(old)
		if keyNew < keyOld: # new[npos] < old[opos]
			listAdded.append(new)
			new = next(newIter, None)
		elif keyNew > keyOld: # new[npos] > old[opos]
			listMissing.append(old)
			old = next(oldIter, None)
		else: # new[npos] == old[opos] according to *active* comparison
			changedFkt(listAdded, listMissing, listChanged, old, new)
			(new, old) = (next(newIter, None), next(oldIter, None))
	while new is not None:
		listAdded.append(new)
		new = next(newIter, None)
	while old is not None:
		listMissing.append(old)
		old = next(oldIter, None)
	return (listAdded, listMissing, listChanged)
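A minimal sketch comparing two keyed lists; the input data and the changedFkt callback are made up for illustration:

old = [{'id': 1, 'val': 'a'}, {'id': 2, 'val': 'b'}]
new = [{'id': 2, 'val': 'B'}, {'id': 3, 'val': 'c'}]

def changedFkt(listAdded, listMissing, listChanged, oldEntry, newEntry):
	if oldEntry != newEntry:  # same key, changed payload
		listChanged.append(newEntry)

print(DiffLists(old, new, lambda entry: entry['id'], changedFkt))
# prints: ([{'id': 3, 'val': 'c'}], [{'id': 1, 'val': 'a'}], [{'id': 2, 'val': 'B'}])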
Example #10
def get_list_difference(list_old, list_new, key_fun, on_matching_fun,
		is_sorted=False, key_fun_sort=None):
	(list_added, list_missing, list_matching) = ([], [], [])
	if not is_sorted:
		list_new = sorted(list_new, key=key_fun_sort or key_fun)
		list_old = sorted(list_old, key=key_fun_sort or key_fun)
	(iter_new, iter_old) = (iter(list_new), iter(list_old))
	(new, old) = (next(iter_new, None), next(iter_old, None))
	while True:
		if (new is None) or (old is None):
			break
		key_new = key_fun(new)
		key_old = key_fun(old)
		if key_new < key_old:  # new[npos] < old[opos]
			list_added.append(new)
			new = next(iter_new, None)
		elif key_new > key_old:  # new[npos] > old[opos]
			list_missing.append(old)
			old = next(iter_old, None)
		else:  # new[npos] == old[opos] according to *active* comparison
			on_matching_fun(list_added, list_missing, list_matching, old, new)
			(new, old) = (next(iter_new, None), next(iter_old, None))
	while new is not None:
		list_added.append(new)
		new = next(iter_new, None)
	while old is not None:
		list_missing.append(old)
		old = next(iter_old, None)
	return (list_added, list_missing, list_matching)
Example #11
def get_list_difference(list_old,
                        list_new,
                        key_fun,
                        on_matching_fun,
                        is_sorted=False,
                        key_fun_sort=None):
    (list_added, list_missing, list_matching) = ([], [], [])
    if not is_sorted:
        list_new = sorted(list_new, key=key_fun_sort or key_fun)
        list_old = sorted(list_old, key=key_fun_sort or key_fun)
    (iter_new, iter_old) = (iter(list_new), iter(list_old))
    (new, old) = (next(iter_new, None), next(iter_old, None))
    while True:
        if (new is None) or (old is None):
            break
        key_new = key_fun(new)
        key_old = key_fun(old)
        if key_new < key_old:  # new[npos] < old[opos]
            list_added.append(new)
            new = next(iter_new, None)
        elif key_new > key_old:  # new[npos] > old[opos]
            list_missing.append(old)
            old = next(iter_old, None)
        else:  # new[npos] == old[opos] according to *active* comparison
            on_matching_fun(list_added, list_missing, list_matching, old, new)
            (new, old) = (next(iter_new, None), next(iter_old, None))
    while new is not None:
        list_added.append(new)
        new = next(iter_new, None)
    while old is not None:
        list_missing.append(old)
        old = next(iter_old, None)
    return (list_added, list_missing, list_matching)
Example #12
def DiffLists(oldList, newList, keyFun, changedFkt, isSorted=False):
    (listAdded, listMissing, listChanged) = ([], [], [])
    if not isSorted:
        (newList, oldList) = (sorted(newList,
                                     key=keyFun), sorted(oldList, key=keyFun))
    (newIter, oldIter) = (iter(newList), iter(oldList))
    (new, old) = (next(newIter, None), next(oldIter, None))
    while True:
        if (new is None) or (old is None):
            break
        keyNew = keyFun(new)
        keyOld = keyFun(old)
        if keyNew < keyOld:  # new[npos] < old[opos]
            listAdded.append(new)
            new = next(newIter, None)
        elif keyNew > keyOld:  # new[npos] > old[opos]
            listMissing.append(old)
            old = next(oldIter, None)
        else:  # new[npos] == old[opos] according to *active* comparison
            changedFkt(listAdded, listMissing, listChanged, old, new)
            (new, old) = (next(newIter, None), next(oldIter, None))
    while new is not None:
        listAdded.append(new)
        new = next(newIter, None)
    while old is not None:
        listMissing.append(old)
        old = next(oldIter, None)
    return (listAdded, listMissing, listChanged)
Example #13
def _collect_nested_tokens(token_iter, left, right, error_msg):
    level = 1
    token = next(token_iter, None)
    while token:
        if token == left:
            level += 1
        elif token == right:
            level -= 1
            if level == 0:
                break
        yield token
        token = next(token_iter, None)
    if level != 0:
        raise ParameterError(error_msg)
Example #14
	def _collect_nested_tokens(token_iter, left, right, error_msg):
		level = 1
		token = next(token_iter, None)
		while token:
			if token == left:
				level += 1
			elif token == right:
				level -= 1
				if level == 0:
					break
			yield token
			token = next(token_iter, None)
		if level != 0:
			raise ParameterError(error_msg)
Example #15
def DiffLists(oldList, newList, cmpFkt, changedFkt, isSorted = False):
	(listAdded, listMissing, listChanged) = ([], [], [])
	if not isSorted:
		(newList, oldList) = (sorted(newList, cmpFkt), sorted(oldList, cmpFkt))  # Python 2 cmp-style sort
	(newIter, oldIter) = (iter(newList), iter(oldList))
	(new, old) = (next(newIter, None), next(oldIter, None))
	while True:
		if (new is None) or (old is None):
			break
		result = cmpFkt(new, old)
		if result < 0: # new[npos] < old[opos]
			listAdded.append(new)
			new = next(newIter, None)
		elif result > 0: # new[npos] > old[opos]
			listMissing.append(old)
			old = next(oldIter, None)
		else: # new[npos] == old[opos] according to *active* comparison
			changedFkt(listAdded, listMissing, listChanged, old, new)
			(new, old) = (next(newIter, None), next(oldIter, None))
	while new is not None:
		listAdded.append(new)
		new = next(newIter, None)
	while old is not None:
		listMissing.append(old)
		old = next(oldIter, None)
	return (listAdded, listMissing, listChanged)
Example #16
def tok2tree(value, precedence):
	value = list(value)
	errorStr = str.join('', imap(str, value))
	tokens = iter(value)
	token = next(tokens, None)
	tokStack = []
	opStack = []

	def collectNestedTokens(tokens, left, right, errMsg):
		level = 1
		token = next(tokens, None)
		while token:
			if token == left:
				level += 1
			elif token == right:
				level -= 1
				if level == 0:
					break
			yield token
			token = next(tokens, None)
		if level != 0:
			raise ConfigError(errMsg)

	while token:
		if token == '(':
			tmp = list(collectNestedTokens(tokens, '(', ')', "Parenthesis error: " + errorStr))
			tokStack.append(tok2tree(tmp, precedence))
		elif token == '<':
			tmp = list(collectNestedTokens(tokens, '<', '>', "Parenthesis error: " + errorStr))
			tokStack.append(('ref', tmp))
		elif token == '[':
			tmp = list(collectNestedTokens(tokens, '[', ']', "Parenthesis error: " + errorStr))
			tokStack.append(('lookup', [tokStack.pop(), tok2tree(tmp, precedence)]))
		elif token == '{':
			tmp = list(collectNestedTokens(tokens, '{', '}', "Parenthesis error: " + errorStr))
			tokStack.append(('pspace', tmp))
		elif token in precedence:
			clearOPStack(precedence[token], opStack, tokStack)
			if opStack and opStack[-1].startswith(token):
				opStack[-1] = opStack[-1] + token
			else:
				opStack.append(token)
		else:
			tokStack.append(token)
		token = next(tokens, None)

	clearOPStack(precedence.keys(), opStack, tokStack)
	assert(len(tokStack) == 1)
	return tokStack[0]
Example #17
def split_advanced(tokens, doEmit, addEmitToken, quotes = ['"', "'"], brackets = ['()', '{}', '[]'], exType = Exception):
	buffer = ''
	emit_empty_buffer = False
	stack_quote = []
	stack_bracket = []
	map_openbracket = dict(map(lambda x: (x[1], x[0]), brackets))
	tokens = iter(tokens)
	token = next(tokens, None)
	while token:
		emit_empty_buffer = False
		# take care of quotations
		if token in quotes:
			if stack_quote and stack_quote[-1] == token:
				stack_quote.pop()
			else:
				stack_quote.append(token)
		if stack_quote:
			buffer += token
			token = next(tokens, None)
			continue
		# take care of brackets
		if token in map_openbracket.values():
			stack_bracket.append(token)
		if token in map_openbracket.keys():
			if stack_bracket and (stack_bracket[-1] == map_openbracket[token]):
				stack_bracket.pop()
			else:
				raise exType('Uneven brackets!')
		if stack_bracket:
			buffer += token
			token = next(tokens, None)
			continue
		# take care of low level splitting
		if not doEmit(token):
			buffer += token
			token = next(tokens, None)
			continue
		if addEmitToken(token):
			buffer += token
		else:  # if the token list ends with an emit token that is not emitted, finish with an empty buffer
			emit_empty_buffer = True
		yield buffer
		buffer = ''
		token = next(tokens, None)

	if stack_quote or stack_bracket:
		raise exType('Brackets / quotes not closed!')
	if buffer or emit_empty_buffer:
		yield buffer
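A sketch of splitting a character stream on commas while honouring quotes and brackets (the emit callbacks are made up for illustration):

print(list(split_advanced('a,(b,c),"d,e"', doEmit=lambda t: t == ',', addEmitToken=lambda t: False)))
# prints: ['a', '(b,c)', '"d,e"']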
Example #18
	def parseStatus(self, status):
		next(status)  # skip the header line
		tmpHead = ['id', 'user', 'status', 'queue', 'from', 'dest_host', 'job_name']
		for jobline in status:
			if jobline != '':
				try:
					tmp = jobline.split()
					jobinfo = dict(izip(tmpHead, tmp[:7]))
					jobinfo['submit_time'] = str.join(' ', tmp[7:10])
					jobinfo['dest'] = 'N/A'
					if jobinfo['dest_host'] != '-':
						jobinfo['dest'] = '%s/%s' % (jobinfo['dest_host'], jobinfo['queue'])
					yield jobinfo
				except Exception:
					raise BackendError('Error reading job info:\n%s' % jobline)
Example #19
def _iter_resync_infos_valid(resync_info_iter, resync_info_iter_alt):
	# yield valid resync infos from resync_info_iter and resync_info_iter_alt
	# invalid or disabled resync_infos from resync_info_iter are replaced by
	# valid resync_infos from resync_info_iter_alt
	for (partition_num, _, partition, proc_mode) in resync_info_iter:
		if (proc_mode == ResyncMode.disable) or partition.get(DataSplitter.Invalid, False):
			resync_info_added = next(resync_info_iter_alt, None)
			while resync_info_added and resync_info_added[2].get(DataSplitter.Invalid, False):
				resync_info_added = next(resync_info_iter_alt, None)
			if resync_info_added:
				yield (partition_num, None, resync_info_added[2], ResyncMode.complete)
				continue  # Overwrite invalid partitions
		yield (partition_num, None, partition, proc_mode)
	for resync_info_added in resync_info_iter_alt:  # deplete resync_info_iter_alt at the end
		yield (None, None, resync_info_added[2], None)
Example #20
def _parse(self, proc):
    status_iter = proc.stdout.iter(self._timeout)
    next(status_iter)
    tmpHead = [
        CheckInfo.WMSID, 'user', CheckInfo.RAW_STATUS, CheckInfo.QUEUE,
        'from', CheckInfo.WN, 'job_name'
    ]
    for line in ifilter(identity, status_iter):
        try:
            tmp = line.split()
            job_info = dict(izip(tmpHead, tmp[:7]))
            job_info['submit_time'] = str.join(' ', tmp[7:10])
            yield job_info
        except Exception:
            raise BackendError('Error reading job info:\n%s' % line)
Example #21
	def _parse(self, proc):
		tmpHead = [CheckInfo.WMSID, 'user', 'group', 'job_name', CheckInfo.QUEUE, 'partition',
			'nodes', 'cpu_time', 'wall_time', 'memory', 'queue_time', CheckInfo.RAW_STATUS]
		status_iter = ifilter(identity, proc.stdout.iter(self._timeout))
		next(status_iter)  # skip the two header lines
		next(status_iter)
		for line in status_iter:
			tmp = lmap(lambda x: x.strip(), line.replace('\x1b(B', '').replace('\x1b[m', '').split())
			job_info = dict(izip(tmpHead, tmp[:12]))
			if len(tmp) > 12:
				job_info['start_time'] = tmp[12]
			if len(tmp) > 13:
				job_info['kill_time'] = tmp[13]
			if len(tmp) > 14:
				job_info[CheckInfo.WN] = tmp[14]
			yield job_info
Example #22
    def _splitJobs(self, fileList, eventsPerJob, firstEvent):
        nextEvent = firstEvent
        succEvent = nextEvent + eventsPerJob
        curEvent = 0
        lastEvent = 0
        curSkip = 0
        fileListIter = iter(fileList)
        job = {
            DataSplitter.Skipped: 0,
            DataSplitter.NEntries: 0,
            DataSplitter.FileList: []
        }
        while True:
            if curEvent >= lastEvent:
                try:
                    fileObj = next(fileListIter)
                except StopIteration:
                    if job[DataSplitter.FileList]:
                        yield job
                    break

                nEvents = fileObj[DataProvider.NEntries]
                if nEvents < 0:
                    raise DatasetError(
                        'EventBoundarySplitter does not support files with a negative number of events!'
                    )
                curEvent = lastEvent
                lastEvent = curEvent + nEvents
                curSkip = 0

            if nextEvent >= lastEvent:
                curEvent = lastEvent
                continue

            curSkip += nextEvent - curEvent
            curEvent = nextEvent

            available = lastEvent - curEvent
            if succEvent - nextEvent < available:
                available = succEvent - nextEvent

            if not len(job[DataSplitter.FileList]):
                job[DataSplitter.Skipped] = curSkip

            job[DataSplitter.NEntries] += available
            nextEvent += available

            job[DataSplitter.FileList].append(fileObj[DataProvider.URL])
            if DataProvider.Metadata in fileObj:
                job.setdefault(DataSplitter.Metadata,
                               []).append(fileObj[DataProvider.Metadata])

            if nextEvent >= succEvent:
                succEvent += eventsPerJob
                yield job
                job = {
                    DataSplitter.Skipped: 0,
                    DataSplitter.NEntries: 0,
                    DataSplitter.FileList: []
                }
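A runnable sketch with stand-in constants; the real DataSplitter/DataProvider keys and DatasetError come from grid-control, so these definitions are assumptions, and since self is unused the method is treated as a free function:

class DataSplitter:
    Skipped, NEntries, FileList, Metadata = 'skipped', 'entries', 'files', 'metadata'

class DataProvider:
    NEntries, URL, Metadata = 'entries', 'url', 'metadata'

class DatasetError(Exception):
    pass

files = [{DataProvider.NEntries: 3, DataProvider.URL: 'f1'},
         {DataProvider.NEntries: 2, DataProvider.URL: 'f2'}]
for job in _splitJobs(None, files, eventsPerJob=2, firstEvent=0):
    print(job)
# prints three partitions of 2, 2 and 1 events:
# {'skipped': 0, 'entries': 2, 'files': ['f1']}
# {'skipped': 2, 'entries': 2, 'files': ['f1', 'f2']}
# {'skipped': 1, 'entries': 1, 'files': ['f2']}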
Example #23
    def _partition_block(self, fi_list, events_per_job, entry_first):
        event_next = entry_first
        event_succ = event_next + events_per_job
        event_current = 0
        event_prev = 0
        skip_current = 0
        fi_iter = iter(fi_list)
        proto_partition = {
            DataSplitter.Skipped: 0,
            DataSplitter.NEntries: 0,
            DataSplitter.FileList: []
        }
        while True:
            if event_current >= event_prev:
                fi = next(fi_iter, None)
                if fi is None:
                    if proto_partition[DataSplitter.FileList]:
                        yield proto_partition
                    break

                event_count = fi[DataProvider.NEntries]
                if event_count < 0:
                    raise DatasetError(
                        '%s does not support files with a negative number of events!'
                        % self.__class__.__name__)
                event_current = event_prev
                event_prev = event_current + event_count
                skip_current = 0

            if event_next >= event_prev:
                event_current = event_prev
                continue

            skip_current += event_next - event_current
            event_current = event_next

            available = event_prev - event_current
            if event_succ - event_next < available:
                available = event_succ - event_next

            if not proto_partition[DataSplitter.FileList]:
                proto_partition[DataSplitter.Skipped] = skip_current

            proto_partition[DataSplitter.NEntries] += available
            event_next += available

            proto_partition[DataSplitter.FileList].append(fi[DataProvider.URL])
            if DataProvider.Metadata in fi:
                proto_partition.setdefault(DataSplitter.Metadata, []).append(
                    fi[DataProvider.Metadata])

            if event_next >= event_succ:
                event_succ += events_per_job
                yield proto_partition
                proto_partition = {
                    DataSplitter.Skipped: 0,
                    DataSplitter.NEntries: 0,
                    DataSplitter.FileList: []
                }
Example #24
def split_advanced(tokens, doEmit, addEmitToken, quotes = None, brackets = None, exType = Exception):
	buffer = None
	tokens = split_brackets(split_quotes(tokens, quotes, exType), brackets, exType)
	token = next(tokens, None)
	while token:
		if buffer is None:
			buffer = ''
		if doEmit(token):
			yield buffer
			buffer = ''
			if addEmitToken(token):
				yield token
		else:
			buffer += token
		token = next(tokens, None)
	if buffer is not None:
		yield buffer
Example #25
def _iter_resync_infos_valid(resync_info_iter, resync_info_iter_alt):
    # yield valid resync infos from resync_info_iter and resync_info_iter_alt
    # invalid or disabled resync_infos from resync_info_iter are replaced by
    # valid resync_infos from resync_info_iter_alt
    for (partition_num, _, partition, proc_mode) in resync_info_iter:
        if (proc_mode == ResyncMode.disable) or partition.get(
                DataSplitter.Invalid, False):
            resync_info_added = next(resync_info_iter_alt, None)
            while resync_info_added and resync_info_added[2].get(
                    DataSplitter.Invalid, False):
                resync_info_added = next(resync_info_iter_alt, None)
            if resync_info_added:
                yield (partition_num, None, resync_info_added[2],
                       ResyncMode.complete)
                continue  # Overwrite invalid partitions
        yield (partition_num, None, partition, proc_mode)
    for resync_info_added in resync_info_iter_alt:  # deplete resync_info_iter_alt at the end
        yield (None, None, resync_info_added[2], None)
Example #26
	def _parse(self, proc):
		status_iter = proc.stdout.iter(self._timeout)
		head = lmap(lambda x: x.strip('%').lower(), next(status_iter, '').split())
		for entry in imap(str.strip, status_iter):
			job_info = dict(izip(head, ifilter(lambda x: x != '', entry.split(None, len(head) - 1))))
			job_info[CheckInfo.WMSID] = job_info.pop('pid')
			job_info[CheckInfo.RAW_STATUS] = job_info.pop('stat')
			job_info.update({CheckInfo.QUEUE: 'localqueue', CheckInfo.WN: 'localhost'})
			yield job_info
Example #27
def split_advanced(tokens, do_emit, add_emit_token,
		quotes=None, brackets=None, exception_type=Exception):
	buffer = None
	tokens = split_brackets(split_quotes(tokens, quotes, exception_type), brackets, exception_type)
	token = next(tokens, None)
	while token:
		if buffer is None:
			buffer = ''
		if do_emit(token):
			yield buffer
			buffer = ''
			if add_emit_token(token):
				yield token
		else:
			buffer += token
		token = next(tokens, None)
	if buffer is not None:
		yield buffer
Example #28
	def _splitJobs(self, fileList, eventsPerJob, firstEvent):
		nextEvent = firstEvent
		succEvent = nextEvent + eventsPerJob
		curEvent = 0
		lastEvent = 0
		curSkip = 0
		fileListIter = iter(fileList)
		job = { DataSplitter.Skipped: 0, DataSplitter.NEntries: 0, DataSplitter.FileList: [] }
		while True:
			if curEvent >= lastEvent:
				try:
					fileObj = next(fileListIter)
				except StopIteration:
					if job[DataSplitter.FileList]:
						yield job
					break

				nEvents = fileObj[DataProvider.NEntries]
				if nEvents < 0:
					raise DatasetError('EventBoundarySplitter does not support files with a negative number of events!')
				curEvent = lastEvent
				lastEvent = curEvent + nEvents
				curSkip = 0

			if nextEvent >= lastEvent:
				curEvent = lastEvent
				continue

			curSkip += nextEvent - curEvent
			curEvent = nextEvent

			available = lastEvent - curEvent
			if succEvent - nextEvent < available:
				available = succEvent - nextEvent

			if not len(job[DataSplitter.FileList]):
				job[DataSplitter.Skipped] = curSkip

			job[DataSplitter.NEntries] += available
			nextEvent += available

			job[DataSplitter.FileList].append(fileObj[DataProvider.URL])
			if DataProvider.Metadata in fileObj:
				job.setdefault(DataSplitter.Metadata, []).append(fileObj[DataProvider.Metadata])

			if nextEvent >= succEvent:
				succEvent += eventsPerJob
				yield job
				job = { DataSplitter.Skipped: 0, DataSplitter.NEntries: 0, DataSplitter.FileList: [] }
Example #29
	def _partition_block(self, fi_list, events_per_job, entry_first):
		event_next = entry_first
		event_succ = event_next + events_per_job
		event_current = 0
		event_prev = 0
		skip_current = 0
		fi_iter = iter(fi_list)
		proto_partition = {DataSplitter.Skipped: 0, DataSplitter.NEntries: 0, DataSplitter.FileList: []}
		while True:
			if event_current >= event_prev:
				fi = next(fi_iter, None)
				if fi is None:
					if proto_partition[DataSplitter.FileList]:
						yield proto_partition
					break

				event_count = fi[DataProvider.NEntries]
				if event_count < 0:
					raise DatasetError('%s does not support files with a negative number of events!' %
						self.__class__.__name__)
				event_current = event_prev
				event_prev = event_current + event_count
				skip_current = 0

			if event_next >= event_prev:
				event_current = event_prev
				continue

			skip_current += event_next - event_current
			event_current = event_next

			available = event_prev - event_current
			if event_succ - event_next < available:
				available = event_succ - event_next

			if not proto_partition[DataSplitter.FileList]:
				proto_partition[DataSplitter.Skipped] = skip_current

			proto_partition[DataSplitter.NEntries] += available
			event_next += available

			proto_partition[DataSplitter.FileList].append(fi[DataProvider.URL])
			if DataProvider.Metadata in fi:
				proto_partition.setdefault(DataSplitter.Metadata, []).append(fi[DataProvider.Metadata])

			if event_next >= event_succ:
				event_succ += events_per_job
				yield proto_partition
				proto_partition = {DataSplitter.Skipped: 0, DataSplitter.NEntries: 0, DataSplitter.FileList: []}
Example #30
def parseStatus(self, status):
    head = lmap(lambda x: x.strip('%').lower(), next(status, '').split())
    for entry in imap(str.strip, status):
        jobinfo = dict(
            izip(
                head,
                ifilter(lambda x: x != '',
                        entry.split(None,
                                    len(head) - 1))))
        jobinfo.update({
            'id': jobinfo.get('pid'),
            'status': 'R',
            'dest': 'localhost/localqueue'
        })
        yield jobinfo
Example #31
def _parse(self, proc):
    status_iter = proc.stdout.iter(self._timeout)
    head = lmap(lambda x: x.strip('%').lower(),
                next(status_iter, '').split())
    for entry in imap(str.strip, status_iter):
        job_info = dict(
            izip(
                head,
                ifilter(lambda x: x != '',
                        entry.split(None,
                                    len(head) - 1))))
        job_info[CheckInfo.WMSID] = job_info.pop('pid')
        job_info[CheckInfo.RAW_STATUS] = job_info.pop('stat')
        job_info.update({
            CheckInfo.QUEUE: 'localqueue',
            CheckInfo.WN: 'localhost'
        })
        yield job_info
Example #32
def collect_and_sort_onelevel(source_iter, do_display=False):
	sort_helper = []

	def _do_sort(sort_helper):
		if sort_helper:
			cls_tree = {
				'Exception'.lower(): ['0'],
				'NestedException'.lower(): ['0'],
				'object'.lower(): ['1'],
			}
			for (defclass, _) in sort_helper:
				try:
					if defclass.lstrip().startswith('class'):
						cls_parts = defclass.lstrip().split(' ')[1].rstrip().rstrip(':').rstrip(')').split('(')
						cls_name = cls_parts[0]
						cls_tree[cls_name.lower()] = cls_tree.get(cls_parts[1].lower(), [cls_parts[1]]) + [cls_name]
				except Exception:
					logging.error('Error while processing %r %r', cls_tree, defclass)
					raise

			for entry in sorted(sort_helper, key=lambda k: keyfun(cls_tree, k)):
				if do_display:
					key = keyfun(cls_tree, entry)
					if (key[1] == 1) and not key[0]:
						logging.warning(key[-1])
				yield entry

	while True:
		value = next(source_iter, None)
		if value is None:
			break
		if isinstance(value, tuple):
			(defclass, src) = value
			sort_helper.append((defclass, list(collect_and_sort_onelevel(merge(src)))))
		else:
			for entry in _do_sort(sort_helper):
				yield entry
			sort_helper = []
			yield value
	for entry in _do_sort(sort_helper):
		yield entry
Example #33
	def parseStatus(self, status):
		head = lmap(lambda x: x.strip('%').lower(), next(status, '').split())
		for entry in imap(str.strip, status):
			jobinfo = dict(izip(head, ifilter(lambda x: x != '', entry.split(None, len(head) - 1))))
			jobinfo.update({'id': jobinfo.get('pid'), 'status': 'R', 'dest': 'localhost/localqueue'})
			yield jobinfo
Example #34
def _token_list2token_tree(value):
    token_list = list(value)
    error_template = str.join('', imap(str, token_list))
    token_iter = iter(token_list)
    token = next(token_iter, None)
    token_stack = []
    operator_stack = []
    add_operator = False

    def _collect_nested_tokens(token_iter, left, right, error_msg):
        level = 1
        token = next(token_iter, None)
        while token:
            if token == left:
                level += 1
            elif token == right:
                level -= 1
                if level == 0:
                    break
            yield token
            token = next(token_iter, None)
        if level != 0:
            raise ParameterError(error_msg)

    while token:
        if add_operator and (token not in ['*', '+', ',', '[']):
            operator_stack.append('*')
        if token == '(':  # replace tokens between ( ) with evaluated tree
            tmp = list(
                _collect_nested_tokens(token_iter, '(', ')',
                                       'Parenthesis error: ' + error_template))
            token_stack.append(_token_list2token_tree(tmp))
        elif token == '<':  # forward raw tokens between < >
            tmp = list(
                _collect_nested_tokens(token_iter, '<', '>',
                                       'Parenthesis error: ' + error_template))
            token_stack.append(('<>', tmp))
        elif token == '{':  # forward raw tokens between { }
            tmp = list(
                _collect_nested_tokens(token_iter, '{', '}',
                                       'Parenthesis error: ' + error_template))
            token_stack.append(('{}', tmp))
        elif token == '[':  # pack the token tree before [] together with the tokens inside []
            tmp = list(
                _collect_nested_tokens(token_iter, '[', ']',
                                       'Parenthesis error: ' + error_template))
            token_stack.append(
                ('[]', [token_stack.pop(),
                        _token_list2token_tree(tmp)]))
        elif token == ',':
            _eval_operators('*+', token_stack, operator_stack)
            operator_stack.append(token)
        elif token == '+':
            _eval_operators('*', token_stack, operator_stack)
            operator_stack.append(token)
        elif token == '*':
            operator_stack.append(token)
        else:
            token_stack.append(token)
        add_operator = (token not in ['*', '+', ','])
        token = next(token_iter, None)
    _eval_operators('*+,', token_stack, operator_stack)

    if len(token_stack) != 1:
        raise ParameterError('Invalid stack state detected: %r %r' %
                             (token_stack, operator_stack))
    return token_stack[0]