def merge(clazz, lines):
  'Merge a sequence of lines into a single text_line. Continuation flags are cleared.'
  # Strip the trailing continuation character from every line, then glue
  # the pieces together; the merged line keeps the first line's number.
  pieces = [ string_util.remove_tail(next_line.text, clazz.CONTINUATION_CHAR)
             for next_line in lines ]
  return text_line(lines[0].line_number, ''.join(pieces))
def __str__(self):
  'Return the parsed config serialized as text, ending with exactly one newline.'
  # Removed a block of commented-out debug code (including a dead
  # `assert False`) that served no purpose.
  buf = StringIO()
  self._parser.write(buf)
  return buf.getvalue().strip() + '\n'
def _buf_to_str(clazz, buf, col_width):
  'Read up to col_width characters from buf; return the stripped text or None if empty.'
  chars = []
  for _ in range(col_width):
    c = buf.read(1)
    if not c:
      # Stream exhausted before the column width was reached.
      break
    chars.append(c)
  return ''.join(chars).strip() or None
def to_csv(self, delimiter=',', quotechar='|'):
  'Return the table rows formatted as CSV text.'
  # Fixed docstring: the old one was a copy-paste error from a filter method.
  buf = StringIO()
  writer = csv.writer(buf, delimiter=delimiter, quotechar=quotechar,
                      quoting=csv.QUOTE_MINIMAL)
  for row in self._rows:
    writer.writerow(list(row))
  return buf.getvalue()
def replace_all(clazz, text, src_string, dst_string, word_boundary=False, word_boundary_chars=None):
  'Replace src_string with dst_string optionally respecting word boundaries.'
  check.check_string(text)
  check.check_string(src_string)
  check.check_string(dst_string)
  check.check_bool(word_boundary)
  check.check_set(word_boundary_chars, allow_none=True)
  spans = clazz.find_all(text, src_string,
                         word_boundary=word_boundary,
                         word_boundary_chars=word_boundary_chars)
  if not spans:
    return text
  # Walk the matches left to right, copying the text between matches and
  # substituting dst_string for each matched span (span.end is inclusive).
  pieces = []
  cursor = 0
  for span in spans:
    pieces.append(text[cursor:span.start])
    pieces.append(dst_string)
    cursor = span.end + 1
  pieces.append(text[cursor:])
  return ''.join(pieces)
def add_line_numbers(clazz, text, delimiter='|'):
  'Return text with a right-justified, 1-based line number prepended to each line.'
  lines = text.split('\n')
  # Exact digit count for the largest line number.  The old
  # math.trunc(math.log10(n) + 1) form relied on floating point and is
  # both less direct and potentially off for values where log10 rounds.
  width = len(str(len(lines)))
  buf = StringIO()
  for line_number, line in enumerate(lines, start=1):
    buf.write(str(line_number).rjust(width))
    buf.write(delimiter)
    buf.write(str(line))
    buf.write('\n')
  return buf.getvalue()
def to_string(self, strip_comments=False):
  'Render all lines joined by the configured line break, honoring _ends_with_line_break.'
  buf = StringIO()
  for line in self._lines:
    buf.write(line.get_text(strip_comments=strip_comments))
    buf.write(self._line_break)
  v = buf.getvalue()
  if self._ends_with_line_break:
    # BUG FIX: the old code wrote the missing line break to `buf` after
    # `v` had already been captured, so it was silently lost.
    if v and not v.endswith(self._line_break):
      v += self._line_break
  else:
    # BUG FIX: compare/trim by the full line-break string, not a single
    # character, so multi-char breaks like '\r\n' are handled correctly.
    if v.endswith(self._line_break):
      v = v[:-len(self._line_break)]
  return v
def __str__(self):
  'Render the table as rows of right-justified, space-separated cells.'
  # One pass to find the widest cell so every column lines up.
  widest = 0
  for y in range(self.height):
    for x in range(self.width):
      widest = max(widest, len(str(self._rows[y][x])))
  buf = StringIO()
  for y in range(self.height):
    for x in range(self.width):
      buf.write(string_util.right_justify(str(self._rows[y][x]), widest))
      buf.write(' ')
    buf.write('\n')
  return buf.getvalue()
def insert(clazz, text, position, insert_text):
  'Insert insert_text into text at position.'
  check.check_string(text)
  check.check_int(position)
  check.check_string(insert_text)
  # Simple slice-and-concatenate; negative positions follow normal
  # Python slicing semantics.
  return text[:position] + insert_text + text[position:]
def _strip_line_allow_quoted(clazz, text, strip_head=False, strip_tail=False):
  'Strip comments from one line, allowing # to appear inside quoted strings.'
  # Lex the line so '#' inside quotes survives; drop comment and DONE tokens.
  dropped = (string_lexer.TOKEN_DONE, string_lexer.TOKEN_COMMENT)
  kept = []
  for token in string_lexer.tokenize(text, 'comments_strip_line',
                                     options=string_lexer_options.KEEP_QUOTES):
    if token.token_type not in dropped:
      kept.append(token.value)
  return string_util.strip_ends(''.join(kept),
                                strip_head=strip_head,
                                strip_tail=strip_tail)
def _spacify(clazz, s):
  'Return s with every non-newline character replaced by a space (newlines kept).'
  return ''.join(c if c == '\n' else ' ' for c in s)
def to_string(self, delimiter='=', value_delimiter=';', quote=False):
  'Render every key/value pair, joined by value_delimiter.'
  # Idiom: str.join replaces the manual first-flag accumulation loop.
  return value_delimiter.join(
    kv.to_string(delimiter=delimiter, quote_value=quote) for kv in iter(self))
def to_string(self, delimiter='\n'):
  'Render every file info item, joined by delimiter.'
  # Idiom: str.join replaces the manual first-flag accumulation loop.
  return delimiter.join(str(vfs_file_info) for vfs_file_info in iter(self))
def to_string(self):
  'Render every item, one per line.'
  # Idiom: str.join replaces the manual first-flag accumulation loop.
  return '\n'.join(str(finfo) for finfo in iter(self))
def to_string(self, delimiter=' '):
  'Render every requirement, joined by delimiter.'
  # Idiom: str.join replaces the manual first-flag accumulation loop.
  return delimiter.join(str(req) for req in iter(self))
def _strip_line_disallow_quoted(clazz, text, strip_head=False, strip_tail=False):
  'Strip comments from one line disallowing # to appear in quoted strings but much faster.'
  # Scan left to right; the first unescaped '#' starts the comment and ends
  # the scan.  Everything before it (minus backslashes) is kept.
  last_char = None
  buf = StringIO()
  found = False  # NOTE(review): set but never read afterwards - appears vestigial.
  for c in text:
    # c is "escaped" when the immediately preceding character was a backslash.
    is_escaping = last_char == '\\'
    if c == '#' and not is_escaping:
      found = True
      break
    # Backslashes themselves are never emitted, so '\#' collapses to '#'.
    # NOTE(review): because last_char is updated unconditionally, '\\' also
    # escapes a following '#' - confirm that double-backslash behavior is
    # intended before changing.
    if c != '\\':
      buf.write(c)
    last_char = c
  text = buf.getvalue()
  return string_util.strip_ends(text, strip_head=strip_head, strip_tail=strip_tail)
def replace_punctuation(clazz, s, replacement):
  'Replace punctuation in s with replacement; drop punctuation when replacement is falsy.'
  pieces = []
  for ch in s:
    if ch in string.punctuation:
      # A falsy replacement (None or '') simply removes the character.
      if replacement:
        pieces.append(replacement)
    else:
      pieces.append(ch)
  return ''.join(pieces)
def __str__(self):
  'Render credentials as "key=**********; ..." with all secret values masked.'
  # Removed unreachable dead code: a second `return` statement followed the
  # first one and could never execute.
  buf = StringIO()
  first = True
  # Only the keys are rendered; values are always masked.
  for key in sorted(self.__dict__['_credentials']):
    if not first:
      buf.write('; ')
    first = False
    buf.write('{}=**********'.format(key))
  return buf.getvalue()
def dumps(d, delimiter='\n'):
  'Format dict d as right-aligned "key: value" entries, each followed by delimiter.'
  if not d:
    return ''
  # Pad every key to the longest key so the colons line up.
  pad = max(len(key) for key in d)
  rendered = ['%*s: %s' % (pad, k, v) for k, v in sorted(d.items())]
  return delimiter.join(rendered) + delimiter
def escape_white_space(clazz, text):
  'Backslash-escape whitespace characters in text, leaving already-escaped ones alone.'
  pieces = []
  prev = None
  for ch in text:
    # Prepend a backslash before unescaped whitespace only.
    if ch.isspace() and prev != '\\':
      pieces.append('\\')
    pieces.append(ch)
    prev = ch
  return ''.join(pieces)
def to_text(self, style):
  'Render the properties, one formatted key/value per line, in sorted key order.'
  self._check_style(style)
  lines = [self._key_value_to_str(style, key, value)
           for key, value in sorted(self._properties.items())]
  body = '\n'.join(lines).strip()
  # An empty property set renders as the empty string, not a lone newline.
  return body + '\n' if body else ''
def to_string(self, delimiter=';', quote=False):
  'Join the values with delimiter, optionally quoting each value first.'
  # Idiom: str.join replaces the manual first-flag accumulation loop.
  values = self._values
  if quote:
    values = [string_util.quote_if_needed(s) for s in values]
  return delimiter.join(values)
def replace_white_space(clazz, s, replacement):
  'Collapse each run of whitespace in s into a single replacement string.'
  pieces = []
  in_space = False
  for ch in s:
    if ch.isspace():
      # Emit the replacement only for the first character of a run.
      if not in_space:
        pieces.append(replacement)
        in_space = True
    else:
      pieces.append(ch)
      in_space = False
  return ''.join(pieces)
def to_string(self, sort=False, fixed_key_column_width=False):
  'Render every section, separated by the default line break.'
  sections = sorted(self._sections) if sort else self._sections
  rendered = [ section.to_string(entry_formatter=self._entry_formatter,
                                 sort=sort,
                                 fixed_key_column_width=fixed_key_column_width)
               for section in sections ]
  return line_break.DEFAULT_LINE_BREAK.join(rendered)
def to_text(self, formatter):
  'Render every key/value through formatter, one per line, in sorted key order.'
  rendered = []
  for key, value in sorted(self.values().items()):
    value_text = formatter.value_to_text(key, value)
    rendered.append(formatter.key_value_to_text(key, value_text))
  result = '\n'.join(rendered).strip()
  # No values renders as the empty string, not a lone newline.
  if not result:
    return ''
  return result + '\n'
def list_processes(clazz):
  'List all processes.'
  # tasklist /V /NH /FO csv emits one CSV row per process with these fields:
  #   Image Name, PID, Session Name, Session#, Mem Usage,
  #   Status, User Name, CPU Time, Window Title
  rv = execute.execute('tasklist /V /NH /FO csv')
  reader = csv.reader(StringIO(rv.stdout), delimiter=',')
  result = []
  for row in reader:
    # Index directly instead of repeatedly popping a copied list.
    image_name = row[0]
    pid = row[1]
    session_name = row[2]
    session_number = row[3]
    mem_usage = row[4]
    status = row[5]
    user_name = clazz._fix_na_strings(row[6])
    cpu_time = row[7]
    window_title = clazz._fix_na_strings(row[8])
    other = {
      'window_title': window_title,
      'status': status,
      'session_number': session_number,
      'session_name': session_name,
    }
    result.append(process_info(user_name, pid, cpu_time, mem_usage, image_name, other))
  return result
def buffer_reset(self, c=None):
  'Replace the accumulation buffer with a fresh empty one, optionally seeded with c.'
  self._buffer = StringIO()
  if c:
    self.buffer_write(c)
class upstream_version_lexer(object):
  'State-machine lexer that splits an upstream version string into typed tokens.'

  # Token type identifiers carried by the yielded lexer_token objects.
  TOKEN_DONE = 'done'
  TOKEN_NUMBER = 'number'
  TOKEN_PUNCTUATION = 'punctuation'
  TOKEN_SPACE = 'space'
  TOKEN_STRING = 'string'
  TOKEN_TEXT = 'text'

  # Sentinel appended after the input so states can flush pending buffers.
  EOS = '\0'

  def __init__(self, log_tag):
    # log.add_logging injects the log_d() method used throughout this class.
    log.add_logging(self, tag=log_tag)
    self._buffer = None
    # One instance of each state object; `state` points at the current one.
    self.STATE_BEGIN = _state_begin(self)
    self.STATE_DONE = _state_done(self)
    self.STATE_NUMBER = _state_number(self)
    self.STATE_PUNCTUATION = _state_punctuation(self)
    self.STATE_TEXT = _state_text(self)
    self.state = self.STATE_BEGIN

  def _run(self, text):
    'Generator: feed each character (plus EOS) to the current state, yielding tokens.'
    self.log_d('_run() text="%s")' % (text))
    assert self.EOS not in text
    self.position = point(1, 1)
    for c in self._chars_plus_eos(text):
      cr = self._char_type(c)
      if cr.ctype == self._char_types.UNKNOWN:
        raise RuntimeError('unknown character: "%s"' % (c))
      # Each state may emit zero or more completed tokens per character.
      tokens = self.state.handle_char(cr)
      for token in tokens:
        self.log_d('tokenize: new token: %s' % (str(token)))
        yield token
      # NOTE(review): adds 0 to x, so the reported position never advances;
      # this looks like it should be `x + 1` - confirm before changing.
      self.position = point(self.position.x + 0, self.position.y)
    assert self.state == self.STATE_DONE
    yield lexer_token(self.TOKEN_DONE, None, self.position)

  @classmethod
  def tokenize(clazz, text, log_tag):
    'Convenience entry point: construct a lexer and return its token generator.'
    return clazz(log_tag)._run(text)

  @classmethod
  def _char_to_string(clazz, c):
    'Printable form of a character for logging; EOS is shown symbolically.'
    if c == clazz.EOS:
      return 'EOS'
    else:
      return c

  def change_state(self, new_state, cr):
    'Transition to new_state, logging the transition; no-op when unchanged.'
    assert new_state
    if new_state == self.state:
      return
    self.log_d(
      'transition: %20s -> %-20s; %s' %
      (self.state.__class__.__name__, new_state.__class__.__name__,
       new_state._make_log_attributes(cr, include_state=False)))
    self.state = new_state

  @classmethod
  def _chars_plus_eos(self, text):
    'Yield every character of text followed by the EOS sentinel.'
    for c in text:
      yield c
    yield self.EOS

  def make_token_text(self):
    # Text tokens carry the buffered characters verbatim.
    return lexer_token(self.TOKEN_TEXT, self.buffer_value(), self.position)

  def make_token_number(self):
    # Number tokens carry their value as an int, not the raw digits.
    return lexer_token(self.TOKEN_NUMBER, int(self.buffer_value()), self.position)

  def make_token_punctuation(self):
    return lexer_token(self.TOKEN_PUNCTUATION, self.buffer_value(), self.position)

  def buffer_reset(self, c=None):
    'Start a fresh accumulation buffer, optionally seeded with character c.'
    self._buffer = StringIO()
    if c:
      self.buffer_write(c)

  def buffer_reset_with_quote(self, c):
    # NOTE(review): SINGLE_QUOTE_CHAR / DOUBLE_QUOTE_CHAR are not defined in
    # this class - this method looks copied from a string lexer and may be
    # vestigial; confirm it is actually reachable.
    assert c in [self.SINGLE_QUOTE_CHAR, self.DOUBLE_QUOTE_CHAR]
    self.buffer_reset()
    self.buffer_write_quote(c)

  def buffer_write(self, c):
    # EOS must never be buffered; it is a control sentinel only.
    assert c != self.EOS
    self._buffer.write(c)

  def buffer_value(self):
    'Return everything accumulated since the last buffer_reset().'
    return self._buffer.getvalue()

  def buffer_write_quote(self, c):
    # NOTE(review): relies on SINGLE_QUOTE_CHAR / DOUBLE_QUOTE_CHAR and on
    # the _keep_quotes / _escape_quotes attributes, none of which are set in
    # this class - appears vestigial; verify before relying on it.
    assert c in [self.SINGLE_QUOTE_CHAR, self.DOUBLE_QUOTE_CHAR]
    if self._keep_quotes:
      if self._escape_quotes:
        self.buffer_write('\\')
      self.buffer_write(c)

  class _char_types(IntEnum):
    # Classification of a single input character.
    EOS = 1
    NUMBER = 2
    PUNCTUATION = 3
    TEXT = 4
    UNKNOWN = 5

  # (char, ctype) pair handed to the state objects by _run().
  _char_result = namedtuple('_char_result', 'char, ctype')

  @classmethod
  def _char_type(clazz, c):
    'Classify character c into a _char_result; punctuation wins over other classes.'
    if c in string.punctuation:
      return clazz._char_result(clazz._char_to_string(c), clazz._char_types.PUNCTUATION)
    elif c.isdigit():
      return clazz._char_result(clazz._char_to_string(c), clazz._char_types.NUMBER)
    elif c.isalpha():
      return clazz._char_result(clazz._char_to_string(c), clazz._char_types.TEXT)
    elif c == clazz.EOS:
      return clazz._char_result(clazz._char_to_string(c), clazz._char_types.EOS)
    else:
      return clazz._char_result(clazz._char_to_string(c), clazz._char_types.UNKNOWN)
def instructions(self, env):
  'Source every env file in a bash subshell and derive the env-var change instructions.'
  # Build a script that dumps the environment (declare -px) before and after
  # sourcing each file, bracketed by sentinel markers used for cutting later.
  buf = StringIO()
  buf.write('#!/bin/bash\n')
  buf.write('echo "----1----"\n')
  buf.write('declare -px\n')
  buf.write('echo "----2----"\n')
  for f in self.files_abs:
    buf.write('source "%s"\n' % (f))
  buf.write('echo "----3----"\n')
  buf.write('declare -px\n')
  buf.write('echo "----4----"\n')
  # In debug mode the temp script is kept on disk for inspection.
  script = temp_file.make_temp_file(content=buf.getvalue(), delete=not self._debug)
  if self._debug:
    sys.stdout.write('env_dir: script=%s\n' % (script))
    sys.stdout.flush()
  os.chmod(script, 0o755)
  try:
    rv = execute.execute(script, raise_error=True, shell=True, env=env)
  finally:
    # Always clean up the script unless debugging.
    if not self._debug:
      file_util.remove(script)
  parser = text_line_parser(rv.stdout)
  if self._debug:
    sys.stdout.write('env_dir: stdout=%s\n' % (rv.stdout))
    sys.stdout.write('env_dir: stderr=%s\n' % (rv.stderr))
    sys.stdout.flush()
  # Any stderr output is treated as a hard failure.
  if rv.stderr:
    raise RuntimeError(rv.stderr)
  # Environment before (markers 1-2) and after (markers 3-4) sourcing.
  env1 = self._parse_env_lines(parser.cut_lines('----1----', '----2----'))
  env2 = self._parse_env_lines(parser.cut_lines('----3----', '----4----'))
  delta = self._env_delta(env1, env2)
  instructions = []
  for key in delta.added:
    instructions.append(instruction(key, env2[key], action.SET))
  for key in delta.removed:
    # UNSET instructions carry no value.
    instructions.append(instruction(key, None, action.UNSET))
  for key in delta.changed:
    value1 = env1[key]
    value2 = env2[key]
    for inst in self._determine_change_instructions(key, value1, value2):
      instructions.append(inst)
  # NOTE(review): sorting by (key, value) only compares values when keys tie;
  # if duplicate keys ever occur, a None value (UNSET) would not compare
  # against a string in Python 3 - confirm keys are unique.
  return sorted(instructions, key=lambda x: (x.key, x.value))
def to_string(self, entry_formatter=None, sort=False, fixed_key_column_width=False):
  'Render the section header followed by its formatted, indented entries.'
  formatter = entry_formatter or self.default_entry_formatter
  entries = sorted(self.entries_) if sort else self.entries_
  # When requested, pad every key to the widest key in the section.
  key_column_width = 0
  if fixed_key_column_width:
    for entry in entries:
      key_column_width = max(key_column_width, len(entry.value.key))
  parts = [ str(self.header_), '\n' ]
  for i, entry in enumerate(entries):
    if i != 0:
      parts.append('\n')
    parts.append(' ')
    parts.append(formatter(entry, key_column_width=key_column_width))
    parts.append('\n')
  return ''.join(parts)