def _choices_as_array(self): from six import StringIO valuebuffer = StringIO(self.list_values) choices = [[item.strip(), item.strip()] for item in valuebuffer.readlines()] valuebuffer.close() return choices
def _call_git_command(self, command, input_file=None, output_file=None):
    """Run a git command with the configured committer identity.

    :param command: git sub-command and arguments to execute
    :param input_file: optional input file for the git operation
    :param output_file: optional output destination; '-' disables capture
    :return: return code of the git command
    """
    git_cmd = [self.GIT,
               '-c', 'user.name={}'.format(self.GIT_USER_NAME),
               '-c', 'user.email={}'.format(self.GIT_USER_EMAIL)]
    git_cmd.extend(command)
    self.output_data = []
    if not output_file:
        sink = StringIO()
    else:
        sink = None if output_file == '-' else output_file
    ret_code = ProcessHelper.run_subprocess_cwd(git_cmd,
                                                cwd=self.git_directory,
                                                input=input_file,
                                                output=sink)
    if not output_file:
        # Capture the stripped output lines for later inspection.
        self.output_data = [line.strip() for line in sink.readlines()]
    return ret_code
def test_subchapoverview(self):
    """Exercise the subchapter overview report as HTML and then as CSV."""
    # Log in as a known fixture user so the report is populated.
    auth.login_user(db.auth_user(11))
    session.auth = auth
    request.vars.tablekind = 'sccount'
    res = subchapoverview()
    self.assertIsNotNone(res)
    # Spot-check cells of the rendered HTML summary table.
    soup = BeautifulSoup(res['summary'])
    thlist = soup.select('th')
    self.assertEqual(thlist[11].text, 'user_1671')
    rl = soup.select('tr')
    cl = rl[10].select('td')
    self.assertEqual(cl[5].text, '4.0')
    self.assertEqual(cl[17].text, '6.0')
    # Re-run the report as CSV output with the per-div minimum table.
    request.vars.action = 'tocsv'
    request.vars.tablekind = 'dividmin'
    res = subchapoverview()
    csvf = StringIO(res)
    rows = csvf.readlines()
    # NOTE(review): the row/column indices below depend entirely on the
    # test fixture data set -- confirm against the fixture if they drift.
    cols = rows[18].split(',')
    print(cols)
    self.assertEqual(cols[0], ' ')
    self.assertEqual(cols[2], 'ch12_dict11')
    self.assertEqual(cols[-3].strip(), '2017-10-26 22:25:38')
    self.assertEqual(cols[-1].strip(), 'Aliasingandcopying')
    cols = rows[122].split(',')
    print(cols)
    self.assertEqual(cols[0], ' ')
    self.assertEqual(cols[3], '2017-08-30 22:29:30')
def _call_git_command(self, command, input_file=None, output_file=None):
    """Execute a git command in the repository directory.

    :param command: git sub-command and arguments to execute
    :param input_file: optional input file for the git operation
    :param output_file: optional file to receive the command output
    :return: return code of the git command
    """
    full_cmd = [self.GIT] + list(command)
    self.output_data = []
    # Capture into an in-memory buffer unless the caller supplied a file.
    capture = output_file if output_file else StringIO()
    ret_code = ProcessHelper.run_subprocess_cwd(full_cmd,
                                                cwd=self.git_directory,
                                                input=input_file,
                                                output=capture)
    if not output_file:
        self.output_data = [line.strip().encode(defenc)
                            for line in capture.readlines()]
    return ret_code
class _File(object): """ A file like object representing a file in git @todo: We don't support any byte ranges yet. """ def __init__(self, content): self._iter = iter self._data = StringIO(content) def readline(self): return self._data.readline() def readlines(self): return self._data.readlines() def read(self, size=None): return self._data.read(size) def close(self): return self._data.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close()
def fold_cell(self, cell, folded):
    """Remove folded lines from *cell*, marking each fold parent with '↔'.

    :param cell: source text of the cell
    :param folded: list of line indices (ascending) at which folds start
    :return: the cell text with folded regions collapsed
    """
    lines = StringIO(cell).readlines()
    # Guard against an empty fold list / empty cell, which previously
    # raised IndexError on folded[0] / lines[0].
    if folded and lines and folded[0] == 0 and (lines[0][0] == '#' or lines[0][0] == '%'):
        # fold whole cell when first line is a comment or magic
        return lines[0].rstrip('\n') + '↔\n'
    fold_indent = 0
    fold = False
    fcell = ""
    for i, line in enumerate(lines):
        # The line's indent level decides whether an active fold ends.
        indent = len(line) - len(line.lstrip(' '))
        if indent <= fold_indent:
            fold = False
            fold_indent = 0
        if i in folded:
            # Start a new fold: emit the parent line with the marker and
            # suppress subsequent deeper-indented lines.
            fold = True
            fold_indent = indent
            fcell += line.rstrip('\n') + '↔\n'
        if fold is False:
            fcell += line
    return fcell
def test_api_key_should_be_revoked(self): user = user_factory(id=67890) # The test csv contains an entry with this user and the "right" secret. right_secret = ( 'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26') apikey = APIKey.objects.create( key='user:{}:{}'.format(user.pk, '333'), secret=right_secret, user=user, is_active=True) stdout = StringIO() call_command('revoke_api_keys', self.csv_path, stdout=stdout) stdout.seek(0) output = stdout.readlines() assert output[0] == ( 'Ignoring APIKey user:12345:666, it does not exist.\n') assert output[1] == ( 'Revoked APIKey user:67890:333.\n') assert output[2] == ( 'Ignoring APIKey garbage, it does not exist.\n') assert output[3] == ( 'Done. Revoked 1 keys out of 3 entries.\n') # API key is now inactive, secret hasn't changed, the other user api # key is still there, there are no additional APIKeys. apikey.reload() assert apikey.secret == right_secret assert apikey.is_active is None assert APIKey.objects.filter(user=user).count() == 2 assert APIKey.objects.filter(user=user, is_active=True).count() == 1
def test_filter_encrypted(self):
    """Every backup listed with --encrypted must be a .gpg file."""
    captured = StringIO()
    with patch('sys.stdout', captured):
        execute_from_command_line(['', 'listbackups', '--encrypted', '-q'])
    captured.seek(0)
    captured.readline()  # skip the header line
    remaining = captured.readlines()
    for row in remaining:
        self.assertIn('.gpg', row)
def test_filter_media(self):
    """Every backup listed for content-type 'media' must be a .tar file."""
    captured = StringIO()
    with patch('sys.stdout', captured):
        execute_from_command_line(
            ['', 'listbackups', '--content-type', 'media', '-q'])
    captured.seek(0)
    captured.readline()  # skip the header line
    for row in captured.readlines():
        self.assertIn('.tar', row)
class GbpLogTester(object):
    """
    Helper class for tests that need to capture logging output
    """
    def __init__(self):
        """Object initialization"""
        self._log = None          # StringIO receiving captured log output
        self._loghandler = None   # handler installed on the gbp logger

    def _capture_log(self, capture=True):
        """Start (capture=True) or stop (capture=False) log capture"""
        if capture:
            assert self._log is None, "Log capture already started"
            self._log = StringIO()
            self._loghandler = gbp.log.GbpStreamHandler(self._log, False)
            self._loghandler.addFilter(gbp.log.GbpFilter([gbp.log.WARNING,
                                                          gbp.log.ERROR]))
            # Replace all existing handlers with our capturing handler.
            # NOTE(review): the removed handlers are not restored when
            # capture is stopped -- confirm that is intended.
            handlers = list(gbp.log.LOGGER.handlers)
            for hdl in handlers:
                gbp.log.LOGGER.removeHandler(hdl)
            gbp.log.LOGGER.addHandler(self._loghandler)
        else:
            assert self._log is not None, "Log capture not started"
            gbp.log.LOGGER.removeHandler(self._loghandler)
            self._loghandler.close()
            self._loghandler = None
            self._log.close()
            self._log = None

    def _get_log(self):
        """Get the captured log output as a list of lines"""
        self._log.seek(0)
        return self._log.readlines()

    def _check_log_empty(self):
        """Check that nothing was logged"""
        output = self._get_log()
        ok_(output == [], "Log is not empty: %s" % output)

    def _check_log(self, linenum, regex):
        """Check that the specified line on log matches expectations"""
        if self._log is None:
            raise Exception("BUG in unittests: no log captured!")
        log = self._get_log()
        assert_less(linenum, len(log), "Not enough log lines: %d" % len(log))
        # Reuse the already-fetched lines instead of re-reading the
        # buffer via a second _get_log() call.
        output = log[linenum].strip()
        ok_(re.match(regex, output),
            "Log entry '%s' doesn't match '%s'" % (output, regex))

    def _clear_log(self):
        """Clear the captured log buffer"""
        if self._log is not None:
            self._log.seek(0)
            self._log.truncate()
class GbpLogTester(object):
    """
    Helper class for tests that need to capture logging output
    """
    def __init__(self):
        """Object initialization"""
        self._log = None          # StringIO receiving captured log output
        self._loghandler = None   # handler installed on the gbp logger

    def _capture_log(self, capture=True):
        """Start (capture=True) or stop (capture=False) log capture"""
        if capture:
            assert self._log is None, "Log capture already started"
            self._log = StringIO()
            self._loghandler = gbp.log.GbpStreamHandler(self._log, False)
            self._loghandler.addFilter(
                gbp.log.GbpFilter([gbp.log.WARNING, gbp.log.ERROR]))
            # Replace all existing handlers with our capturing handler.
            # NOTE(review): the removed handlers are not restored when
            # capture is stopped -- confirm that is intended.
            handlers = list(gbp.log.LOGGER.handlers)
            for hdl in handlers:
                gbp.log.LOGGER.removeHandler(hdl)
            gbp.log.LOGGER.addHandler(self._loghandler)
        else:
            assert self._log is not None, "Log capture not started"
            gbp.log.LOGGER.removeHandler(self._loghandler)
            self._loghandler.close()
            self._loghandler = None
            self._log.close()
            self._log = None

    def _get_log(self):
        """Get the captured log output as a list of lines"""
        self._log.seek(0)
        return self._log.readlines()

    def _check_log_empty(self):
        """Check that nothing was logged"""
        output = self._get_log()
        ok_(output == [], "Log is not empty: %s" % output)

    def _check_log(self, linenum, regex):
        """Check that the specified line on log matches expectations"""
        if self._log is None:
            raise Exception("BUG in unittests: no log captured!")
        log = self._get_log()
        assert_less(linenum, len(log), "Not enough log lines: %d" % len(log))
        # Reuse the already-fetched lines instead of re-reading the
        # buffer via a second _get_log() call.
        output = log[linenum].strip()
        ok_(re.match(regex, output),
            "Log entry '%s' doesn't match '%s'" % (output, regex))

    def _clear_log(self):
        """Clear the captured log buffer"""
        if self._log is not None:
            self._log.seek(0)
            self._log.truncate()
def get_doctype(data):
    """
    return the public id for the doctype given some raw xml data
    """
    # Accept either a file-like object or a raw string.
    if not hasattr(data, 'readlines'):
        data = NativeStringIO(data)
    for line in data.readlines():
        line = line.strip()
        if not line:
            continue
        # Skip the XML declaration and leading comment lines.
        if line.startswith('<?xml') or line.startswith('<!-- '):
            continue
        m = DT_RGX.match(line)
        if m is not None:
            return m.group(1)
        else:
            # NOTE(review): this raises on the first non-skipped line that
            # does not match DT_RGX, i.e. the doctype is expected to be the
            # first meaningful line -- confirm against callers.
            raise DTException('Unable to match doctype in "%s"' % line)
def test_api_key_does_not_exist(self): user = user_factory() # The test csv does not contain an entry for this user. apikey = APIKey.new_jwt_credentials(user=user) old_secret = apikey.secret stdout = StringIO() call_command('revoke_api_keys', self.csv_path, stdout=stdout) stdout.seek(0) output = stdout.readlines() assert output[0] == ( 'Ignoring APIKey user:12345:666, it does not exist.\n') assert output[1] == ( 'Ignoring APIKey user:67890:333, it does not exist.\n') # APIKey is still active, secret hasn't changed, there are no # additional APIKeys. apikey.reload() assert apikey.secret == old_secret assert apikey.is_active assert APIKey.objects.filter(user=user).count() == 1
def test_api_key_has_wrong_secret(self): user = user_factory(id=12345) # The test csv contains an entry with this user and the "wrong" secret. right_secret = ( 'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26') apikey = APIKey.objects.create( key='user:{}:{}'.format(user.pk, '666'), secret=right_secret, user=user, is_active=True) stdout = StringIO() call_command('revoke_api_keys', self.csv_path, stdout=stdout) stdout.seek(0) output = stdout.readlines() assert output[0] == ( 'Ignoring APIKey user:12345:666, secret differs.\n') assert output[1] == ( 'Ignoring APIKey user:67890:333, it does not exist.\n') # API key is still active, secret hasn't changed, there are no # additional APIKeys. apikey.reload() assert apikey.secret == right_secret assert apikey.is_active assert APIKey.objects.filter(user=user).count() == 1
class FormatterTest(unittest.TestCase):
    """Base class for formatter tests; subclasses supply the formatter."""

    def setUp(self):
        self.position = 0
        logger_name = "test_%s" % type(self).__name__
        self.logger = structuredlog.StructuredLogger(logger_name)
        self.output_file = StringIO()
        self.handler = handlers.StreamHandler(self.output_file,
                                              self.get_formatter())
        self.logger.add_handler(self.handler)

    def set_position(self, pos=None):
        """Remember *pos* (or the current write offset) as the read start."""
        self.position = self.output_file.tell() if pos is None else pos

    def get_formatter(self):
        raise NotImplementedError(
            "FormatterTest subclasses must implement get_formatter")

    @property
    def loglines(self):
        """Lines written since the remembered position, right-stripped."""
        self.output_file.seek(self.position)
        return [entry.rstrip() for entry in self.output_file.readlines()]
class FormatterTest(unittest.TestCase):
    """Shared fixture for formatter tests: logger writing into a StringIO."""

    def setUp(self):
        self.position = 0
        self.logger = structuredlog.StructuredLogger(
            "test_%s" % type(self).__name__)
        self.output_file = StringIO()
        formatter = self.get_formatter()
        self.handler = handlers.StreamHandler(self.output_file, formatter)
        self.logger.add_handler(self.handler)

    def set_position(self, pos=None):
        """Record where subsequent loglines reads should start from."""
        if pos is None:
            pos = self.output_file.tell()
        self.position = pos

    def get_formatter(self):
        raise NotImplementedError(
            "FormatterTest subclasses must implement get_formatter")

    @property
    def loglines(self):
        """Return the right-stripped lines written since ``position``."""
        self.output_file.seek(self.position)
        stripped = [raw.rstrip() for raw in self.output_file.readlines()]
        return stripped
def parse(self, name, base_mark, io):
    """Parse INI-style configuration text into a ComponentConfig.

    :param name: name recorded on the resulting ComponentConfig
    :param base_mark: mark that all reported positions are merged with
    :param io: file-like object or raw string containing the config text
    :return: ComponentConfig carrying sections, parameters and errors
    """
    # Accept raw strings as well as file-like objects.
    if not hasattr(io, 'readlines'):
        io = StringIO(io)

    def mark(line, column=0):
        # Position helper: offset line/column by the caller's base mark.
        return base_mark.merge(Mark('', line, column))

    errors = []
    current_section_name = ConfigSectionName(mark(0), mark(0), '')
    # State for the parameter currently being accumulated; a value may
    # span multiple lines via indented continuations.
    current_param_name = None
    current_param_value = None
    current_param_delimiter = None
    sections = []
    parameters = []
    line_number = -1
    for line in io.readlines():
        line = line.rstrip()
        line_number += 1
        # Close the pending parameter when a quoted value ends or the new
        # line is not an indented continuation.
        if current_param_name \
                and (current_param_value.quotechar
                     or (line == '' or not line[0].isspace())):
            param = ConfigParameter(
                current_param_name.start_mark,
                current_param_value.end_mark,
                current_param_name,
                current_param_value,
                current_param_delimiter)
            parameters.append(param)
            current_param_name = None
            current_param_value = None
            current_param_delimiter = None
        if line == '':
            continue
        # Full-line comments.
        if line[0] in '#;':
            continue
        if line[0].isspace():
            # Indented line: continuation of the current parameter value.
            if current_param_name:
                current_param_value.end_mark = mark(line_number, len(line))
                current_param_value.text += line.lstrip()
                continue
            else:
                errors.append(
                    ParseError('Unexpected multiline value continuation',
                               mark(line_number)))
                continue
        if line[0] == '[':
            # Section header line.
            end_index = line.find(']')
            if end_index == -1:
                # Recover from a missing ']' by trimming trailing blanks.
                errors.append(
                    ParseError('Unclosed section',
                               mark(line_number, len(line))))
                end_index = len(line)
                while line[end_index - 1].isspace():
                    end_index -= 1
                if end_index <= 1:
                    errors.append(
                        ParseError('Missing section name',
                                   mark(line_number)))
                    continue
            else:
                # Anything but whitespace after ']' is an error.
                i = end_index + 1
                while i < len(line):
                    if not line[i].isspace():
                        errors.append(
                            ParseError('Extra chars after section name',
                                       mark(line_number, i)))
                        break
                    i += 1
            # Flush the previous section before starting the new one.
            if current_section_name.text != '' or len(parameters) > 0:
                section = ConfigSection(
                    current_section_name.start_mark,
                    mark(line_number),
                    current_section_name,
                    parameters)
                sections.append(section)
                parameters = []
            current_section_name = ConfigSectionName(
                mark(line_number, 0),
                mark(line_number, end_index),
                line[1:end_index]
            )
        else:
            # key/value parameter line matched by key_value_re.
            m = self.key_value_re.match(line)
            if m:
                current_param_name = ConfigParameterName(
                    mark(line_number, m.start(1)),
                    mark(line_number, m.end(1)),
                    m.group(1)
                )
                current_param_delimiter = TextElement(
                    mark(line_number, m.start(2)),
                    mark(line_number, m.end(2)),
                    m.group(2)
                )
                # Unquote value
                value = m.group(3)
                quotechar = None
                if len(value) > 0 and (value[0] == value[-1]
                                       and value[0] in "\"'"):
                    quotechar = value[0]
                    value = value[1:-1]
                current_param_value = ConfigParameterValue(
                    mark(line_number, m.start(3)),
                    mark(line_number, m.end(3)),
                    value,
                    quotechar=quotechar
                )
            else:
                errors.append(
                    ParseError('Syntax error in line "%s"' % line,
                               mark(line_number)))
    # Flush any parameter still pending at end of input.
    if current_param_name:
        param = ConfigParameter(
            current_param_name.start_mark,
            current_param_value.end_mark,
            current_param_name,
            current_param_value,
            current_param_delimiter)
        parameters.append(param)
    # Flush the last section.
    if current_section_name.text != '' or len(parameters) > 0:
        section = ConfigSection(
            current_section_name.start_mark,
            mark(line_number),
            current_section_name,
            parameters)
        sections.append(section)
        parameters = []
    end_mark = base_mark
    if len(sections) > 0:
        end_mark = base_mark.merge(sections[-1].end_mark)
    config = ComponentConfig(base_mark, end_mark, name, sections, errors)
    return config
class NormalHTTPFile(HTTPFile):
    """MogileFS file that buffers writes in memory and uploads on close.

    Content accumulates in a StringIO; close() PUTs it to the first
    reachable destination path and then registers it with the tracker
    via a 'create_close' request.
    """

    def __init__(self, path, devid, backup_dests=None, mg=None, fid=None,
                 cls=None, key=None, create_close_arg=None, **kwds):
        super(NormalHTTPFile, self).__init__(mg, fid, key, cls,
                                             create_close_arg)
        if backup_dests is None:
            backup_dests = []
        # In-memory buffer holding the file content until close().
        self._fp = StringIO()
        # Candidate (devid, path) upload destinations, primary first.
        self._paths = [(devid, path)] + list(backup_dests)
        self._is_closed = 0

    def paths(self):
        """Return the list of candidate (devid, path) destinations."""
        return self._paths

    def read(self, n=-1):
        return self._fp.read(n)

    def readline(self, *args, **kwds):
        return self._fp.readline(*args, **kwds)

    def readlines(self, *args, **kwds):
        return self._fp.readlines(*args, **kwds)

    def write(self, content):
        self._fp.write(content)

    def close(self):
        """Upload the buffered content and register it with the backend."""
        if not self._is_closed:
            self._is_closed = True

            # content = self._fp.getvalue()
            # self._fp.close()

            # Try each destination in order until one PUT succeeds.
            for tried_devid, tried_path in self._paths:
                try:
                    # self._request(tried_path, "PUT", content)
                    self._fp.seek(0)
                    put.putfile(self._fp, tried_path)
                    devid = tried_devid
                    path = tried_path
                    break
                except HTTPError as e:
                    continue
            else:
                # No destination accepted the upload.
                devid = None
                path = None

            # Determine the uploaded size from the buffer's end offset.
            self._fp.seek(0, 2)
            size = self._fp.tell()
            self._fp.close()

            if devid:
                params = {
                    'fid': self.fid,
                    'domain': self.mg.domain,
                    'key': self.key,
                    'path': path,
                    'devid': devid,
                    'size': size
                }
                if self.create_close_arg:
                    params.update(self.create_close_arg)
                try:
                    self.mg.backend.do_request('create_close', params)
                except MogileFSError as e:
                    # 'empty_file' is tolerated; anything else propagates.
                    if e.err != 'empty_file':
                        raise

    def seek(self, pos, mode=0):
        return self._fp.seek(pos, mode)

    def tell(self):
        return self._fp.tell()
def test_readline(d, val): buf = StringIO(val) for line in buf.readlines(): assert line == d.readline()
def test_stream(self):
    """The namelist written to a stream must match the expected text."""
    buf = StringIO()
    create_wps_namelist(self.cfg, buf)
    buf.seek(0)
    produced = ''.join(buf.readlines())
    self.assertEqual(produced, self.expected)
def test_readlines(d, val): buf = StringIO(val) assert buf.readlines() == d.readlines()
class NormalHTTPFile(HTTPFile):
    """MogileFS file that buffers writes in memory and uploads on close.

    Content accumulates in a StringIO; close() PUTs it to the first
    reachable destination path and then registers it with the tracker
    via a 'create_close' request.
    """

    def __init__(self, path, devid, backup_dests=None, mg=None, fid=None,
                 cls=None, key=None, create_close_arg=None, **kwds):
        super(NormalHTTPFile, self).__init__(mg, fid, key, cls,
                                             create_close_arg)
        if backup_dests is None:
            backup_dests = []
        # In-memory buffer holding the file content until close().
        self._fp = StringIO()
        # Candidate (devid, path) upload destinations, primary first.
        self._paths = [(devid, path)] + list(backup_dests)
        self._is_closed = 0

    def paths(self):
        """Return the list of candidate (devid, path) destinations."""
        return self._paths

    def read(self, n= -1):
        return self._fp.read(n)

    def readline(self, *args, **kwds):
        return self._fp.readline(*args, **kwds)

    def readlines(self, *args, **kwds):
        return self._fp.readlines(*args, **kwds)

    def write(self, content):
        self._fp.write(content)

    def close(self):
        """Upload the buffered content and register it with the backend."""
        if not self._is_closed:
            self._is_closed = True

            # content = self._fp.getvalue()
            # self._fp.close()

            # Try each destination in order until one PUT succeeds.
            for tried_devid, tried_path in self._paths:
                try:
                    # self._request(tried_path, "PUT", content)
                    self._fp.seek(0)
                    put.putfile(self._fp, tried_path)
                    devid = tried_devid
                    path = tried_path
                    break
                except HTTPError as e:
                    continue
            else:
                # No destination accepted the upload.
                devid = None
                path = None

            # Determine the uploaded size from the buffer's end offset.
            self._fp.seek(0, 2)
            size = self._fp.tell()
            self._fp.close()

            if devid:
                params = {
                    'fid'   : self.fid,
                    'domain': self.mg.domain,
                    'key'   : self.key,
                    'path'  : path,
                    'devid' : devid,
                    'size'  : size
                }
                if self.create_close_arg:
                    params.update(self.create_close_arg)
                try:
                    self.mg.backend.do_request('create_close', params)
                except MogileFSError as e:
                    # 'empty_file' is tolerated; anything else propagates.
                    if e.err != 'empty_file':
                        raise

    def seek(self, pos, mode=0):
        return self._fp.seek(pos, mode)

    def tell(self):
        return self._fp.tell()