Example #1
 def test_import_ignore(self):
     output = StringIO()
     call_command(
         'import_json',
         '--main-component', 'test',
         '--project', 'test',
         TEST_COMPONENTS,
         stdout=output
     )
     self.assertIn(
         'Imported Test/Gettext PO with 3 translations',
         output.getvalue()
     )
     output.truncate()
     call_command(
         'import_json',
         '--main-component', 'test',
         '--project', 'test',
         '--ignore',
         TEST_COMPONENTS,
         stderr=output
     )
     self.assertIn(
         'Component Test/Gettext PO already exists',
         output.getvalue()
     )
Example #2
 def csv_iter():
     rows = iter(table)
     fo = IO()
     csv_writer = csv.writer(fo)
     csv_writer.writerow(converter.header2())
     while True:
         try:
             for _ in range(1000):
                 row = next(rows)
                 #print row
                 csv_writer.writerow(row)
         except StopIteration:
             fo.seek(0)
             yield fo.read().encode('utf-8')
             del fo
             break
         fo.seek(0)
         data = fo.read().encode('utf-8')
         fo.seek(0)
         fo.truncate()
         yield data
     if converter.errors:
         yield 'The following errors were found at unspecified points in processing:\n'
         for error in converter.errors:
             yield str(error)+'\n'
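csv_iter above is a nested generator: table, converter and IO come from its enclosing scope (IO is presumably an alias for a StringIO class). A self-contained sketch of the same chunked-streaming idea, with made-up names and io.StringIO assumed, reuses one buffer between chunks via seek(0)/truncate(0):

import csv
from io import StringIO

def stream_csv(rows, header, chunk_size=1000):
    # Yield UTF-8 encoded CSV in chunks of roughly `chunk_size` rows,
    # reusing a single in-memory buffer between chunks.
    buf = StringIO()
    writer = csv.writer(buf)
    writer.writerow(header)
    for count, row in enumerate(rows, 1):
        writer.writerow(row)
        if count % chunk_size == 0:
            yield buf.getvalue().encode('utf-8')
            buf.seek(0)
            buf.truncate(0)
    yield buf.getvalue().encode('utf-8')  # flush whatever is left in the buffer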
Example #3
 def test_import_ignore(self):
     output = StringIO()
     call_command(
         'import_json',
         '--main-component', 'test',
         '--project', 'test',
         TEST_COMPONENTS,
         stdout=output
     )
     self.assertIn(
         'Imported Test/Gettext PO with 3 translations',
         output.getvalue()
     )
     output.truncate()
     call_command(
         'import_json',
         '--main-component', 'test',
         '--project', 'test',
         '--ignore',
         TEST_COMPONENTS,
         stderr=output
     )
     self.assertIn(
         'Component Test/Gettext PO already exists',
         output.getvalue()
     )
Example #4
 def test_call(self):
     ident = self.base_identity_class()
     sav_req = pyos.http.request
     pyos.http.request = Mock()
     sav_debug = ident.http_log_debug
     ident.http_log_debug = True
     uri = "https://%s/%s" % (utils.random_ascii(), utils.random_ascii())
     sav_stdout = sys.stdout
     out = StringIO()
     sys.stdout = out
     utils.add_method(ident, lambda self: "", "_get_auth_endpoint")
     dkv = utils.random_ascii()
     data = {dkv: dkv}
     hkv = utils.random_ascii()
     headers = {hkv: hkv}
     for std_headers in (True, False):
         expected_headers = ident._standard_headers() if std_headers else {}
         expected_headers.update(headers)
         for admin in (True, False):
             ident.method_post(uri, data=data, headers=headers,
                     std_headers=std_headers, admin=admin)
             pyos.http.request.assert_called_with("POST", uri, body=data,
                     headers=expected_headers)
             self.assertEqual(out.getvalue(), "")
             out.seek(0)
             out.truncate()
     out.close()
     pyos.http.request = sav_req
     ident.http_log_debug = sav_debug
     sys.stdout = sav_stdout
Example #5
 def log_chk(self, hdr, level):
     # utility method to check header checking / logging
     # If level == 0, this header should always be OK
     str_io = StringIO()
     logger = logging.getLogger('test.logger')
     handler = logging.StreamHandler(str_io)
     logger.addHandler(handler)
     str_io.truncate(0)
     hdrc = hdr.copy()
     if level == 0:  # Should never log or raise error
         logger.setLevel(0)
         hdrc.check_fix(logger=logger, error_level=0)
         assert_equal(str_io.getvalue(), '')
         logger.removeHandler(handler)
         return hdrc, '', ()
     # Non zero level, test above and below threshold
     # Logging level above threshold, no log
     logger.setLevel(level + 1)
     e_lev = level + 1
     hdrc.check_fix(logger=logger, error_level=e_lev)
     assert_equal(str_io.getvalue(), '')
     # Logging level below threshold, log appears
     logger.setLevel(level - 1)
     hdrc = hdr.copy()
     hdrc.check_fix(logger=logger, error_level=e_lev)
     assert_true(str_io.getvalue() != '')
     message = str_io.getvalue().strip()
     logger.removeHandler(handler)
     hdrc2 = hdr.copy()
     raiser = (HeaderDataError, hdrc2.check_fix, logger, level)
     return hdrc, message, raiser
Example #6
class UnicodeDictWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, fieldnames, encoding="utf-8", **kwargs):
        # Redirect output to a queue
        self.fieldnames = fieldnames
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, **kwargs)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writeheader(self):
        self.writer.writerow(self.fieldnames)

    def writerow(self, row):
        self.writer.writerow(
            [six.text_type(row[x]).encode("utf-8") for x in self.fieldnames])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
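A minimal usage sketch for the UnicodeDictWriter recipe above (the UnicodeWriter variants in the following examples are used the same way, just with positional rows). It assumes Python 2, where the csv module expects byte strings, a file opened in binary mode, and the imports the class itself needs (csv, codecs, six, StringIO) already in place; the file name, field names and rows are made up:

fieldnames = [u'name', u'city']
with open('people.csv', 'wb') as f:
    w = UnicodeDictWriter(f, fieldnames, encoding='utf-8')
    w.writeheader()
    w.writerows([
        {u'name': u'Ana', u'city': u'São Paulo'},
        {u'name': u'Jürgen', u'city': u'Köln'},
    ])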
Example #7
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
Example #8
class UnicodeWriter:

    # delimiter="\t"
    def __init__(self, f, dialect="excel-tab", encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow(
            [s.encode("utf-8") if isinstance(s, unicode) else s for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
Example #9
 def csv_iter():
     rows = iter(table)
     fo = IO()
     csv_writer = csv.writer(fo)
     csv_writer.writerow(converter.header2())
     while True:
         try:
             for _ in range(1000):
                 row = next(rows)
                 #print row
                 csv_writer.writerow(row)
         except StopIteration:
             fo.seek(0)
             yield fo.read().encode('utf-8')
             del fo
             break
         fo.seek(0)
         data = fo.read().encode('utf-8')
         fo.seek(0)
         fo.truncate()
         yield data
     if converter.errors:
         yield 'The following errors were found at unspecified points in processing:\n'
         for error in converter.errors:
             yield str(error) + '\n'
Example #10
 def write_to_db(self, db, transaction=None, commit=True):
     if transaction is None:
         transaction = db
     fp = StringIO()
     if len(self) < Timeseries.MAX_ALL_BOTTOM:
         top = ''
         middle = None
         self.write(fp)
         bottom = fp.getvalue()
     else:
         dates = sorted(self.keys())
         self.write(fp, end=dates[Timeseries.ROWS_IN_TOP_BOTTOM - 1])
         top = fp.getvalue()
         fp.truncate(0)
         fp.seek(0)
         self.write(fp, start=dates[Timeseries.ROWS_IN_TOP_BOTTOM],
                    end=dates[-(Timeseries.ROWS_IN_TOP_BOTTOM + 1)])
         middle = self.blob_create(
             zlib.compress(fp.getvalue().encode('ascii')))
         fp.truncate(0)
         fp.seek(0)
         self.write(fp, start=dates[-Timeseries.ROWS_IN_TOP_BOTTOM])
         bottom = fp.getvalue()
     fp.close()
     c = db.cursor()
     c.execute("DELETE FROM ts_records WHERE id=%d" % (self.id))
     c.execute("""INSERT INTO ts_records (id, top, middle, bottom)
                  VALUES (%s, %s, %s, %s)""", (self.id, top, middle,
               bottom))
     c.close()
     if commit:
         transaction.commit()
Example #11
def log_chk(hdr, level):
    """ Utility method to check header checking / logging

    Asserts that log entry appears during ``hdr.check_fix`` for logging level
    below `level`.

    Parameters
    ----------
    hdr : instance
        Instance of header class, with methods ``copy`` and ``check_fix``.  The
        header has some minor error (defect) which can be detected with
        ``check_fix``.
    level : int
        Level (severity) of defect present in `hdr`.  When logging threshold is
        at or below `level`, a message appears in the default log (we test that
        happens).

    Returns
    -------
    hdrc : instance
        Header, with defect corrected.
    message : str
        Message generated in log when defect was detected.
    raiser : tuple
        Tuple of error type, callable, arguments that will raise an exception
        when the defect is detected.  Can be empty.  Check with ``if raiser !=
        (): assert_raises(*raiser)``.
    """
    str_io = StringIO()
    logger = logging.getLogger('test.logger')
    handler = logging.StreamHandler(str_io)
    logger.addHandler(handler)
    str_io.truncate(0)
    hdrc = hdr.copy()
    if level == 0:  # Should never log or raise error
        logger.setLevel(0)
        hdrc.check_fix(logger=logger, error_level=0)
        assert_equal(str_io.getvalue(), '')
        logger.removeHandler(handler)
        return hdrc, '', ()
    # Non zero defect level, test above and below threshold.
    # Set error level above defect level to prevent exception when defect
    # detected.
    e_lev = level + 1
    # Logging level above threshold, no log.
    logger.setLevel(level + 1)
    hdrc.check_fix(logger=logger, error_level=e_lev)
    assert_equal(str_io.getvalue(), '')
    # Logging level below threshold, log appears, store logged message
    logger.setLevel(level - 1)
    hdrc = hdr.copy()
    hdrc.check_fix(logger=logger, error_level=e_lev)
    assert_true(str_io.getvalue() != '')
    message = str_io.getvalue().strip()
    logger.removeHandler(handler)
    # When error level == level, check_fix should raise an error
    hdrc2 = hdr.copy()
    raiser = (HeaderDataError, hdrc2.check_fix, logger, level)
    return hdrc, message, raiser
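A hedged sketch of a test that uses log_chk as documented above. The header construction is a placeholder (the real tests build specific header objects with known defects); only log_chk's return contract is taken from the docstring:

def test_header_defect_logging():
    hdr = make_defective_header()   # hypothetical helper: header with a minor, fixable defect
    hdrc, message, raiser = log_chk(hdr, level=30)
    assert message != ''            # the defect was logged ...
    if raiser != ():                # ... and raises once error_level drops to the defect level
        assert_raises(*raiser)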
Example #12
def test_telnet_scenario(scenario):
    writer = StringIO()
    parser = TelnetParser(writer=writer.write)
    for feed, expected, sent_expected in scenario:
        data = parser.feed(feed)
        assert data == expected
        ctl = writer.getvalue()
        assert ctl == sent_expected
        writer.truncate(0)
Example #13
    def test_branch_exceptions(self):
        """
        This will create conditions to exercise bad paths in the switch_branch function.
        """
        # create bare repo that we can mess with and attempt an import
        bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT,
                                                     'bare.git'))
        os.mkdir(bare_repo)
        self.addCleanup(shutil.rmtree, bare_repo)
        subprocess.check_output([
            'git',
            '--bare',
            'init',
        ],
                                stderr=subprocess.STDOUT,
                                cwd=bare_repo)

        # Build repo dir
        repo_dir = self.git_repo_dir
        if not os.path.isdir(repo_dir):
            os.mkdir(repo_dir)
        self.addCleanup(shutil.rmtree, repo_dir)

        rdir = '{0}/bare'.format(repo_dir)
        with self.assertRaises(GitImportErrorBadRepo):
            git_import.add_repo('file://{0}'.format(bare_repo), None, None)

        # Get logger for checking strings in logs
        output = StringIO()
        test_log_handler = logging.StreamHandler(output)
        test_log_handler.setLevel(logging.DEBUG)
        glog = git_import.log
        glog.addHandler(test_log_handler)

        # Move remote so fetch fails
        shutil.move(bare_repo, '{0}/not_bare.git'.format(settings.TEST_ROOT))
        try:
            git_import.switch_branch('master', rdir)
        except GitImportError:
            self.assertIn('Unable to fetch remote', output.getvalue())
        shutil.move('{0}/not_bare.git'.format(settings.TEST_ROOT), bare_repo)
        output.truncate(0)

        # Replace origin with a different remote
        subprocess.check_output([
            'git',
            'remote',
            'rename',
            'origin',
            'blah',
        ],
                                stderr=subprocess.STDOUT,
                                cwd=rdir)
        with self.assertRaises(GitImportError):
            git_import.switch_branch('master', rdir)
        self.assertIn('Getting a list of remote branches failed',
                      output.getvalue())
Example #14
class GbpLogTester(object):
    """
    Helper class for tests that need to capture logging output
    """
    def __init__(self):
        """Object initialization"""
        self._log = None
        self._loghandler = None

    def _capture_log(self, capture=True):
        """ Capture log"""
        if capture:
            assert self._log is None, "Log capture already started"
            self._log = StringIO()
            self._loghandler = gbp.log.GbpStreamHandler(self._log, False)
            self._loghandler.addFilter(gbp.log.GbpFilter([gbp.log.WARNING,
                                                          gbp.log.ERROR]))
            handlers = list(gbp.log.LOGGER.handlers)
            for hdl in handlers:
                gbp.log.LOGGER.removeHandler(hdl)
            gbp.log.LOGGER.addHandler(self._loghandler)
        else:
            assert self._log is not None, "Log capture not started"
            gbp.log.LOGGER.removeHandler(self._loghandler)
            self._loghandler.close()
            self._loghandler = None
            self._log.close()
            self._log = None

    def _get_log(self):
        """Get the captured log output"""
        self._log.seek(0)
        return self._log.readlines()

    def _check_log_empty(self):
        """Check that nothig was logged"""
        output = self._get_log()
        ok_(output == [], "Log is not empty: %s" % output)

    def _check_log(self, linenum, regex):
        """Check that the specified line on log matches expectations"""
        if self._log is None:
            raise Exception("BUG in unittests: no log captured!")
        log = self._get_log()
        assert_less(linenum, len(log),
                    "Not enough log lines: %d" % len(log))
        output = self._get_log()[linenum].strip()
        ok_(re.match(regex, output),
            "Log entry '%s' doesn't match '%s'" % (output, regex))

    def _clear_log(self):
        """Clear the mock strerr"""
        if self._log is not None:
            self._log.seek(0)
            self._log.truncate()
Example #15
class GbpLogTester(object):
    """
    Helper class for tests that need to capture logging output
    """
    def __init__(self):
        """Object initialization"""
        self._log = None
        self._loghandler = None

    def _capture_log(self, capture=True):
        """ Capture log"""
        if capture:
            assert self._log is None, "Log capture already started"
            self._log = StringIO()
            self._loghandler = gbp.log.GbpStreamHandler(self._log, False)
            self._loghandler.addFilter(
                gbp.log.GbpFilter([gbp.log.WARNING, gbp.log.ERROR]))
            handlers = list(gbp.log.LOGGER.handlers)
            for hdl in handlers:
                gbp.log.LOGGER.removeHandler(hdl)
            gbp.log.LOGGER.addHandler(self._loghandler)
        else:
            assert self._log is not None, "Log capture not started"
            gbp.log.LOGGER.removeHandler(self._loghandler)
            self._loghandler.close()
            self._loghandler = None
            self._log.close()
            self._log = None

    def _get_log(self):
        """Get the captured log output"""
        self._log.seek(0)
        return self._log.readlines()

    def _check_log_empty(self):
        """Check that nothig was logged"""
        output = self._get_log()
        ok_(output == [], "Log is not empty: %s" % output)

    def _check_log(self, linenum, regex):
        """Check that the specified line on log matches expectations"""
        if self._log is None:
            raise Exception("BUG in unittests: no log captured!")
        log = self._get_log()
        assert_less(linenum, len(log), "Not enough log lines: %d" % len(log))
        output = self._get_log()[linenum].strip()
        ok_(re.match(regex, output),
            "Log entry '%s' doesn't match '%s'" % (output, regex))

    def _clear_log(self):
        """Clear the mock strerr"""
        if self._log is not None:
            self._log.seek(0)
            self._log.truncate()
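A sketch of how a test case might mix in the GbpLogTester helper above. Only the helper's own methods are taken from the code; the warning text, the regex, and the idea of triggering gbp.log.warn directly are assumptions for illustration:

import unittest
import gbp.log

class SomeCommandTest(unittest.TestCase, GbpLogTester):
    def setUp(self):
        GbpLogTester.__init__(self)        # TestCase won't call it for us
        self._capture_log(True)
        self.addCleanup(self._capture_log, False)

    def test_warning_is_captured(self):
        gbp.log.warn("bad input")          # stands in for the code under test
        self._check_log(0, r".*bad input")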
Example #16
def test_output_bold_green_on_bold_white():
    "file-like filter output: bold green on white"

    io = StringIO()
    couleur.proxy(io).enable()
    io.write("#{bold}#{green}#{on:white}Hello\n")
    assert_equals('\033[1;32;47mHello\n', io.getvalue())
    couleur.proxy(io).disable()
    io.seek(0)
    io.truncate()
    io.write("#{black}should not be translated\n")
    assert_equals('#{black}should not be translated\n', io.getvalue())
Example #17
def test_output_green_and_red_on_white_foreground():
    "file-like filter output: green foreground and white on red background"

    io = StringIO()
    couleur.proxy(io).enable()
    io.write("#{green}Hello #{white}#{on:red}Italy!\n")
    assert_equals('\033[32mHello \033[37;41mItaly!\n', io.getvalue())
    couleur.proxy(io).disable()
    io.seek(0)
    io.truncate()
    io.write("#{black}should not be translated\n")
    assert_equals('#{black}should not be translated\n', io.getvalue())
Example #18
def test_output_green_foreground():
    "file-like filter output: green foreground"

    io = StringIO()
    couleur.proxy(io).enable()
    io.write("#{green}Hello Green!\n")
    assert_equals('\033[32mHello Green!\n', io.getvalue())
    couleur.proxy(io).disable()
    io.seek(0)
    io.truncate()
    io.write("#{black}should not be translated\n")
    assert_equals('#{black}should not be translated\n', io.getvalue())
Example #19
def test_output_black_on_white_foreground():
    "file-like filter output: black foreground on white background"

    io = StringIO()
    couleur.proxy(io).enable()
    io.write("#{black}#{on:white}Hello Black!\n")
    assert_equals('\033[30;47mHello Black!\n', io.getvalue())
    couleur.proxy(io).disable()
    io.seek(0)
    io.truncate()
    io.write("#{black}should not be translated\n")
    assert_equals('#{black}should not be translated\n', io.getvalue())
Example #20
def test_logging():
    rep = Report(ValueError, 20, 'msg', 'fix')
    str_io = StringIO()
    logger = logging.getLogger('test.logger')
    logger.setLevel(30)  # defaultish level
    logger.addHandler(logging.StreamHandler(str_io))
    rep.log_raise(logger)
    assert_equal(str_io.getvalue(), '')
    rep.problem_level = 30
    rep.log_raise(logger)
    assert_equal(str_io.getvalue(), 'msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
Example #21
def test_logging():
    rep = Report(ValueError, 20, 'msg', 'fix')
    str_io = StringIO()
    logger = logging.getLogger('test.logger')
    logger.setLevel(30)  # defaultish level
    logger.addHandler(logging.StreamHandler(str_io))
    rep.log_raise(logger)
    assert_equal(str_io.getvalue(), '')
    rep.problem_level = 30
    rep.log_raise(logger)
    assert_equal(str_io.getvalue(), 'msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
Example #22
def test_ignoring_colors():
    "file-like filter output: ignoring output"

    io = StringIO()
    couleur.proxy(io).enable()
    couleur.proxy(io).ignore()
    io.write("#{bold}#{green}#{on:white}Hello\n")
    assert_equals('Hello\n', io.getvalue())
    couleur.proxy(io).disable()
    io.seek(0)
    io.truncate()
    io.write("#{black}should not be translated\n")
    assert_equals('#{black}should not be translated\n', io.getvalue())
Example #23
class XMLExporter(object):
    def __init__(self):
        self.buff = StringIO()
        self.escape_html = False

    def startDocument(self):
        return '<?xml version="1.0" encoding="UTF-8"?>'

    def addQuickElement(self,
                        name,
                        contents=None,
                        attrs=None,
                        disable_escape_html=False):
        if attrs is None:
            attrs = {}
        self.startElement(name, attrs)
        if contents is not None:
            self.characters(contents, disable_escape_html=disable_escape_html)
            return self.endElement(name)

        return self.flush() + '\n'

    def flush(self):
        self.buff.seek(0)
        data = self.buff.getvalue()
        self.buff.seek(0)
        self.buff.truncate()

        return data

    def startElement(self, name, attrs=None, self_closing=False):
        if attrs is None:
            attrs = {}
        self.buff.write(u'<' + name)
        for (name, value) in list(attrs.items()):
            self.buff.write(u' %s="%s"' % (name, value))
        if self_closing:
            self.buff.write(u' />')
        else:
            self.buff.write(u'>')

    def characters(self, content, disable_escape_html=False):
        if self.escape_html and not disable_escape_html:
            escaped_content = html.escape(content)
        else:
            escaped_content = content
        self.buff.write(six.text_type(escaped_content))

    def endElement(self, name):
        self.buff.write(u'</%s>' % name)
        return self.flush() + '\n'
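A small usage sketch for the exporter above, assuming the imports the class relies on (StringIO, six, html) are available; the element name and content are arbitrary, and the expected output is shown in the comments:

exporter = XMLExporter()
doc = exporter.startDocument() + '\n'
doc += exporter.addQuickElement('title', 'Fish & Chips')
print(doc)
# <?xml version="1.0" encoding="UTF-8"?>
# <title>Fish & Chips</title>
# (escape_html is False by default, so the ampersand is written unescaped)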
Example #24
def search_text_in_pdf(path, searchFor, matches):
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    fp = open(path, 'rb')
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    password = ""
    maxpages = 0
    caching = True
    pagenos = set()

    pageIndex = 0
    for page in PDFPage.get_pages(fp,
                                  pagenos,
                                  maxpages=maxpages,
                                  password=password,
                                  caching=caching,
                                  check_extractable=True):
        interpreter.process_page(page)
        text = retstr.getvalue()
        search_word = searchFor
        searchResults = re.search(search_word, text, re.IGNORECASE)
        if searchResults:
            for reg in searchResults.regs:
                if path in matches:
                    matches[path].append({
                        'page': pageIndex,
                        'startChar': reg[0],
                        'endChar': reg[1]
                    })
                else:
                    matches[path] = [{
                        'page': pageIndex,
                        'startChar': reg[0],
                        'endChar': reg[1]
                    }]
        pageIndex = pageIndex + 1
        retstr.truncate(0)
        retstr.seek(0)

    fp.close()
    device.close()
    retstr.close()
    return text
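A hypothetical call to the pdfminer-based helper above; the path and the search term are made up. matches is filled in place, keyed by path, with one entry per regex hit:

matches = {}
search_text_in_pdf('/tmp/report.pdf', 'invoice', matches)
for hit in matches.get('/tmp/report.pdf', []):
    print('page %d, chars %d-%d' % (hit['page'], hit['startChar'], hit['endChar']))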
Example #25
def generate_csv(results):
    output_file = StringIO()
    yield output_file.getvalue()
    output_file.truncate(0)
    output = None
    for marker in results.all():
        serialized = marker.serialize()
        if not output:
            output = csv.DictWriter(output_file, serialized.keys())
            output.writeheader()

        row = {k: v.encode('utf8') if type(v) is six.text_type else v
               for k, v in iteritems(serialized)}
        output.writerow(row)
        yield output_file.getvalue()
        output_file.truncate(0)
Example #26
def generate_csv(results):
    output_file = StringIO()
    yield output_file.getvalue()
    output_file.truncate(0)
    output = None
    for marker in results.all():
        serialized = marker.serialize()
        if not output:
            output = csv.DictWriter(output_file, serialized.keys())
            output.writeheader()

        row = {k: v.encode('utf8') if type(v) is six.text_type else v
               for k, v in iteritems(serialized)}
        output.writerow(row)
        yield output_file.getvalue()
        output_file.truncate(0)
Example #27
def test_output():
    status, warnings = StringIO(), StringIO()
    app = TestApp(status=status, warning=warnings)
    try:
        status.truncate(0) # __init__ writes to status
        status.seek(0)
        app.info("Nothing here...")
        assert status.getvalue() == "Nothing here...\n"
        status.truncate(0)
        status.seek(0)
        app.info("Nothing here...", True)
        assert status.getvalue() == "Nothing here..."

        old_count = app._warncount
        app.warn("Bad news!")
        assert warnings.getvalue() == "WARNING: Bad news!\n"
        assert app._warncount == old_count + 1
    finally:
        app.cleanup()
Example #28
class CSVWriter(object):
    """ ref: http://docs.python.jp/2/library/csv.html
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwargs):
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwargs)
        self.stream = f
        self.encoding = encoding

    def write(self, row):
        # Write the row to the queue as UTF-8 first, then read it back and rewrite it in the target encoding
        self.writer.writerow([s.encode("utf-8") for s in row])
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        self.stream.write(data)
        # Clear the queue
        self.queue.truncate(0)

    def writerows(self, rows):
        [self.write(row) for row in rows]
Example #29
class OutputStream(_WritelnDecorator):
    def __init__(self, on_stream, off_stream):
        self.capture_stream = IO()
        self.on_stream = on_stream
        self.off_stream = off_stream
        self.stream = on_stream

    def on(self):
        self.stream = self.on_stream

    def off(self):
        self.stream = self.off_stream

    def capture(self):
        self.capture_stream.truncate()
        self.stream = self.capture_stream

    def get_captured(self):
        self.capture_stream.seek(0)
        return self.capture_stream.read()
Example #30
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """
    def __init__(self, f, dialect=excel, encoding="utf-8", **kwargs):
        self.queue = StringIO()
        self.writer = writer(self.queue, dialect=dialect, **kwargs)
        self.stream = f
        self.encoding = encoding
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow([unicode(s).encode(self.encoding) for s in row])
        data = self.queue.getvalue()
        data = data.decode(self.encoding)
        data = self.encoder.encode(data)
        self.stream.write(data)
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
Example #31
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=excel, encoding="utf-8", **kwargs):
        self.queue = StringIO()
        self.writer = writer(self.queue, dialect=dialect, **kwargs)
        self.stream = f
        self.encoding = encoding
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow([unicode(s).encode(self.encoding) for s in row])
        data = self.queue.getvalue()
        data = data.decode(self.encoding)
        data = self.encoder.encode(data)
        self.stream.write(data)
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
Example #32
def test_report_strings():
    rep = Report()
    assert_not_equal(rep.__str__(), '')
    assert_equal(rep.message, '')
    str_io = StringIO()
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    rep = Report(ValueError, 20, 'msg', 'fix')
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    rep.problem_level = 30
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), 'Level 30: msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
    # No fix string, no fix message
    rep.fix_msg = ''
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), 'Level 30: msg\n')
    rep.fix_msg = 'fix'
    str_io.truncate(0)
    str_io.seek(0)
    # If we drop the level, nothing goes to the log
    rep.problem_level = 20
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    # Unless we set the default log level in the call
    rep.write_raise(str_io, log_level=20)
    assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
    # If we set the error level down this low, we raise an error
    assert_raises(ValueError, rep.write_raise, str_io, 20)
    # But the log level wasn't low enough to do a log entry
    assert_equal(str_io.getvalue(), '')
    # Error still raised with lower log threshold, but now we do get a
    # log entry
    assert_raises(ValueError, rep.write_raise, str_io, 20, 20)
    assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n')
    # If there's no error, we can't raise
    str_io.truncate(0)
    str_io.seek(0)
    rep.error = None
    rep.write_raise(str_io, 20)
    assert_equal(str_io.getvalue(), '')
Example #33
def test_report_strings():
    rep = Report()
    assert_not_equal(rep.__str__(), '')
    assert_equal(rep.message, '')
    str_io = StringIO()
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    rep = Report(ValueError, 20, 'msg', 'fix')
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    rep.problem_level = 30
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), 'Level 30: msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
    # No fix string, no fix message
    rep.fix_msg = ''
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), 'Level 30: msg\n')
    rep.fix_msg = 'fix'
    str_io.truncate(0)
    str_io.seek(0)
    # If we drop the level, nothing goes to the log
    rep.problem_level = 20
    rep.write_raise(str_io)
    assert_equal(str_io.getvalue(), '')
    # Unless we set the default log level in the call
    rep.write_raise(str_io, log_level=20)
    assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n')
    str_io.truncate(0)
    str_io.seek(0)
    # If we set the error level down this low, we raise an error
    assert_raises(ValueError, rep.write_raise, str_io, 20)
    # But the log level wasn't low enough to do a log entry
    assert_equal(str_io.getvalue(), '')
    # Error still raised with lower log threshold, but now we do get a
    # log entry
    assert_raises(ValueError, rep.write_raise, str_io, 20, 20)
    assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n')
    # If there's no error, we can't raise
    str_io.truncate(0)
    str_io.seek(0)
    rep.error = None
    rep.write_raise(str_io, 20)
    assert_equal(str_io.getvalue(), '')
Example #34
class QuickDER2source(QuickDERgeneric):

    def __init__(self, semamod, outfn, refmods):
        self.to_be_defined = None
        self.to_be_overlaid = None
        self.cursor_offset = None
        self.nested_typerefs = None
        self.nested_typecuts = None

        self.semamod = semamod
        self.refmods = refmods

        self.buffer = StringIO()
        self.linebuffer = StringIO()

        self.comma1 = None
        self.comma0 = None

        self.unit, curext = path.splitext(outfn)

        # typedef b a adds a: b to this dict, to weed out dups
        self.issued_typedefs = {}

        # Setup function maps
        self.pack_funmap = {
            DefinedType: self.packDefinedType,
            ValueAssignment: self.packValueAssignment,
            TypeAssignment: self.packTypeAssignment,
            TaggedType: self.packTaggedType,
            SimpleType: self.packSimpleType,
            BitStringType: self.packSimpleType,
            ValueListType: self.packSimpleType,
            SequenceType: self.packSequenceType,
            SetType: self.packSetType,
            ChoiceType: self.packChoiceType,
            SequenceOfType: self.packRepeatingStructureType,
            SetOfType: self.packRepeatingStructureType,
            ComponentType: self.packSimpleType,
        }

    def write(self, txt):
        self.buffer.write(txt)
        self.linebuffer.write(txt)

    def writeln(self, txt=''):
        self.buffer.write(txt + '\n')
        self.linebuffer.write(txt)
        logger.info(self.linebuffer.getvalue())
        self.linebuffer.truncate(0)
        self.linebuffer.seek(0)

    def close(self):
        pass

    def generate_head(self):
        pass

    def generate_tail(self):
        pass

    def generate_unpack(self):
        pass

    def generate_pack(self):
        for assigncompos in dependency_sort(self.semamod.assignments):
            for assign in assigncompos:
                self.generate_pack_node(assign, None, None)

    def generate_pack_node(self, node, tp, fld):
        tnm = type(node)
        if tnm in self.pack_funmap:
            self.pack_funmap[tnm](node, tp, fld)

    def packValueAssignment(self, node, tp, fld):
        pass

    def packDefinedType(self, node, tp, fld):
        pass

    def packSimpleType(self, node, tp, fld):
        pass

    def packTypeAssignment(self, node, tp, fld):
        # Issue each typedef b a only once, because -- even if you
        # use the same b, a each time -- type-redefinition is a C11
        # feature, which isn't what we want.
        # self.to_be_overlaid is a list of (tname,tdecl) pairs to be created
        self.to_be_defined = []
        self.to_be_overlaid = [(tosym(node.type_name), node.type_decl)]
        while len(self.to_be_overlaid) > 0:
            (tname, tdecl) = self.to_be_overlaid.pop(0)
            key = (self.unit, tname)
            if key not in self.issued_typedefs:
                self.issued_typedefs[key] = str(tdecl)

                self.writeln('KeehiveError')
                self.writeln('DER_PACK_{}('.format(tname))
                self.writeln('){')
                #self.generate_pack_node(tdecl, tname, '0')
                self.writeln(')}')
                self.writeln()

                self.writeln('KeehiveError')
                self.writeln('DER_UNPACK_{}('.format(tname))
                self.writeln('){')
                #self.generate_pack_node(tdecl, tname, '0')
                self.writeln(')}')
                self.writeln()


            else:
                if self.issued_typedefs[key] != str(tdecl):
                    raise TypeError("Redefinition of type %s." % key[1])
        for tbd in self.to_be_defined:
            if tbd != 'DER_OVLY_' + self.unit + '_' + tosym(node.type_name) + '_0':
                self.writeln('typedef struct ' + tbd + ' ' + tbd + ';')
        self.writeln()

    def packSequenceType(self, node, tp, fld, naked=False):
        pass

    def packSetType(self, node, tp, fld, naked=False):
        pass

    def packChoiceType(self, node, tp, fld, naked=False):
        pass

    def packRepeatingStructureType(self, node, tp, fld):
        pass

    def packTaggedType(self, node, tp, fld):
        pass
Example #35
class TestResult(unittest.TestResult):
    def __init__(self, options, tests, layer_name=None):
        unittest.TestResult.__init__(self)
        self.options = options
        # Calculate our list of relevant layers we need to call testSetUp
        # and testTearDown on.
        layers = []
        gather_layers(layer_from_name(layer_name), layers)
        self.layers = order_by_bases(layers)
        count = 0
        for test in tests:
            count += test.countTestCases()
        self.count = count
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr

    def testSetUp(self):
        """A layer may define a setup method to be called before each
        individual test.
        """
        for layer in self.layers:
            if hasattr(layer, 'testSetUp'):
                layer.testSetUp()

    def testTearDown(self):
        """A layer may define a teardown method to be called after each
           individual test.

           This is useful for clearing the state of global
           resources or resetting external systems such as relational
           databases or daemons.
        """
        for layer in self.layers[-1::-1]:
            if hasattr(layer, 'testTearDown'):
                layer.testTearDown()

    def _setUpStdStreams(self):
        """Set up buffered standard streams, if requested."""
        if self.options.buffer:
            if self._stdout_buffer is None:
                self._stdout_buffer = StringIO()
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def _restoreStdStreams(self):
        """Restore the buffered standard streams and return any contents."""
        if self.options.buffer:
            stdout = sys.stdout.getvalue()
            stderr = sys.stderr.getvalue()
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate(0)
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate(0)
            return stdout, stderr
        else:
            return None, None

    def startTest(self, test):
        self.testSetUp()
        unittest.TestResult.startTest(self, test)
        testsRun = self.testsRun - 1  # subtract the one the base class added
        count = test.countTestCases()
        self.testsRun = testsRun + count

        self.options.output.start_test(test, self.testsRun, self.count)

        self._threads = threading.enumerate()
        self._start_time = time.time()

        self._setUpStdStreams()

    def addSuccess(self, test):
        self._restoreStdStreams()
        t = max(time.time() - self._start_time, 0.0)
        self.options.output.test_success(test, t)

    def addSkip(self, test, reason):
        self._restoreStdStreams()
        unittest.TestResult.addSkip(self, test, reason)
        self.options.output.test_skipped(test, reason)

    def addError(self, test, exc_info):
        stdout, stderr = self._restoreStdStreams()
        self.options.output.test_error(test,
                                       time.time() - self._start_time,
                                       exc_info,
                                       stdout=stdout,
                                       stderr=stderr)

        unittest.TestResult.addError(self, test, exc_info)

        if self.options.post_mortem:
            if self.options.resume_layer:
                self.options.output.error_with_banner("Can't post-mortem debug"
                                                      " when running a layer"
                                                      " as a subprocess!")
            else:
                zope.testrunner.debug.post_mortem(exc_info)
        elif self.options.stop_on_error:
            self.stop()

    def addFailure(self, test, exc_info):
        stdout, stderr = self._restoreStdStreams()
        self.options.output.test_failure(test,
                                         time.time() - self._start_time,
                                         exc_info,
                                         stdout=stdout,
                                         stderr=stderr)

        unittest.TestResult.addFailure(self, test, exc_info)

        if self.options.post_mortem:
            # XXX: mgedmin: why isn't there a resume_layer check here like
            # in addError?
            zope.testrunner.debug.post_mortem(exc_info)
        elif self.options.stop_on_error:
            self.stop()

    def addExpectedFailure(self, test, exc_info):
        self._restoreStdStreams()
        t = max(time.time() - self._start_time, 0.0)
        self.options.output.test_success(test, t)

        unittest.TestResult.addExpectedFailure(self, test, exc_info)

    def addUnexpectedSuccess(self, test):
        stdout, stderr = self._restoreStdStreams()
        self.options.output.test_error(
            test,
            time.time() - self._start_time,
            (UnexpectedSuccess, UnexpectedSuccess(), None),
            stdout=stdout,
            stderr=stderr)

        unittest.TestResult.addUnexpectedSuccess(self, test)

        if self.options.post_mortem:
            if self.options.resume_layer:
                self.options.output.error_with_banner("Can't post-mortem debug"
                                                      " when running a layer"
                                                      " as a subprocess!")
            else:
                # XXX: what exc_info? there's no exc_info!
                # flake8 is correct, but keep it quiet for now ...
                zope.testrunner.debug.post_mortem(exc_info)  # noqa: F821
        elif self.options.stop_on_error:
            self.stop()

    def stopTest(self, test):
        self.testTearDown()
        self.options.output.stop_test(test)

        if is_jython:
            pass
        else:
            if gc.garbage:
                self.options.output.test_garbage(test, gc.garbage)
                # TODO: Perhaps eat the garbage here, so that the garbage isn't
                #       printed for every subsequent test.

        # Did the test leave any new threads behind?
        new_threads = []
        for t in threading.enumerate():
            if t.is_alive() and t not in self._threads:
                if not any([
                        re.match(p, t.name)
                        for p in self.options.ignore_new_threads
                ]):
                    new_threads.append(t)

        if new_threads:
            self.options.output.test_threads(test, new_threads)
Example #36
def codeCheck():
    """
        Performs a static source code analysis using PyCharm's built-in
        code inspector.

        The default inspection profile of the project is used, which unless
        modified by the developer will be the common HRI-EU profile coming
        from CreatePyCharmProject.py

        If there is no ".idea" directory in the current directory, it will
        be temporarily created and deleted when finished.

        @returns: defects as list of XML strings

        @see: parseCodeCheckResult()

    """
    ProcessEnv.source(ToolBOSSettings.getConfigOption('package_pycharm'))

    output = StringIO()
    FastScript.execProgram('ps aux', stdout=output, stderr=output)

    if output.getvalue().find('pycharm') > 0:
        raise RuntimeError(
            'PyCharm already running, unable to invoke code checker')

    # create project files if not existing
    if os.path.exists('.idea'):
        created = False
    else:
        created = True
        #createUserConfig()
        createProject()

    resultsDir = 'build/PY05'
    FastScript.remove(resultsDir)
    FastScript.mkdir(resultsDir)

    output.truncate(0)

    cmd = 'inspect.sh . HRI-EU %s' % resultsDir

    try:
        logging.info('running analysis...')
        FastScript.execProgram(cmd, stdout=output, stderr=output)

        if Any.getDebugLevel() > 3:
            logging.info('\n' + output.getvalue())

    except subprocess.CalledProcessError:
        if output.getvalue().find('No valid license found') > 0:
            raise RuntimeError('PyCharm: No valid license found')
        else:
            raise RuntimeError(output.getvalue())

    # delete project files if we have created them
    if created:
        FastScript.remove('.idea')

    resultList = []

    for filePath in glob.glob('build/PY05/*.xml'):
        Any.requireIsFile(filePath)
        content = FastScript.getFileContent(filePath)

        resultList.append(content)

    return resultList
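A hypothetical caller, relying only on the docstring's contract that the function returns the defect reports as a list of XML strings (parseCodeCheckResult(), mentioned there, is not shown here):

reports = codeCheck()
logging.info('PyCharm inspection produced %d XML result files', len(reports))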
Example #37
class TestResult(unittest.TestResult):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """

    _previousTestClass = None
    _moduleSetUpFailed = False

    def __init__(self):
        self.failfast = False
        self.failures = []
        self.passes = []
        self.errors = []
        self.cleanup_errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = SixStringIO()
                self._stdout_buffer = SixStringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith("\n"):
                        output += "\n"
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith("\n"):
                        error += "\n"
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
        self._mirrorOutput = False

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addCleanupError(self, test, err):
        """Called when an error has occurred during cleanup. 'err' is a tuple of
        values as returned by sys.exc_info().
        """
        self.cleanup_errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        self.passes.append(test)

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err, bugnumber):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append((test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test, bugnumber):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) + len(self.errors) == 0

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith("\n"):
                    output += "\n"
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith("\n"):
                    error += "\n"
                msgLines.append(STDERR_LINE % error)
        return "".join(msgLines)

    def _is_relevant_tb_level(self, tb):
        return "__unittest" in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % (
            util.strclass(self.__class__),
            self.testsRun,
            len(self.errors),
            len(self.failures),
        )
Example #38
class QuickDER2source(QuickDERgeneric):
    def __init__(self, semamod, outfn, refmods):
        self.to_be_defined = None
        self.to_be_overlaid = None
        self.cursor_offset = None
        self.nested_typerefs = None
        self.nested_typecuts = None

        self.semamod = semamod
        self.refmods = refmods

        self.buffer = StringIO()
        self.linebuffer = StringIO()

        self.comma1 = None
        self.comma0 = None

        self.unit, curext = path.splitext(outfn)

        # typedef b a adds a: b to this dict, to weed out dups
        self.issued_typedefs = {}

        # Setup function maps
        self.pack_funmap = {
            DefinedType: self.packDefinedType,
            ValueAssignment: self.packValueAssignment,
            TypeAssignment: self.packTypeAssignment,
            TaggedType: self.packTaggedType,
            SimpleType: self.packSimpleType,
            BitStringType: self.packSimpleType,
            ValueListType: self.packSimpleType,
            SequenceType: self.packSequenceType,
            SetType: self.packSetType,
            ChoiceType: self.packChoiceType,
            SequenceOfType: self.packRepeatingStructureType,
            SetOfType: self.packRepeatingStructureType,
            ComponentType: self.packSimpleType,
        }

    def write(self, txt):
        self.buffer.write(txt)
        self.linebuffer.write(txt)

    def writeln(self, txt=''):
        self.buffer.write(txt + '\n')
        self.linebuffer.write(txt)
        logger.info(self.linebuffer.getvalue())
        self.linebuffer.truncate(0)
        self.linebuffer.seek(0)

    def close(self):
        pass

    def generate_head(self):
        pass

    def generate_tail(self):
        pass

    def generate_unpack(self):
        pass

    def generate_pack(self):
        for assigncompos in dependency_sort(self.semamod.assignments):
            for assign in assigncompos:
                self.generate_pack_node(assign, None, None)

    def generate_pack_node(self, node, tp, fld):
        tnm = type(node)
        if tnm in self.pack_funmap:
            self.pack_funmap[tnm](node, tp, fld)

    def packValueAssignment(self, node, tp, fld):
        pass

    def packDefinedType(self, node, tp, fld):
        pass

    def packSimpleType(self, node, tp, fld):
        pass

    def packTypeAssignment(self, node, tp, fld):
        # Issue each typedef b a only once, because -- even if you
        # use the same b, a each time -- type-redefinition is a C11
        # feature, which isn't what we want.
        # self.to_be_overlaid is a list of (tname,tdecl) pairs to be created
        self.to_be_defined = []
        self.to_be_overlaid = [(tosym(node.type_name), node.type_decl)]
        while len(self.to_be_overlaid) > 0:
            (tname, tdecl) = self.to_be_overlaid.pop(0)
            key = (self.unit, tname)
            if key not in self.issued_typedefs:
                self.issued_typedefs[key] = str(tdecl)

                self.writeln('KeehiveError')
                self.writeln('DER_PACK_{}('.format(tname))
                self.writeln('){')
                #self.generate_pack_node(tdecl, tname, '0')
                self.writeln(')}')
                self.writeln()

                self.writeln('KeehiveError')
                self.writeln('DER_UNPACK_{}('.format(tname))
                self.writeln('){')
                #self.generate_pack_node(tdecl, tname, '0')
                self.writeln(')}')
                self.writeln()

            else:
                if self.issued_typedefs[key] != str(tdecl):
                    raise TypeError("Redefinition of type %s." % key[1])
        for tbd in self.to_be_defined:
            if tbd != 'DER_OVLY_' + self.unit + '_' + tosym(
                    node.type_name) + '_0':
                self.writeln('typedef struct ' + tbd + ' ' + tbd + ';')
        self.writeln()

    def packSequenceType(self, node, tp, fld, naked=False):
        pass

    def packSetType(self, node, tp, fld, naked=False):
        pass

    def packChoiceType(self, node, tp, fld, naked=False):
        pass

    def packRepeatingStructureType(self, node, tp, fld):
        pass

    def packTaggedType(self, node, tp, fld):
        pass
Example #39
def need_to_install_distro(remote):
    """
    Installing kernels on rpm won't set up grub/boot into them.  This installs
    the newest kernel package and checks its version and compares against
    the running kernel (uname -r).  Similar check for deb.

    :returns: False if running the newest distro kernel. Returns the version of
              the newest if it is not running.
    """
    dist_release = remote.os.name
    package_type = remote.os.package_type
    current = get_version_of_running_kernel(remote)
    log.info("Running kernel on {node}: {version}".format(
        node=remote.shortname, version=current))
    installed_version = None
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            install_stdout = remote.sh(
                'sudo zypper --non-interactive install kernel-default')
        else:
            install_stdout = remote.sh('sudo yum install -y kernel')
            match = re.search("Package (.*) already installed",
                              install_stdout,
                              flags=re.MULTILINE)
            if 'Nothing to do' in install_stdout:
                installed_version = match.groups()[0] if match else ''
                err_mess = StringIO()
                err_mess.truncate(0)
                remote.run(args=[
                    'echo', 'no',
                    run.Raw('|'), 'sudo', 'yum', 'reinstall', 'kernel',
                    run.Raw('||'), 'true'
                ],
                           stderr=err_mess)
                reinstall_stderr = err_mess.getvalue()
                err_mess.close()
                if 'Skipping the running kernel' in reinstall_stderr:
                    running_version = re.search(
                        "Skipping the running kernel: (.*)",
                        reinstall_stderr,
                        flags=re.MULTILINE).groups()[0]
                    if installed_version == running_version:
                        log.info(
                            'Newest distro kernel already installed and running'
                        )
                        return False
                else:
                    remote.run(args=[
                        'sudo', 'yum', 'reinstall', '-y', 'kernel',
                        run.Raw('||'), 'true'
                    ])
        newest = get_latest_image_version_rpm(remote)

    if package_type == 'deb':
        newest = get_latest_image_version_deb(remote, dist_release)

    if current in newest or current.replace('-', '_') in newest:
        log.info('Newest distro kernel installed and running')
        return False
    log.info('Not newest distro kernel. Current: {cur} Expected: {new}'.format(
        cur=current, new=newest))
    return newest
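A hedged usage sketch for need_to_install_distro above: the return value is either False (the newest distro kernel is already running) or the version string that should be running. The maybe_install_distro_kernel wrapper is illustrative only and assumes the snippet's remote and log objects.

# Illustrative only: acting on need_to_install_distro()'s return value.
def maybe_install_distro_kernel(remote):
    newest = need_to_install_distro(remote)
    if newest is False:
        log.info('Distro kernel already up to date on %s', remote.shortname)
        return None
    # `newest` is the version we should be running; actually installing and
    # rebooting into it is left to the surrounding task (hypothetical here).
    log.info('Would install/boot distro kernel %s on %s', newest, remote.shortname)
    return newest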
Пример #40
0
class UpgradeTestCase(TestCase):
    layer = UPGRADE_FUNCTIONAL_TESTING

    def setUp(self):
        self.package = (Builder('python package').at_path(
            self.directory).named('the.package'))
        self.portal = self.layer['portal']
        self.portal_setup = getToolByName(self.portal, 'portal_setup')
        self.portal_quickinstaller = getToolByName(self.portal,
                                                   'portal_quickinstaller')

    def tearDown(self):
        self.teardown_logging()

    def grant(self, *roles):
        setRoles(self.portal, TEST_USER_ID, list(roles))
        transaction.commit()

    def login(self, user, browser=None):
        if hasattr(user, 'getUserName'):
            userid = user.getUserName()
        else:
            userid = user

        security_manager = getSecurityManager()
        if userid == SITE_OWNER_NAME:
            login(self.layer['app'], userid)
        else:
            login(self.portal, userid)

        if browser is not None:
            browser_auth_headers = [
                item for item in browser.session_headers
                if item[0] == 'Authorization'
            ]
            browser.login(userid)

        transaction.commit()

        @contextmanager
        def login_context_manager():
            try:
                yield
            finally:
                setSecurityManager(security_manager)
                if browser is not None:
                    browser.clear_request_header('Authorization')
                    for (name, value) in browser_auth_headers:
                        browser.append_request_header(name, value)
                transaction.commit()

        return login_context_manager()

    @property
    def directory(self):
        return self.layer['temp_directory']

    @contextmanager
    def package_created(self):
        with create(self.package).zcml_loaded(
                self.layer['configurationContext']) as package:
            yield package

    def default_upgrade(self):
        return Builder('plone upgrade step').upgrading('1000', to='1001')

    def install_profile(self, profileid, version=None):
        self.portal_setup.runAllImportStepsFromProfile(
            'profile-{0}'.format(profileid))
        if version is not None:
            self.portal_setup.setLastVersionForProfile(
                profileid, (six.text_type(version), ))
        transaction.commit()

    def install_profile_upgrades(self, *profileids):
        gatherer = queryAdapter(self.portal_setup, IUpgradeInformationGatherer)
        upgrade_info = [(profile['id'],
                         list(map(itemgetter('id'), profile['upgrades'])))
                        for profile in gatherer.get_upgrades()
                        if profile['id'] in profileids]
        executioner = queryAdapter(self.portal_setup, IExecutioner)
        executioner.install(upgrade_info)

    def record_installed_upgrades(self, profile, *destinations):
        profile = re.sub('^profile-', '', profile)
        recorder = getMultiAdapter((self.portal, profile),
                                   IUpgradeStepRecorder)
        recorder.clear()
        list(map(recorder.mark_as_installed, destinations))
        transaction.commit()

    def clear_recorded_upgrades(self, profile):
        profile = re.sub('^profile-', '', profile)
        recorder = getMultiAdapter((self.portal, profile),
                                   IUpgradeStepRecorder)
        recorder.clear()
        transaction.commit()

    def assert_gathered_upgrades(self, expected, *args, **kwargs):
        gatherer = queryAdapter(self.portal_setup, IUpgradeInformationGatherer)
        result = gatherer.get_profiles(*args, **kwargs)
        got = {}
        for profile in result:
            if profile['id'] not in expected:
                continue

            got_profile = dict(
                (key, []) for key in expected[profile['id']].keys())
            got[profile['id']] = got_profile

            for upgrade in profile['upgrades']:
                for key in got_profile.keys():
                    if upgrade[key]:
                        got_profile[key].append(upgrade['sdest'])

        self.maxDiff = None
        self.assertDictEqual(
            expected, got,
            'Unexpected gatherer result.\n\nPackages in result {0}:'.format(
                [profile['id'] for profile in result]))

    def asset(self, filename):
        return Path(__file__).dirname().joinpath('assets', filename).text()

    @contextmanager
    def assert_resources_recooked(self):
        def get_resources():
            doc = lxml.html.fromstring(self.portal())
            return list(
                map(
                    str.strip,
                    map(
                        six.ensure_str,
                        map(
                            lxml.html.tostring,
                            doc.xpath('//link[@rel="stylesheet"][@href]'
                                      ' | //script[@src]')))))

        resources = get_resources()
        yield
        self.assertNotEqual(resources, get_resources(),
                            'Resources are not recooked.')

    @contextmanager
    def assert_bundles_combined(self):
        # Note: this is for Plone 5.

        def get_timestamp():
            timestamp_file = self.portal.portal_resources.resource_overrides.production[
                'timestamp.txt']
            # The data contains text, which should be a DateTime.
            # Convert it to an actual DateTime object so we can be sure when comparing it.
            return DateTime(timestamp_file.data.decode('utf8'))

        timestamp = get_timestamp()
        yield
        self.assertLess(timestamp, get_timestamp(),
                        'Timestamp has not been updated.')

    def setup_logging(self):
        self.log = StringIO()
        self.loghandler = logging.StreamHandler(self.log)
        self.logger = logging.getLogger('ftw.upgrade')
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.loghandler)

    def teardown_logging(self):
        if getattr(self, 'log', None) is None:
            return

        self.logger.removeHandler(self.loghandler)
        self.log = None
        self.loghandler = None
        self.logger = None

    def get_log(self):
        return self.log.getvalue().splitlines()

    def purge_log(self):
        self.log.seek(0)
        self.log.truncate()
Пример #41
0
    def test_valid_yasm_version(self):
        out = StringIO()
        sandbox = self.get_sandbox({}, {}, out=out)
        func = sandbox._depends[sandbox['valid_yasm_version']]._func

        # Missing yasm is not an error when nothing requires it.
        func(None, False, False, False)

        # Any version of yasm works when nothing requires it.
        func(Version('1.0'), False, False, False)

        # Any version of yasm works when something requires any version.
        func(Version('1.0'), True, False, False)
        func(Version('1.0'), True, True, False)
        func(Version('1.0'), False, True, False)

        # A version of yasm greater than any requirement works.
        func(Version('1.5'), Version('1.0'), True, False)
        func(Version('1.5'), True, Version('1.0'), False)
        func(Version('1.5'), Version('1.1'), Version('1.0'), False)

        out.truncate(0)
        out.seek(0)
        with self.assertRaises(SystemExit):
            func(None, Version('1.0'), False, False)

        self.assertEqual(
            out.getvalue(),
            ('ERROR: Yasm is required to build with vpx, but you do not appear '
             'to have Yasm installed.\n'),
        )

        out.truncate(0)
        out.seek(0)
        with self.assertRaises(SystemExit):
            func(None, Version('1.0'), Version('1.0'), False)

        self.assertEqual(
            out.getvalue(),
            ('ERROR: Yasm is required to build with jpeg and vpx, but you do not appear '
             'to have Yasm installed.\n'),
        )

        out.truncate(0)
        out.seek(0)
        with self.assertRaises(SystemExit):
            func(None, Version('1.0'), Version('1.0'), Version('1.0'))

        self.assertEqual(
            out.getvalue(),
            ('ERROR: Yasm is required to build with jpeg, libav and vpx, but you do not appear '
             'to have Yasm installed.\n'),
        )

        out.truncate(0)
        out.seek(0)
        with self.assertRaises(SystemExit):
            func(Version('1.0'), Version('1.1'), Version('1.0'), False)

        self.assertEqual(
            out.getvalue(),
            'ERROR: Yasm version 1.1 or greater is required to build with vpx.\n'
        )

        out.truncate(0)
        out.seek(0)
        with self.assertRaises(SystemExit):
            func(Version('1.0'), True, Version('1.0.1'), False)

        self.assertEqual(
            out.getvalue(),
            'ERROR: Yasm version 1.0.1 or greater is required to build with jpeg.\n'
        )
Пример #42
0
    def test_continuation(self):
        out = StringIO()
        name = '%s.test_continuation' % self.__class__.__name__
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        handler = ConfigureOutputHandler(out, out)
        handler.setFormatter(logging.Formatter('%(levelname)s:%(message)s'))
        logger.addHandler(handler)

        logger.info('foo')
        logger.info('checking bar... ')
        logger.info('yes')
        logger.info('qux')

        self.assertEqual(out.getvalue(), 'foo\n'
                         'checking bar... yes\n'
                         'qux\n')

        out.seek(0)
        out.truncate()

        logger.info('foo')
        logger.info('checking bar... ')
        logger.warning('hoge')
        logger.info('no')
        logger.info('qux')

        self.assertEqual(
            out.getvalue(), 'foo\n'
            'checking bar... \n'
            'WARNING:hoge\n'
            ' ... no\n'
            'qux\n')

        out.seek(0)
        out.truncate()

        logger.info('foo')
        logger.info('checking bar... ')
        logger.warning('hoge')
        logger.warning('fuga')
        logger.info('no')
        logger.info('qux')

        self.assertEqual(
            out.getvalue(), 'foo\n'
            'checking bar... \n'
            'WARNING:hoge\n'
            'WARNING:fuga\n'
            ' ... no\n'
            'qux\n')

        out.seek(0)
        out.truncate()
        err = StringIO()

        logger.removeHandler(handler)
        handler = ConfigureOutputHandler(out, err)
        handler.setFormatter(logging.Formatter('%(levelname)s:%(message)s'))
        logger.addHandler(handler)

        logger.info('foo')
        logger.info('checking bar... ')
        logger.warning('hoge')
        logger.warning('fuga')
        logger.info('no')
        logger.info('qux')

        self.assertEqual(out.getvalue(), 'foo\n'
                         'checking bar... no\n'
                         'qux\n')

        self.assertEqual(err.getvalue(), 'WARNING:hoge\n' 'WARNING:fuga\n')
Пример #43
0
def log_chk(hdr, level):
    """ Utility method to check header checking / logging

    Asserts that log entry appears during ``hdr.check_fix`` for logging level
    below `level`.

    Parameters
    ----------
    hdr : instance
        Instance of header class, with methods ``copy`` and ``check_fix``.  The
        header has some minor error (defect) which can be detected with
        ``check_fix``.
    level : int
        Level (severity) of defect present in `hdr`.  When logging threshold is
        at or below `level`, a message appears in the default log (we test that
        happens).

    Returns
    -------
    hdrc : instance
        Header, with defect corrected.
    message : str
        Message generated in log when defect was detected.
    raiser : tuple
        Tuple of error type, callable, arguments that will raise an exception
        when the defect is detected.  Can be empty.  Check with ``if raiser !=
        (): assert_raises(*raiser)``.
    """
    str_io = StringIO()
    logger = logging.getLogger('test.logger')
    handler = logging.StreamHandler(str_io)
    logger.addHandler(handler)
    str_io.truncate(0)
    hdrc = hdr.copy()
    if level == 0:  # Should never log or raise error
        logger.setLevel(0)
        hdrc.check_fix(logger=logger, error_level=0)
        assert_equal(str_io.getvalue(), '')
        logger.removeHandler(handler)
        return hdrc, '', ()
    # Non zero defect level, test above and below threshold.
    # Set error level above defect level to prevent exception when defect
    # detected.
    e_lev = level + 1
    # Logging level above threshold, no log.
    logger.setLevel(level + 1)
    hdrc.check_fix(logger=logger, error_level=e_lev)
    assert_equal(str_io.getvalue(), '')
    # Logging level below threshold, log appears, store logged message
    logger.setLevel(level - 1)
    hdrc = hdr.copy()
    hdrc.check_fix(logger=logger, error_level=e_lev)
    assert_true(str_io.getvalue() != '')
    message = str_io.getvalue().strip()
    logger.removeHandler(handler)
    # When error level == level, check_fix should raise an error
    hdrc2 = hdr.copy()
    raiser = (HeaderDataError,
              hdrc2.check_fix,
              logger,
              level)
    return hdrc, message, raiser
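A short usage sketch following the docstring above; the header instance, defect level and expected message fragment are placeholders supplied by a concrete test, and the assert_* helpers are assumed to be the same ones used in the snippet.

# Illustrative test helper built on log_chk().
def check_defect_logged(hdr_with_defect, defect_level, expected_fragment):
    hdrc, message, raiser = log_chk(hdr_with_defect, defect_level)
    if defect_level == 0:
        assert_equal(message, '')   # level 0 should never log
        return hdrc
    assert_true(expected_fragment in message)
    if raiser != ():                # as suggested in the docstring
        assert_raises(*raiser)
    return hdrc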
Пример #44
0
    def test_server_type_check(self):
        hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6011),
                 ('127.0.0.1', 6012)]

        # sample json response from http://<host>:<port>/
        responses = {6010: 'object-server', 6011: 'container-server',
                     6012: 'account-server'}

        def mock_scout_server_type(app, host):
            url = 'http://%s:%s/' % (host[0], host[1])
            response = responses[host[1]]
            status = 200
            return url, response, status

        stdout = StringIO()
        patches = [
            mock.patch('swift.cli.recon.Scout.scout_server_type',
                       mock_scout_server_type),
            mock.patch('sys.stdout', new=stdout),
        ]

        res_object = 'Invalid: http://127.0.0.1:6010/ is object-server'
        res_container = 'Invalid: http://127.0.0.1:6011/ is container-server'
        res_account = 'Invalid: http://127.0.0.1:6012/ is account-server'
        valid = "1/1 hosts ok, 0 error[s] while checking hosts."

        # Test for object server type - default
        with nested(*patches):
            self.recon.server_type_check(hosts)

        output = stdout.getvalue()
        self.assertTrue(res_container in output.splitlines())
        self.assertTrue(res_account in output.splitlines())
        stdout.truncate(0)

        # Test ok for object server type - default
        with nested(*patches):
            self.recon.server_type_check([hosts[0]])

        output = stdout.getvalue()
        self.assertTrue(valid in output.splitlines())
        stdout.truncate(0)

        # Test for account server type
        with nested(*patches):
            self.recon.server_type = 'account'
            self.recon.server_type_check(hosts)

        output = stdout.getvalue()
        self.assertTrue(res_container in output.splitlines())
        self.assertTrue(res_object in output.splitlines())
        stdout.truncate(0)

        # Test ok for account server type
        with nested(*patches):
            self.recon.server_type = 'account'
            self.recon.server_type_check([hosts[2]])

        output = stdout.getvalue()
        self.assertTrue(valid in output.splitlines())
        stdout.truncate(0)

        # Test for container server type
        with nested(*patches):
            self.recon.server_type = 'container'
            self.recon.server_type_check(hosts)

        output = stdout.getvalue()
        self.assertTrue(res_account in output.splitlines())
        self.assertTrue(res_object in output.splitlines())
        stdout.truncate(0)

        # Test ok for container server type
        with nested(*patches):
            self.recon.server_type = 'container'
            self.recon.server_type_check([hosts[1]])

        output = stdout.getvalue()
        self.assertTrue(valid in output.splitlines())
Пример #45
0
    def test_queue_debug_reentrant(self):
        out = StringIO()
        name = '%s.test_queue_debug_reentrant' % self.__class__.__name__
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        handler = ConfigureOutputHandler(out, out, maxlen=10)
        handler.setFormatter(logging.Formatter('%(levelname)s| %(message)s'))
        logger.addHandler(handler)

        try:
            with handler.queue_debug():
                logger.info('outer info')
                logger.debug('outer debug')
                with handler.queue_debug():
                    logger.info('inner info')
                    logger.debug('inner debug')
                    e = Exception('inner exception')
                    raise e
        except Exception as caught:
            self.assertIs(caught, e)

        self.assertEqual(
            out.getvalue(), 'outer info\n'
            'inner info\n'
            'DEBUG| outer debug\n'
            'DEBUG| inner debug\n')

        out.seek(0)
        out.truncate()

        try:
            with handler.queue_debug():
                logger.info('outer info')
                logger.debug('outer debug')
                with handler.queue_debug():
                    logger.info('inner info')
                    logger.debug('inner debug')
                e = Exception('outer exception')
                raise e
        except Exception as caught:
            self.assertIs(caught, e)

        self.assertEqual(
            out.getvalue(), 'outer info\n'
            'inner info\n'
            'DEBUG| outer debug\n'
            'DEBUG| inner debug\n')

        out.seek(0)
        out.truncate()

        with handler.queue_debug():
            logger.info('outer info')
            logger.debug('outer debug')
            with handler.queue_debug():
                logger.info('inner info')
                logger.debug('inner debug')
                logger.error('inner error')
        self.assertEqual(
            out.getvalue(), 'outer info\n'
            'inner info\n'
            'DEBUG| outer debug\n'
            'DEBUG| inner debug\n'
            'ERROR| inner error\n')

        out.seek(0)
        out.truncate()

        with handler.queue_debug():
            logger.info('outer info')
            logger.debug('outer debug')
            with handler.queue_debug():
                logger.info('inner info')
                logger.debug('inner debug')
            logger.error('outer error')
        self.assertEqual(
            out.getvalue(), 'outer info\n'
            'inner info\n'
            'DEBUG| outer debug\n'
            'DEBUG| inner debug\n'
            'ERROR| outer error\n')
Пример #46
0
class TestResult(unittest.TestResult):
    """Holder for test result information.

    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.

    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    _previousTestClass = None
    _moduleSetUpFailed = False

    def __init__(self):
        self.failfast = False
        self.failures = []
        self.passes = []
        self.errors = []
        self.cleanup_errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        self._mirrorOutput = False

    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = SixStringIO()
                self._stdout_buffer = SixStringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer

    def startTestRun(self):
        """Called once before any tests are executed.

        See startTest for a method called before each test.
        """

    def stopTest(self, test):
        """Called when the given test has been run"""
        if self.buffer:
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)

            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
        self._mirrorOutput = False

    def stopTestRun(self):
        """Called once after all tests are executed.

        See stopTest for a method called after each test.
        """

    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addCleanupError(self, test, err):
        """Called when an error has occurred during cleanup. 'err' is a tuple of
        values as returned by sys.exc_info().
        """
        self.cleanup_errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True

    def addSuccess(self, test):
        "Called when a test has completed successfully"
        self.passes.append(test)

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))

    def addExpectedFailure(self, test, err, bugnumber):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))

    @failfast
    def addUnexpectedSuccess(self, test, bugnumber):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)

    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return (len(self.failures) + len(self.errors) == 0)

    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True

    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)

        if self.buffer:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length

    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % \
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures))
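A minimal sketch of driving this TestResult by hand, to make the docstring's bookkeeping concrete; DummyTest is hypothetical and only illustrates the startTest/addSuccess/stopTest sequence.

# Illustrative only: exercising TestResult's counters and collections.
class DummyTest(unittest.TestCase):
    def runTest(self):
        pass

result = TestResult()
test = DummyTest()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
print(result)                   # <... run=1 errors=0 failures=0>
print(result.wasSuccessful())   # True: no failures or errors recorded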
Пример #47
0
    def test_queue_debug(self):
        out = StringIO()
        name = '%s.test_queue_debug' % self.__class__.__name__
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        handler = ConfigureOutputHandler(out, out, maxlen=3)
        handler.setFormatter(logging.Formatter('%(levelname)s:%(message)s'))
        logger.addHandler(handler)

        with handler.queue_debug():
            logger.info('checking bar... ')
            logger.debug('do foo')
            logger.info('yes')
            logger.info('qux')

        self.assertEqual(out.getvalue(), 'checking bar... yes\n' 'qux\n')

        out.seek(0)
        out.truncate()

        with handler.queue_debug():
            logger.info('checking bar... ')
            logger.debug('do foo')
            logger.info('no')
            logger.error('fail')

        self.assertEqual(
            out.getvalue(), 'checking bar... no\n'
            'DEBUG:do foo\n'
            'ERROR:fail\n')

        out.seek(0)
        out.truncate()

        with handler.queue_debug():
            logger.info('checking bar... ')
            logger.debug('do foo')
            logger.debug('do bar')
            logger.debug('do baz')
            logger.info('no')
            logger.error('fail')

        self.assertEqual(
            out.getvalue(), 'checking bar... no\n'
            'DEBUG:do foo\n'
            'DEBUG:do bar\n'
            'DEBUG:do baz\n'
            'ERROR:fail\n')

        out.seek(0)
        out.truncate()

        with handler.queue_debug():
            logger.info('checking bar... ')
            logger.debug('do foo')
            logger.debug('do bar')
            logger.debug('do baz')
            logger.debug('do qux')
            logger.debug('do hoge')
            logger.info('no')
            logger.error('fail')

        self.assertEqual(
            out.getvalue(), 'checking bar... no\n'
            'DEBUG:<truncated - see config.log for full output>\n'
            'DEBUG:do baz\n'
            'DEBUG:do qux\n'
            'DEBUG:do hoge\n'
            'ERROR:fail\n')

        out.seek(0)
        out.truncate()

        try:
            with handler.queue_debug():
                logger.info('checking bar... ')
                logger.debug('do foo')
                logger.debug('do bar')
                logger.info('no')
                e = Exception('fail')
                raise e
        except Exception as caught:
            self.assertIs(caught, e)

        self.assertEqual(
            out.getvalue(), 'checking bar... no\n'
            'DEBUG:do foo\n'
            'DEBUG:do bar\n')