Example 1
    def test_logger_memory(self):

        output = StringIO()

        logger = Logger.get('memory', output, OutputStream,
                            level=logging.INFO,
                            memory=True,
                            memory_capacity=2,
                            memory_flushlevel=logging.CRITICAL)

        # under max capacity
        logger.info('1')
        output.seek(0)
        lines = len(output.readlines())
        self.assertEqual(lines, 0)

        # max capacity
        logger.info('2')
        output.seek(0)
        lines = len(output.readlines())
        self.assertEqual(lines, 2)

        # under max capacity but flush level
        logger.critical('3')
        output.seek(0)
        lines = len(output.readlines())
        self.assertEqual(lines, 3)
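
For reference, the standard library offers the same buffer-until-capacity-or-severity behavior through logging.handlers.MemoryHandler; a minimal sketch (the logger and handler names are illustrative, not from the example above):

import logging
import logging.handlers
from io import StringIO

output = StringIO()
target = logging.StreamHandler(output)
# Buffer up to 2 records; flush early on CRITICAL or above.
memory = logging.handlers.MemoryHandler(capacity=2,
                                        flushLevel=logging.CRITICAL,
                                        target=target)

demo = logging.getLogger("memory-demo")
demo.setLevel(logging.INFO)
demo.addHandler(memory)

demo.info("1")       # buffered, nothing written yet
demo.info("2")       # capacity reached, both records flush
demo.critical("3")   # at flushLevel, flushes immediately
print(output.getvalue().splitlines())  # ['1', '2', '3']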
Example 2
 def compare_configs(self, f1, f2):
     """
     Compare two configs and output diffs
     :param f1: file or str of first config
     :param f2: file or str of second config
     :return: list of [same, added, removed]
     """
     self.log.info("Comparing configs ...")
     added = ""
     removed = ""
     same = True
     try:
         stream1 = open(f1, "r")
         stream2 = open(f2, "r")
     except (OSError, TypeError):
         # not openable as file paths; treat the inputs as raw config text
         from io import StringIO
         stream1 = StringIO(f1)
         stream2 = StringIO(f2)
     stream1 = [line.lstrip() for line in stream1.readlines()[: -1]]
     stream2 = [line.lstrip() for line in stream2.readlines()[: -1]]
     diff = difflib.ndiff(stream1, stream2)
     for line in diff:
         if line.startswith('+') and not line.startswith(('+ # Generated', '+ # Finished', '+ !!')):
             added = added + line[2:]
             same = False
         elif line.startswith('-') and not line.startswith(('- # Generated', '- # Finished', '- !!')):
             removed = removed + line[2:]
             same = False
     if same:
         self.log.info("Pre and Post device configs are identical!")
     return [same, added, removed]
Example 3
def check_source(nb, filename):
    """
    Run pyflakes on a notebook; will catch errors such as missing passed
    parameters that do not have default values
    """
    from pyflakes.api import check as pyflakes_check
    from pyflakes.reporter import Reporter

    # concatenate all cells' source code into a single string
    source = '\n'.join([c['source'] for c in nb.cells])

    # these objects are needed to capture pyflakes output
    warn = StringIO()
    err = StringIO()
    reporter = Reporter(warn, err)

    # run pyflakes.api.check on the source code
    pyflakes_check(source, filename=filename, reporter=reporter)

    warn.seek(0)
    err.seek(0)

    # return any error messages returned by pyflakes
    return {
        'warnings': '\n'.join(warn.readlines()),
        'errors': '\n'.join(err.readlines())
    }
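
One detail worth noting above: readlines() keeps each trailing newline, so joining with '\n' doubles them; StringIO.getvalue() returns the whole buffer without any seek. A minimal sketch of the simpler capture:

from io import StringIO

buf = StringIO()
buf.write("first warning\n")
buf.write("second warning\n")

# No seek(0) needed; getvalue() returns everything written so far.
assert buf.getvalue() == "first warning\nsecond warning\n"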
Example 4
def stringio_write():
    # write the data into an in-memory buffer
    sio = StringIO()
    sio.write("abc")
    # the contents can be inspected with StringIO's own getvalue()
    s = sio.getvalue()
    print(s)

    print("-----")
    # if you read back through the file-like interface here, the data appears
    # empty, because the position is at the end of the buffer
    sio = StringIO()
    sio.write(s)
    print(sio.tell())
    for i in sio.readlines():
        print(i.strip())

    print("修改指针位置:")
    # 这时候我们需要修改下文件的指针位置
    # 我们发现可以打印出内容了
    sio = StringIO()
    sio.write(s)

    # writes to sio append at the current position
    sio.write("aaaaaaa")
    sio.seek(0, 0)
    print(sio.tell())
    for i in sio.readlines():
        print(i.strip())
Example 5
    def test_split(self):
        # Arrange
        data = """Test line 1.	1	19938376
Test line 2.	2	19938376
Test line 3.	2	19938376
Test line 4.	1	19938376
"""
        expected_lines = StringIO(data).readlines()
        sut = BC3ASTPreprocess()
        output_train = StringIO()
        output_test = StringIO()

        # Act
        sut.split(StringIO(data),
                  outfile_handle_1=output_train,
                  outfile_handle_2=output_test,
                  split=.5)

        # Assert
        output_train.seek(0)
        output_test.seek(0)
        train_lines = output_train.readlines()
        test_lines = output_test.readlines()

        self.assertEqual(2, len(train_lines))
        self.assertSequenceEqual(expected_lines,
                                 sorted(train_lines + test_lines))
Example 6
def grade_string(expected, actual):
    """Grades a single response against the true (actual) output.
    Possible return values are:
        * MATCH_EXACT (perfect match)
        * MATCH_LINES (correct number of lines, non-formatting characters match)
        * MATCH_VALUES (non-formatting characters match)
        * MATCH_NONE (no match)"""
    # Convert to universal newlines, strip extraneous whitespace
    expected_io = StringIO(unicode(expected.strip()), newline=None)
    actual_io = StringIO(unicode(actual.strip()), newline=None)

    expected_str = expected_io.read()
    actual_str = actual_io.read()

    # Perfect match
    if expected_str == actual_str:
        return MATCH_EXACT

    format_chars = ['[', ']', ',', ' ', '\n', '"', '\'']
    table = dict.fromkeys(map(ord, format_chars), None)

    expected_io.seek(0)
    expected_lines = [line.strip() for line in expected_io.readlines()]
    actual_io.seek(0)
    actual_lines = [line.strip() for line in actual_io.readlines()]

    # Remove blank lines
    removed_blanks = False

    if len(expected_lines) != len(actual_lines):
        actual_lines = [line for line in actual_lines if len(line.strip()) > 0]
        removed_blanks = True

    # Check for line by line exact/partial match
    if len(expected_lines) == len(actual_lines):
        exact_match = True
        partial_match = False

        for (e_line, a_line) in zip(expected_lines, actual_lines):
            if e_line != a_line:
                exact_match = False
                if (e_line.translate(table).lower() == a_line.translate(
                        table).lower()):
                    partial_match = True
                else:
                    partial_match = False
                    break

        if exact_match:
            return MATCH_EXACT if not removed_blanks else MATCH_LINES
        elif partial_match:
            return MATCH_LINES

    # Check for partial match of values only
    if expected_str.translate(table).lower() == actual_str.translate(
            table).lower():
        return MATCH_VALUES

    return MATCH_NONE
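
The dict.fromkeys(map(ord, ...), None) idiom above builds a translation table in which each listed code point maps to None, which translate() on a Python 3 str (or Python 2 unicode) interprets as "delete this character". A standalone demonstration:

format_chars = ['[', ']', ',', ' ', '\n', '"', '\'']
table = dict.fromkeys(map(ord, format_chars), None)

# Characters mapped to None are removed during translation.
assert "[1, 2, 3]".translate(table) == "123"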
Example 8
def grade_string(expected, actual):
    # Convert to universal newlines, strip extraneous whitespace
    expected_io = StringIO(unicode(expected.strip()), newline=None)
    actual_io = StringIO(unicode(actual.strip()), newline=None)

    expected_str = expected_io.read()
    actual_str = actual_io.read()

    # Perfect match
    if expected_str == actual_str:
        return "exact"

    table = dict.fromkeys(map(ord, FORMAT_CHARS), None)

    expected_io.seek(0)
    expected_lines = [line.strip() for line in expected_io.readlines()]
    actual_io.seek(0)
    actual_lines = [line.strip() for line in actual_io.readlines()]

    # Remove blank lines
    removed_blanks = False

    if len(expected_lines) != len(actual_lines):
        actual_lines = [line for line in actual_lines if len(line.strip()) > 0]
        removed_blanks = True

    # Check for line by line exact/partial match
    if len(expected_lines) == len(actual_lines):
        exact_match = True
        partial_match = False

        for (e_line, a_line) in zip(expected_lines, actual_lines):
            if e_line != a_line:
                exact_match = False
                if (e_line.translate(table).lower() == a_line.translate(
                        table).lower()):
                    partial_match = True
                else:
                    partial_match = False
                    break

        if exact_match:
            return "exact" if not removed_blanks else "lines"
        elif partial_match:
            return "lines"

    # Check for partial match of values only
    if expected_str.translate(table).lower() == actual_str.translate(
            table).lower():
        return "values"

    return None
Example 9
 def clean(self, text1, text2):
     """remove blank space and carriage returns, we may not care about these in the test results"""
     text1 = StringIO(text1)
     text2 = StringIO(text2)
     
     text1_clean = ''
     for line in text1.readlines():
         text1_clean += line.strip()
     
     text2_clean = ''
     for line in text2.readlines():
         text2_clean += line.strip()
     return text1_clean.replace("\n", ''), text2_clean.replace("\n", '')
Example 10
def grade_string(expected, actual):
    # Convert to universal newlines, strip extraneous whitespace
    expected_io = StringIO(unicode(expected.strip()), newline=None)
    actual_io = StringIO(unicode(actual.strip()), newline=None)

    expected_str = expected_io.read()
    actual_str = actual_io.read()

    # Perfect match
    if expected_str == actual_str:
        return "exact"

    table = dict.fromkeys(map(ord, FORMAT_CHARS), None)

    expected_io.seek(0)
    expected_lines = [line.strip() for line in expected_io.readlines()]
    actual_io.seek(0)
    actual_lines = [line.strip() for line in actual_io.readlines()]

    # Remove blank lines
    removed_blanks = False

    if len(expected_lines) != len(actual_lines):
        actual_lines = [line for line in actual_lines if len(line.strip()) > 0]
        removed_blanks = True

    # Check for line by line exact/partial match
    if len(expected_lines) == len(actual_lines):
        exact_match = True
        partial_match = False

        for (e_line, a_line) in zip(expected_lines, actual_lines):
            if e_line != a_line:
                exact_match = False
                if (e_line.translate(table).lower() == a_line.translate(table).lower()):
                    partial_match = True
                else:
                    partial_match = False
                    break

        if exact_match:
            return "exact" if not removed_blanks else "line"
        elif partial_match:
            return "line"

    # Check for partial match of values only
    if expected_str.translate(table).lower() == actual_str.translate(table).lower():
        return "values"

    return None
Example 11
 def do_POST(self):
     """Serve a POST request."""
     r, info = self.deal_post_data()
     print(r, info, "by: ", self.client_address)
     f = StringIO()
     f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
     f.write("<html>\n<title>Upload Result Page</title>\n")
     f.write("<body>\n<h2>Upload Result Page</h2>\n")
     f.write("<hr>\n")
     if r:
         f.write("<strong>Success:</strong>")
     else:
         f.write("<strong>Failed:</strong>")
     f.write(info)
     f.write("<br><a href=\"%s\">back</a>" % self.headers['referer'])
     f.write("<hr><small>Powered By: bones7456, Ported By:0312birdzhang ")
     f.write("here</a>.</small></body>\n</html>\n")
     length = f.tell()
     f.seek(0)
     self.send_response(200)
     self.send_header("Content-type", "text/html")
     self.send_header("Content-Length", str(length))
     self.end_headers()
     if f:
         # self.copyfile(f, self.wfile)
         for i in f.readlines():
             self.wfile.write(i.encode("utf-8"))
         f.close()
Example 12
    def read(self, markdown_file, populator):
        process = False

        # makes an empty list where we are going to store robot code
        robot_lines = []
        # opens our file under the alias 'md_file' and performs all the following
        # statements on it
        with open(markdown_file) as md_file:
            # creates a boolean var
            include_line = False
            # for each line of the file that we passed as an argument to this script
            # do the steps below
            for line in md_file:
                if not include_line:
                    include_line = line.strip().lower() == "```robotframework"
                elif line.strip() == "```":
                    include_line = False
                else:
                    robot_lines.append(line)
        robot_data = str(''.join(robot_lines))
        print(robot_data)
        # txtfile = BytesIO(robot_data.encode('UTF-8'))
        a = StringIO(robot_data)

        for row in a.readlines():
            row = self._process_row(row)
            cells = [self._process_cell(cell) for cell in self.split_row(row)]
            if cells and cells[0].strip().startswith('*') and \
                    populator.start_table([c.replace('*', '') for c in cells]):
                process = True
            elif process:
                populator.add(cells)
        populator.eof()
Example 13
def text_to_document(text):
    io = StringIO(text)
    d = { "abstract": "" }
    sec = "abstract"
    ignore = False
    extra = [ "References", "External links", "Further reading", "See also" ]

    for line in io.readlines():
        m = re.match(RE_09, line)
        if m is not None:
            mg1 = m.group(1).strip()
            if mg1 in extra:
                ignore = True
            else:
                ignore = False
                sec = mg1
                d[sec] = ""
        elif not ignore:
            d[sec] += line.lstrip("*")

    for key in list(d.keys()):
        s = condense_text(d[key]).replace("\n", " ").strip()
        if len(s) == 0:
            del d[key]
        else:
            d[key] = s

    return d
Example 14
    def test_writing(self) -> None:
        written = StringIO()
        write_respin(written, self.respin)
        written.seek(0)

        with open("tests/test_respin_format.respin") as f:
            self.assertListEqual(f.readlines(), written.readlines())
Example 15
    def testLAMMPS():
        """Test potentials.writePotentials() for LAMMPS"""

        pota = potentials.Potential("A", "B", lambda x: x)
        potb = potentials.Potential("C", "D", lambda x: 6.0 - x)

        expect = [
            "A-B", "N 6 R 1.00000000 6.00000000", "",
            "1 1.00000000 1.00000000 -1.00000000",
            "2 2.00000000 2.00000000 -1.00000000",
            "3 3.00000000 3.00000000 -1.00000000",
            "4 4.00000000 4.00000000 -1.00000000",
            "5 5.00000000 5.00000000 -1.00000000",
            "6 6.00000000 6.00000000 -1.00000000", "", "C-D",
            "N 6 R 1.00000000 6.00000000", "",
            "1 1.00000000 5.00000000 1.00000000",
            "2 2.00000000 4.00000000 1.00000000",
            "3 3.00000000 3.00000000 1.00000000",
            "4 4.00000000 2.00000000 1.00000000",
            "5 5.00000000 1.00000000 1.00000000",
            "6 6.00000000 0.00000000 1.00000000"
        ]

        sbuild = StringIO()
        potentials.writePotentials('LAMMPS', [pota, potb], 6.0, 6, sbuild)
        sbuild.seek(0)
        actual = sbuild.readlines()
        msg = "%s != %s" % (expect, actual)

        assert len(expect) == len(actual), msg
        for e, a in zip(expect, actual):
            # StringIO yields str in Python 3, so no decode is needed
            assert a[-1] == "\n"
            a = a[:-1]
            assert e == a
Example 16
def do_clean(number=0):
    """
    Cleans old versions from folder
    """
    fh = StringIO()
    my_names = []
    number = int(number)
    if (number == 0):
        number = 1
    revl = "ls -ltr versions | rev | cut -d ' ' -f1 | rev"
    value = local(revl, capture=True)
    for line in value.splitlines():
        my_names.append(line)
    my_names.pop(0)
    for i in range(len(my_names) - number):
        local("rm -rf versions/{}".format(my_names[i]))
    # For remote server
    code = "ls -ltr /data/web_static/releases | rev | cut -d ' ' -f1 | rev"
    fk_value = sudo(code, stdout=fh)
    fh.seek(0)
    my_fk_names = []
    for line in fh.readlines():
        data = line.split()[-1]
        if data.startswith("web_static"):
            my_fk_names.append(data)
    for i in range(len(my_fk_names) - number):
        sudo("rm -rf /data/web_static/releases/{}".format(my_fk_names[i]))
Example 17
def report_meta():
    src_folder = PurePath("/tmp")

    filenames = """
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_10m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_eq1000_ineq_10m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_eq100_ineq100_10m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_eq100_ineq100_minRRF0.1_10m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_eq100_ineq100_minRRF0_90m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-3_eq100_ineq100_minRRF0_10m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-3_eq100_ineq100_minRRF0_bval0.94_2m_sf.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-3_eq100_ineq100_minRRF0_bval0.94_480m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-3_eq10_ineq1000_minRRF0_10m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-3_eq10_ineq1000_minRRF0_bval0.94_180m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-3_eq10_ineq1000_minRRF0_bval0.94_2m_sf.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-40_eq100_ineq100_minRRF0_10m.zip
    /tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_m5-5_eq100_ineq100_minRRF0_10m.zip
    """

    paths = StringIO(filenames)

    for path in paths.readlines():
        filepath = path.strip()
        report_name = filepath.replace(
            '/tmp/CFM_hk_slipdef0_scaling_TMG_solution_TEST_Non0_', '')[:-4]
        yield (report_name, filepath)
Example 18
    def main():
        robot_lines = []
        with open(markdown_file) as f:
            include_line = False
            for line in f:
                if not include_line:
                    include_line = line.strip().lower() == "```robotframework"
                elif line.strip() == "```":
                    include_line = False
                else:
                    robot_lines.append(line)
        robot_data = str(''.join(robot_lines))
        print(robot_data)
        # txtfile = BytesIO(robot_data.encode('UTF-8'))
        a = StringIO(robot_data)

        for row in a.readlines():
            row = self._process_row(row)
            cells = [self._process_cell(cell) for cell in self.split_row(row)]
            if cells and cells[0].strip().startswith('*') and \
                    populator.start_table([c.replace('*', '') for c in cells]):
                process = True
            elif process:
                populator.add(cells)
        populator.eof()
Example 19
def main():
    # Generates ventilator data
    fake = Faker()
    fakeIO = StringIO()
    fake_ventilator_records = generate_ventilator_records(fake)

    fakeIO.write(str(''.join(dumps_lines(fake_ventilator_records))))

    # Write the json file to S3
    s3key = s3_load_prefix + filename
    s3object = s3.Object(s3_bucket, s3key)
    s3object.put(Body=(bytes(fakeIO.getvalue().encode('utf-8'))))
    fakeIO.close()

    time.sleep(5)

    ventilator_object = s3.Object(s3_bucket, s3key)
    ventilator_decoded_data = ventilator_object.get()['Body'].read().decode('utf-8')

    ventilator_stringio_data = StringIO(ventilator_decoded_data)

    # Read data line by line
    data = ventilator_stringio_data.readlines()

    # Deserialize json data
    ventilator_json_data = list(map(json.loads, data))

    # Load DDB table
    load_dynamodb_table(ventilator_json_data)
Example 20
 def _choices_as_array(self):
     from io import StringIO
     valuebuffer = StringIO(self.list_values)
     choices = [[item.strip(), item.strip()]
                for item in valuebuffer.readlines()]
     valuebuffer.close()
     return choices
Example 21
 def testLogStringStream(self):
     """Test case -   context manager - to custom string stream"""
     try:
         myLen = self.__logRecordMax
         dataList = [i for i in range(1, myLen)]
         logger.debug("dataList %d:  %r", len(dataList), dataList)
         #
         slogger = logging.getLogger()
         slogger.propagate = False
         for handler in slogger.handlers:
             slogger.removeHandler(handler)
         #
         stream = StringIO()
         sh = logging.StreamHandler(stream=stream)
         sh.setLevel(logging.DEBUG)
         fmt = logging.Formatter("STRING-%(processName)s: %(message)s")
         sh.setFormatter(fmt)
         slogger.addHandler(sh)
         #
         with MultiProcLogging(logger=slogger,
                               fmt=self.__mpFormat,
                               level=logging.DEBUG) as wlogger:
             for ii in range(myLen):
                 wlogger.error("context logging record %d", ii)
         #
         stream.seek(0)
         logLines = stream.readlines()
         logger.debug(">> dataList %d:  %r", len(logLines), logLines)
         self.assertEqual(len(logLines), myLen)
         for line in logLines:
             self.assertIn("context logging record", line)
     except Exception as e:
         logger.exception("context logging record %s", str(e))
         self.fail()
Example 22
 def test_simple(self):
     f = StringIO()
     c = Commit()
     c.committer = c.author = "Jelmer <*****@*****.**>"
     c.commit_time = c.author_time = 1271350201
     c.commit_timezone = c.author_timezone = 0
     c.message = "This is the first line\nAnd this is the second line.\n"
     c.tree = Tree().id
     write_commit_patch(f, c, "CONTENTS", (1, 1), version="custom")
     f.seek(0)
     lines = f.readlines()
     self.assertTrue(lines[0].startswith("From 0b0d34d1b5b596c928adc9a727a4b9e03d025298"))
     self.assertEqual(lines[1], "From: Jelmer <*****@*****.**>\n")
     self.assertTrue(lines[2].startswith("Date: "))
     self.assertEqual([
         "Subject: [PATCH 1/1] This is the first line\n",
         "And this is the second line.\n",
         "\n",
         "\n",
         "---\n"], lines[3:8])
     self.assertEqual([
         "CONTENTS-- \n",
         "custom\n"], lines[-2:])
     if len(lines) >= 12:
         # diffstat may not be present
         self.assertEqual(lines[8], " 0 files changed\n")
Example 23
    def test_graph_disconnected_to_dot(self):
        dependencies_expected = (
            ('towel-stuff', 'bacon', 'bacon (<=0.2)'),
            ('grammar', 'bacon', 'truffles (>=1.2)'),
            ('choxie', 'towel-stuff', 'towel-stuff (0.1)'),
            ('banana', 'strawberry', 'strawberry (>=0.5)'),
        )
        disconnected_expected = ('cheese', 'bacon', 'strawberry')

        dists = []
        for name in self.DISTROS_DIST + self.DISTROS_EGG:
            dist = get_distribution(name, use_egg_info=True)
            self.assertNotEqual(dist, None)
            dists.append(dist)

        graph = depgraph.generate_graph(dists)
        buf = StringIO()
        depgraph.graph_to_dot(graph, buf, skip_disconnected=False)
        buf.seek(0)
        lines = buf.readlines()

        dependencies_lines = []
        disconnected_lines = []

        # First sort output lines into dependencies and disconnected lines.
        # We also skip the attribute lines, and don't include the "{" and "}"
        # lines.
        disconnected_active = False
        for line in lines[1:-1]:  # Skip first and last line
            if line.startswith('subgraph disconnected'):
                disconnected_active = True
                continue
            if line.startswith('}') and disconnected_active:
                disconnected_active = False
                continue

            if disconnected_active:
                # Skip the 'label = "Disconnected"', etc. attribute lines.
                if ' = ' not in line:
                    disconnected_lines.append(line)
            else:
                dependencies_lines.append(line)

        dependencies_matches = []
        for line in dependencies_lines:
            if line[-1] == '\n':
                line = line[:-1]
            match = self.EDGE.match(line.strip())
            self.assertIsNot(match, None)
            dependencies_matches.append(match.groups())

        disconnected_matches = []
        for line in disconnected_lines:
            if line[-1] == '\n':
                line = line[:-1]
            line = line.strip('"')
            disconnected_matches.append(line)

        self.checkLists(dependencies_matches, dependencies_expected)
        self.checkLists(disconnected_matches, disconnected_expected)
Example 24
 def smi(self, subdata):
     if requests_version < 0x20300:
         subdata = subdata.content.decode("latin")
     else:
         subdata.encoding = "ISO-8859-1"
         subdata = subdata.text
     ssubdata = StringIO(subdata)
     timea = 0
     number = 1
     data = None
     subs = ""
     TAG_RE = re.compile(r'<(?!\/?i).*?>')
     bad_char = re.compile(r'\x96')
     for i in ssubdata.readlines():
         i = i.rstrip()
         sync = re.search(r"<SYNC Start=(\d+)>", i)
         if sync:
             if int(sync.group(1)) != int(timea):
                 if data and data != "&nbsp;":
                     subs += "%s\n%s --> %s\n" % (number, timestr(timea), timestr(sync.group(1)))
                     text = "%s\n" % TAG_RE.sub('', data.replace("<br>", "\n"))
                     text = decode_html_entities(text)
                     if text[len(text) - 2] != "\n":
                         text += "\n"
                     subs += text
                     number += 1
             timea = sync.group(1)
         text = re.search("<P Class=SVCC>(.*)", i)
         if text:
             data = text.group(1)
     recomp = re.compile(r'\r')
     text = bad_char.sub('-', recomp.sub('', subs))
     return text
Example 25
    def __init__(self, filename):
        self.filename = filename
        buf = StringIO()
        with open(filename) as f_in:
            buf.write(re.sub(r'\\\n\s*', '', f_in.read()))

        buf.seek(0)
        for line in buf.readlines():
            if line[0] == '!' or line[:3] == 'END' or len(line) < 2:
                continue

            if not line[0] == ' ':
                parent = self
                c, data = line.split(' ', 1)
            else:
                assert data, 'Attempted to nest with no parent'
                if parent is self:
                    parent = data
                c, data = line[1:].split(' ', 1)

            c = sanitise(c)
            data = to_struct(data)

            if hasattr(parent, c):
                if not isinstance(getattr(parent, c), (tuple, list)):
                    setattr(parent, c, [getattr(parent, c)])
                getattr(parent, c).append(data)
            else:
                setattr(parent, c, data)
        buf.close()
Example 26
def inspect_email_text(text):
    """Extracts key information from email header in
    text.
    """

    sio = StringIO(text)
    required_fields = ["From", "Subject", "Date"]  # TODO: some emails don't
                                                   # have dates!
    optional_fields = ["To", "Reply-To"]

    data = {'Subject': "", 'To': ""}
    while True:

        line = sio.readline().rstrip()
        if line == '':
            break

        for field in required_fields + optional_fields:
            start_string = field + ": "
            if line.startswith(start_string):
                data[field] = line[len(start_string):]

    tests = [field in data.keys() for field in required_fields]

    if not all(tests):
        print("Some required email fields were missing")
        import pdb; pdb.set_trace()

    if line == '':
        line = sio.readline().rstrip()
    else:
        import pdb; pdb.set_trace()
    data['Body'] = sio.readlines()

    return data
Example 27
def generatefile():
    """File write, extra detection of changes in existing file
  return True on any issues, but only if there is changes"""
    # get file path
    foutpath = getmdfile()
    if ARGS.verbose:
        print("Output path: {}".format(str(foutpath)))
    # write data to temp memorystream
    ftemp = StringIO()
    ret = generate(ftemp)
    # get old filedata, skipping header
    with getmdfile().open("r", encoding="utf-8") as forg:
        olddata = forg.readlines()[3:]
    # get new data, skip first empty line
    ftemp.seek(0)
    newdata = ftemp.readlines()[1:]
    # if new data is same as old we don't need to write anything
    if newdata == olddata:
        print("No changes, exit without write")
        return False
    # write output
    with foutpath.open("w", encoding="utf-8") as fout:
        fout.write(getmarkdownheader())
        fout.write(ftemp.getvalue())

    return ret
Example 28
 def _get_request_results(self):
     for chunk in iter(self.input_queue.get, 'STOP'):
         accessions_to_retrieve = ' '.join(chunk)
         response = self._make_request(accessions_to_retrieve)
         go_info = StringIO(response)
         self.output_queue.put([_.rstrip() for _ in go_info.readlines()])
     self.output_queue.put('DONE')
Example 29
def extract_imgur_album_urls(album_url):
	"""
	Given an imgur album URL, attempt to extract the images within that
	album

	Returns:
		List of qualified imgur URLs
	"""
	response = request(album_url)
	info = response.info()

	# Rudimentary check to ensure the URL actually specifies an HTML file
	if 'content-type' in info and not info['content-type'].startswith('text/html'):
		return []

	filedata = response.read()
	# TODO: stop parsing HTML with regexes.
	match = re.compile(r'\"hash\":\"(.[^\"]*)\",\"title\"')
	items = []

	memfile = StringIO(filedata.decode('utf-8'))
	#print("REALLY LOOKING")
	for line in memfile.readlines():
		results = re.findall(match, line)
		if not results:
			continue

		items += results

	memfile.close()
	# TODO : url may contain gif image.
	urls = ['http://i.imgur.com/%s.jpg' % (imghash) for imghash in items]

	return urls
Example 30
    def test_graph_bad_version_to_dot(self):
        expected = (
            ('towel-stuff', 'bacon', 'bacon (<=0.2)'),
            ('grammar', 'bacon', 'truffles (>=1.2)'),
            ('choxie', 'towel-stuff', 'towel-stuff (0.1)'),
            ('banana', 'strawberry', 'strawberry (>=0.5)'),
        )

        dists = []
        for name in self.DISTROS_DIST + self.DISTROS_EGG + self.BAD_EGGS:
            dist = get_distribution(name, use_egg_info=True)
            self.assertNotEqual(dist, None)
            dists.append(dist)

        graph = depgraph.generate_graph(dists)
        buf = StringIO()
        depgraph.graph_to_dot(graph, buf)
        buf.seek(0)
        matches = []
        lines = buf.readlines()
        for line in lines[1:-1]:  # skip the first and the last lines
            if line[-1] == '\n':
                line = line[:-1]
            match = self.EDGE.match(line.strip())
            self.assertIsNot(match, None)
            matches.append(match.groups())

        self.checkLists(matches, expected)
Example 31
 def test_list_files(self):
     out = StringIO()
     call_command("minio", "ls", stdout=out)
     out.seek(0)
     lines = sorted(out.readlines())
     expected = sorted([f"{self.new_file}\n", f"{self.second_file}\n"])
     self.assertEqual(lines, expected)
Example 33
def bannerize(str, term_w, figlet_fonts, use_pyfiglet=True):
    # do 100 attempts to find a suitable font
    # criteria:
    #   must be narrower than term window
    #   wider than 1/4 the term window(the larger, the prettier)
    if not isinstance(figlet_fonts, list):
        figlet_fonts = [fi.strip() for fi in figlet_fonts.split(',')]
    out = "\n" + str + "\n" + '-' * len(str) + "\n"
    fi = "none"
    if not PYFIG or not use_pyfiglet:
        return (out, fi)
    for i in range(100):
        fi = rand_font(figlet_fonts)
        try:
            f = Figlet(font=fi, width=term_w)
        except FontNotFound:
            continue
        out = f.renderText(str)
        out_IO = StringIO(out)
        out_width = max([len(a) for a in out_IO.readlines()])
        # print("outWidth: %d"%outWidth)
        if out_width <= term_w and out_width > (term_w / 4):
            # print("Font name: " + fi)
            return (out, fi)
    return (out, fi)
Example 34
 def smi(self, subdata):
     if requests_version < 0x20300:
         subdata = subdata.content.decode("latin")
     else:
         subdata.encoding = "ISO-8859-1"
         subdata = subdata.text
     ssubdata = StringIO(subdata)
     timea = 0
     number = 1
     data = None
     subs = ""
     TAG_RE = re.compile(r"<(?!\/?i).*?>")
     bad_char = re.compile(r"\x96")
     for i in ssubdata.readlines():
         i = i.rstrip()
         sync = re.search(r"<SYNC Start=(\d+)>", i)
         if sync:
             if int(sync.group(1)) != int(timea):
                 if data and data != "&nbsp;":
                     subs += "{}\n{} --> {}\n".format(
                         number, timestr(timea), timestr(sync.group(1)))
                     text = "%s\n" % TAG_RE.sub("",
                                                data.replace("<br>", "\n"))
                     text = decode_html_entities(text)
                     if text[len(text) - 2] != "\n":
                         text += "\n"
                     subs += text
                     number += 1
             timea = sync.group(1)
         text = re.search("<P Class=SVCC>(.*)", i)
         if text:
             data = text.group(1)
     recomp = re.compile(r"\r")
     text = bad_char.sub("-", recomp.sub("", subs))
     return text
Example 35
    def test_then_it_should_pass_all_logs(self):
        stream = StringIO()
        subject = MultiProcessingHandler('mp-handler',
                                         logging.StreamHandler(stream=stream))
        logger = logging.Logger('root')
        logger.addHandler(subject)

        def worker(wid, logger):
            for _ in range(10):
                logger.info("Worker %d log.", wid)

        logger.info("Starting workers...")
        procs = [
            mp.Process(target=worker, args=(wid, logger)) for wid in range(2)
        ]
        for proc in procs:
            proc.start()
        logger.info("Workers started.")

        for proc in procs:
            proc.join()
        logger.info("Workers done.")

        subject.close()

        stream.seek(0)
        lines = stream.readlines()
        self.assertIn("Starting workers...\n", lines)
        self.assertIn("Workers started.\n", lines)
        self.assertIn("Workers done.\n", lines)
        self.assertEqual(10 * 2 + 3, len(lines))
Example 36
 def _GetReleaseForCommit(self, commit_hash: str, n: int = 100):
     if n > 2000:
         logging.error(
             "We couldn't find the release branch that we correspond to")
         return "0.0.0.0"
     git_dir = os.path.dirname(self.sp)
     cmd_args = ["log", '--format="%h %D"', "-n " + str(n)]
     return_buffer = StringIO()
     RunCmd("git",
            " ".join(cmd_args),
            workingdir=git_dir,
            outstream=return_buffer)
     return_buffer.seek(0)
     results = return_buffer.readlines()
     return_buffer.close()
     log_re = re.compile(r'(release|dev)/(\d{6})')
     for log_item in results:
         commit = log_item[:11]
         branch = log_item[11:].strip()
         if len(branch) == 0:
             continue
         match = log_re.search(branch)
         if match:
             logging.info("Basing our release commit off of commit " +
                          commit)
             return match.group(2)
     return self._GetReleaseForCommit(commit_hash, n * 2)
Example 37
 def gen(self, fname, flags=None, dlls=None, debug=False):
     """Take a file input and generate the code.
     """
     flags = flags or []
     dlls = dlls or []
     ofi = StringIO()
     gen = self._gen(ofi, fname, flags=flags, dlls=dlls)
     # load code
     namespace = {}
     # DEBUG
     # print ofi.getvalue()
     # DEBUG
     ofi.seek(0)
     ignore_coding = ofi.readline()
     # exec ofi.getvalue() in namespace
     output = ''.join(ofi.readlines())
     self.text_output = output
     try:
         # PY3 change
         exec(output, namespace)
     except Exception:
         print(output)
         raise
     # except NameError:
     #     print(output)
     self.namespace = ADict(namespace)
     if debug:
         print(output)
     return
Example 38
def ascii_art_text(str, font, term_w):
    """ Given a str, and a font(or rand for choose a font at random)
    and a terminal width. Returns figletized text
    and the font that was used.
    """
    # default is no figlets
    out = "\n" + str + "\n" + '-' * len(str) + "\n"
    if font != "rand":
        f = pyfiglet.Figlet(font=font)
        out = f.renderText(str)
        return(out, font)
    else:
        # do 100 attempts to find a suitable font
        # criteria:
        #   must be narrower than term window
        #   wider than 1/4 the term window(the larger, the prettier)
        fi = "none"
        for i in range(100):
            fi = rand_font()
            f = pyfiglet.Figlet(font=fi)  # , width=term_w )
            out = f.renderText(str)
            from io import StringIO
            out_IO = StringIO(out)
            out_width = max([len(a) for a in out_IO.readlines()])
            # print("outWidth: %d"%outWidth)
            if out_width <= term_w and out_width > (term_w / 4):
                # print("Font name: " + fi)
                return(out, fi)
    return(out, fi)
Example 39
    def test_custom_ids(self):
        sio = StringIO()
        action = test_smart_add.AddCustomIDAction(to_file=sio,
                                                  should_print=True)
        self.build_tree(['file1', 'dir1/', 'dir1/file2'])

        wt = self.make_branch_and_tree('.')
        if not wt._format.supports_setting_file_ids:
            self.assertRaises(workingtree.SettingFileIdUnsupported,
                              wt.smart_add, ['.'],
                              action=action)
            return

        wt.smart_add(['.'], action=action)
        # The order of adds is not strictly fixed:
        sio.seek(0)
        lines = sorted(sio.readlines())
        self.assertEqual([
            'added dir1 with id directory-dir1\n',
            'added dir1/file2 with id file-dir1%file2\n',
            'added file1 with id file-file1\n',
        ], lines)
        wt.lock_read()
        self.addCleanup(wt.unlock)
        self.assertEqual([
            ('', wt.path2id('')),
            ('dir1', b'directory-dir1'),
            ('file1', b'file-file1'),
            ('dir1/file2', b'file-dir1%file2'),
        ], [(path, ie.file_id) for path, ie in wt.iter_entries_by_dir()])
Example 40
 def test_csv(self):
     r = self.client.get("/?q=r&format=csv")
     self.assertEqual(r.status_code, 200)
     self.assertEqual(r["Content-Type"], "text/csv")
     result = StringIO(r.content.decode())
     header, first_line, second_line = result.readlines()
     self.assertEqual(
         header,
         '"DOT_NUMBER","LEGAL_NAME","DBA_NAME","CARRIER_OPERATION","HM_FLAG",'
         '"PC_FLAG","PHY_STREET","PHY_CITY","PHY_STATE","PHY_ZIP","PHY_COUNTRY",'
         '"MAILING_STREET","MAILING_CITY","MAILING_STATE","MAILING_ZIP",'
         '"MAILING_COUNTRY","TELEPHONE","FAX","EMAIL_ADDRESS","MCS150_DATE",'
         '"MCS150_MILEAGE","MCS150_MILEAGE_YEAR","ADD_DATE","OIC_STATE",'
         '"NBR_POWER_UNIT","DRIVER_TOTAL"\r\n',
     )
     self.assertEqual(
         first_line,
         '42,"Killer Carrier, Inc","Killer Carrier","C","N","Y",'
         '"0 Abyss Alley","Nowhere","NY","12345","US","0 Abyss Alley",'
         '"Nowhere","NY","12345","US","+123456789",'
         '"+198765432","*****@*****.**","05-MAR-20",18725329,2020,'
         '"04-FEB-19","MA",5,4\r\n',
     )
     self.assertEqual(
         second_line,
         '43,"Transport Greatness","","","Y","N",'
         '"","","","","","",'
         '"","","","","",'
         '"","","","","",'
         '"03-JAN-19","","",""\r\n',
     )
Example 41
def fix_date_and_remove_null(yaml_file, date, input_type='ruamel'):
    """
    Remove the single quotes around the date key-value pair in the provided yaml_file and remove any 'null' values
    :param yaml_file: ruamel.yaml instance or location of YAML file
    :param date: string date value (e.g. 2019-01-01)
    :param input_type: input type can be a ruamel.yaml instance or list
    :return: YAML file lines in a list
    """
    _yaml = init_yaml()
    if input_type == 'ruamel':
        # ruamel does not support output to a variable. Therefore we make use of StringIO.
        file = StringIO()
        _yaml.dump(yaml_file, file)
        file.seek(0)
        new_lines = file.readlines()
    elif input_type == 'list':
        new_lines = yaml_file
    elif input_type == 'file':
        new_lines = yaml_file.readlines()
    else:
        raise ValueError('unknown input_type: ' + str(input_type))

    fixed_lines = [l.replace('\'' + str(date) + '\'', str(date)).replace('null', '')
                   if REGEX_YAML_DATE.match(l) else
                   l.replace('null', '') for l in new_lines]

    return fixed_lines
Example 43
    def remove_blank_lines(source):
        """
        Removes blank lines from 'source' and returns the result.
        """

        io_obj = StringIO(source)
        source = [a for a in io_obj.readlines() if a.strip()]
        return "".join(source)
Example 44
def test_serialize():
    out = StringIO()
    influxdb.serialize(out, "cpu", dict(load=10.0, alert=True),
                       dict(host="Server A", region="us west"),
                       datetime(2015, 2, 22, 17, 55))
    out.seek(0)
    r = out.readlines()
    assert "cpu,host=Server\ A,region=us\ west alert=t,load=10.0 1424624100000000000\n" == r[0]
Example 45
 def send_typical_request(self, message):
     input = StringIO(message)
     output = StringIO()
     self.handler.rfile = input
     self.handler.wfile = output
     self.handler.handle_one_request()
     output.seek(0)
     return output.readlines()
Example 46
def filter_commented_out_code(source):
    """Yield code with commented out code removed."""
    marked_lines = list(commented_out_code_line_numbers(source))
    sio = StringIO(source)
    previous_line = ''
    for line_number, line in enumerate(sio.readlines(), start=1):
        if (line_number not in marked_lines or
                previous_line.rstrip().endswith('\\')):
            yield line
        previous_line = line
Example 47
def open_mock(content, **kwargs):
    content_out = StringIO()
    m = mock_open(read_data=content)
    with patch('__builtin__.open', m, create=True, **kwargs) as mo:
        stream = StringIO(content)
        rv = mo.return_value
        rv.write = lambda x: content_out.write(bytes(x, "utf-8"))
        rv.content_out = lambda: content_out.getvalue()
        rv.__iter__ = lambda x: iter(stream.readlines())
        yield rv
Example 48
def printable_error(error, tb=None):
    printable = [repr(error)]
    if tb is None:
        tb = sys.exc_info()[2]
    printed = StringIO()
    traceback.print_tb(tb, file=printed)
    printed.seek(0)
    for line in printed.readlines():
        printable.append(line.rstrip('\n'))
    return printable
Example 49
def solve_it(file_name, mode=1, threads=4):
    pool = ThreadPool(threads)
    results = []

    with open(file_name, 'r') as input_data_file:
        input_data = input_data_file.read()

    buf = StringIO(input_data)
    first_line = buf.readline().split()
    node_count = int(first_line[0])

    if node_count == 1000:
        for i in range(threads):
            results.append(pool.apply_async(run_prog, args=(file_name, str(mode), './ls.out')))
    else:
        for i in range(threads):
            results.append(pool.apply_async(run_prog, args=(file_name, str(2), 'greedy.py')))

    pool.close()
    pool.join()
    results = [r.get().decode('utf-8') for r in results]

    opt_num_colors = sys.maxsize
    output_data = 'no sol'

    if node_count == 1000:
        for i in results:
            buf = StringIO(i)
            all_lines = buf.readlines()
            colors_and_proof = all_lines[4].split()
            if int(colors_and_proof[0]) < opt_num_colors:
                opt_num_colors = int(colors_and_proof[0])
                output_data = all_lines[4] + all_lines[5]
    else:
        for i in results:
            buf = StringIO(i)
            all_lines = buf.readlines()
            # print(all_lines)
            colors_and_proof = all_lines[2].split()
            if int(colors_and_proof[0]) < opt_num_colors:
                opt_num_colors = int(colors_and_proof[0])
                output_data = all_lines[2] + all_lines[3]

    return output_data
Example 50
    def test_logger_unicode_level(self):

        output = StringIO()
        logger = Logger.get('unicode', output, OutputStream, level='info')

        logger.info('fline')

        output.seek(0)
        log_lines = output.readlines()

        self.assertEqual(len(log_lines), 1)
Example 51
    def test_info_duplicate_columns_shows_correct_dtypes(self):
        # GH11761
        io = StringIO()

        frame = DataFrame([[1, 2.0]],
                          columns=['a', 'a'])
        frame.info(buf=io)
        io.seek(0)
        lines = io.readlines()
        assert 'a    1 non-null int64\n' == lines[3]
        assert 'a    1 non-null float64\n' == lines[4]
Example 52
def _parse_preamble(reader):
    lines = []
    if reader[0:2] == "{%":
        reader.consume(2)
        end = reader.find("%}")
        content = reader.consume(end)
        reader.consume(2)
        io = StringIO(content)
        lines = [l[:-1] for l in io.readlines()]
        io.close()
    return lines
Example 53
    def test_logger_stream(self):

        output = StringIO()
        logger = Logger.get('stream', output, OutputStream)

        logger.info('first_line')
        logger.info('second_line')

        output.seek(0)
        log_lines = output.readlines()

        self.assertEqual(len(log_lines), 2)
Example 54
def build_dict_from_csv(body):
    if not isinstance(body, str):
        body = body.decode(errors='ignore')
    buf = StringIO(body)
    raw_fields = buf.readlines()[0]
    fields = raw_fields.strip().split(',')
    buf.seek(len(raw_fields))
    r = csv.DictReader(buf, fields)
    ret = []
    for row in r:
        ret.append(row)
    return ret
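
csv.DictReader can also consume the first row as field names on its own, which makes the manual readlines()/seek() bookkeeping above unnecessary in the common case; a sketch of the simpler form (the sample body is illustrative):

import csv
from io import StringIO

body = "name,age\nalice,30\nbob,25\n"

# With fieldnames omitted, DictReader takes them from the first row.
rows = list(csv.DictReader(StringIO(body)))
assert rows[0]["name"] == "alice"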
Example 55
 def get_archives(self):
     """A list of archives as returned by --list-archives. Queried
     the first time it is accessed, and then subsequently cached.
     """
     if self._queried_archives is None:
         response = StringIO(self.call('--list-archives'))
         self._queried_archives = [l.rstrip() for l in response.readlines()]
         if ['v'] in self.options:
             # Filter out extraneous info if tarsnap was run with
             # verbose flag
             self._queried_archives = [
                 l.rsplit('\t', 1)[0] for l in self._queried_archives]
     return self._queried_archives + self._known_archives
Example 56
def format_exception(type, value, tb):
    """ Format just exception, just a traceback or both. Return a list of lines. """
    buffer = StringIO()
    if tb is not None:
        if hasattr(tb, "tb_frame"):
            tb = traceback.extract_tb(tb)
        for line in traceback.format_list(tb):
            buffer.write(unicode(line))
    if (type is not None) and (value is not None):
        for line in traceback.format_exception_only(type, value):
            buffer.write(unicode(line))
    buffer.seek(0)
    return buffer.readlines()
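
For comparison, the standard library's traceback.format_exception(type, value, tb) already returns such a list of newline-terminated strings when all three pieces are available; a minimal sketch:

import traceback

try:
    1 / 0
except ZeroDivisionError as exc:
    lines = traceback.format_exception(type(exc), exc, exc.__traceback__)

# Each entry ends in a newline, like readlines() output.
assert lines[-1] == "ZeroDivisionError: division by zero\n"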
Example 57
    def _parse(self, data):
        """ Parse the output from the 'mntr' 4letter word command """
        h = StringIO(data.decode())

        result = {}
        for line in h.readlines():
            try:
                key, value = self._parse_line(line)
                result[key] = value
            except ValueError:
                pass  # ignore broken lines

        return result
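
The _parse_line helper is not shown in this excerpt; ZooKeeper's 'mntr' command emits one tab-separated key/value pair per line, so a plausible hypothetical implementation (the name, signature, and numeric coercion are assumptions) would be:

def _parse_line(line):
    # Hypothetical helper: 'mntr' emits "key<TAB>value" lines.
    # A line without a tab raises ValueError, which the caller ignores.
    key, value = line.rstrip("\n").split("\t", 1)
    try:
        return key, int(value)
    except ValueError:
        return key, value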
Example 58
def hosts_map(device='eth0', exclude=()):
    output = subprocess.check_output(['arp', '-n'])
    r = StringIO(output.decode('utf-8'))
    reb = re.compile(r'\s+')
    res = set()
    for line in r.readlines():
        item = reb.split(line.strip())
        if len(item) < 5:
            continue
        if item[4] != device or item[0] in exclude:
            continue
        res.add((item[0],item[2]))
    r.close()
    return res
Example 59
def fetch_results(_id):
    url_data = UrlSync.objects.get(id=_id)

    processed_line = url_data.current_line

    class_ = load_class(url_data.competition.processing_class)
    processing_class = class_(url_data.competition.id)
    try:
        resp = requests.get(url_data.url)

        if resp.status_code != 200:
            return

        buf = StringIO(resp.text)
        file_lines = tuple(buf.readlines())
        lines_to_process = file_lines[processed_line:]
        for line in lines_to_process:
            print(line)
            if len(line.strip()) == 0:
                print('empty line')
                continue
            number, time_text = line.strip().split(',')
            if number == '0':
                print('skipping 0 number')
                continue
            scan, created = ChipScan.objects.get_or_create(competition=url_data.competition, nr_text=number, time_text=time_text, url_sync=url_data)
            scan.time = time_text
            try:
                scan.nr = Number.objects.get(competition_id__in=url_data.competition.get_ids(), number=number, group='')
            except Exception:
                print('number not found')
                continue
            finally:
                scan.save()
            process_chip_result.delay(scan.id)
            # processing_class.process_chip_result(scan.id)

        url_data.current_line = len(file_lines)
        url_data.save()

        # TODO: This should be removed after process review
        processing_class.recalculate_all_standings()
        send_smses()

    except Exception:
        error = traceback.format_exc()
        Log.objects.create(content_object=url_data, action="Error processing file", params={
            'error': error,
        })
        raise Exception('Error processing external chip file')