Example #1
    def test_export_data(self):
        data = "python rules".encode("utf-8")
        temp = tempfile.NamedTemporaryFile(delete=False)
        self.files_to_delete.append(temp.name)

        filename_or_fobj = temp.file
        result = plugins_utils.export_data(filename_or_fobj, data)
        temp.file.seek(0)
        output = temp.file.read()
        self.assertIs(result, temp.file)
        self.assertEqual(output, data)

        filename_or_fobj = None
        result = plugins_utils.export_data(filename_or_fobj, data)
        self.assertIs(result, data)
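
The test above pins down the contract that all of the exporters below rely on: given a file-like object, `export_data` writes the bytes to it and returns that same object; given `None`, it returns the data unchanged. The sketch below restates that contract only to make it explicit; it is not the library's implementation, and the filename branch is an assumption based on the parameter name.

# Minimal sketch of the contract exercised by the test above; NOT the actual
# rows.plugins.utils.export_data implementation.
def export_data_sketch(filename_or_fobj, data, mode='wb'):
    """Write bytes to a target, or return them when there is no target."""
    if filename_or_fobj is None:
        # No target given: hand the serialized data back to the caller.
        return data
    if hasattr(filename_or_fobj, 'write'):
        # File-like object: write the bytes and return the same object,
        # which is what the assertIs() check above relies on.
        filename_or_fobj.write(data)
        return filename_or_fobj
    # Hypothetical filename branch (not covered by this test): open the file
    # in the requested mode and write the data.
    with open(filename_or_fobj, mode) as fobj:
        fobj.write(data)
    return filename_or_fobj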
Example #2
def export_to_json(table, filename_or_fobj=None, encoding='utf-8', indent=None,
                   *args, **kwargs):
    '''Export a `rows.Table` to a JSON file or file-like object

    If a file-like object is provided it MUST be open in binary mode (like in
    `open('myfile.json', mode='wb')`).
    '''
    # TODO: will work only if table.fields is OrderedDict

    fields = table.fields
    prepared_table = prepare_to_export(table, *args, **kwargs)
    field_names = next(prepared_table)
    data = [{field_name: _convert(value, fields[field_name], *args, **kwargs)
             for field_name, value in zip(field_names, row)}
            for row in prepared_table]

    result = json.dumps(data, indent=indent)
    if type(result) is six.text_type:  # Python 3
        result = result.encode(encoding)

    if indent is not None:
        # clean up empty spaces at the end of lines
        result = b'\n'.join(line.rstrip() for line in result.splitlines())

    return export_data(filename_or_fobj, result, mode='wb')
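
Because the serialized JSON is encoded to bytes before it is handed to `export_data`, a file-like target has to be opened in binary mode, as the docstring says. A short usage sketch follows, assuming the usual `rows.Table`/`rows.fields` construction API and the top-level `rows.export_to_json` entry point; the filenames are made up.

import io
from collections import OrderedDict

import rows  # assumed top-level API

table = rows.Table(fields=OrderedDict([('name', rows.fields.TextField),
                                       ('age', rows.fields.IntegerField)]))
table.append({'name': 'Alice', 'age': 30})

# 1) Export to a path (hypothetical filename).
rows.export_to_json(table, 'people.json', indent=2)

# 2) Export to a file-like object -- it must be in binary mode.
buf = io.BytesIO()
rows.export_to_json(table, buf)

# 3) No target: the encoded JSON bytes are returned directly.
json_bytes = rows.export_to_json(table)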
Example #3
def export_to_yaml(table,
                   filename_or_fobj=None,
                   encoding='utf-8',
                   indent=None,
                   *args,
                   **kwargs):
    '''Export a `rows.Table` to a YAML file or file-like object
    '''

    all_fields = table.fields
    prepared_table = prepare_to_export(table, *args, **kwargs)
    field_names = next(prepared_table)
    data = [{
        field_name: _convert(value, all_fields[field_name], *args, **kwargs)
        for field_name, value in zip(field_names, row)
    } for row in prepared_table]

    result = yaml.dump(data, indent=indent)
    if type(result) is six.text_type:
        result = result.encode(encoding)

    if indent is not None:
        # clean up empty spaces at the end of lines
        result = b'\n'.join(line.rstrip() for line in result.splitlines())

    return export_data(filename_or_fobj, result, mode='wb')
Example #4
def export_to_json(table,
                   filename_or_fobj=None,
                   encoding='utf-8',
                   indent=None,
                   *args,
                   **kwargs):
    '''Export a `rows.Table` to a JSON file or file-like object

    If a file-like object is provided it MUST be open in binary mode (like in
    `open('myfile.json', mode='wb')`).
    '''
    # TODO: will work only if table.fields is OrderedDict

    fields = table.fields
    prepared_table = prepare_to_export(table, *args, **kwargs)
    field_names = next(prepared_table)
    data = [{
        field_name: _convert(value, fields[field_name], *args, **kwargs)
        for field_name, value in zip(field_names, row)
    } for row in prepared_table]

    result = json.dumps(data, indent=indent)
    if type(result) is six.text_type:  # Python 3
        result = result.encode(encoding)

    if indent is not None:
        # clean up empty spaces at the end of lines
        result = b'\n'.join(line.rstrip() for line in result.splitlines())

    return export_data(filename_or_fobj, result, mode='wb')
Example #5
def export_to_txt(table,
                  filename_or_fobj=None,
                  encoding='utf-8',
                  *args,
                  **kwargs):
    # TODO: should be able to change DASH, PLUS and PIPE
    # TODO: will work only if table.fields is OrderedDict
    # TODO: should use fobj? What about creating a method like json.dumps?

    kwargs['encoding'] = encoding
    serialized_table = serialize(table, *args, **kwargs)
    field_names = serialized_table.next()
    table_rows = list(serialized_table)
    max_sizes = _max_column_sizes(field_names, table_rows)

    dashes = [DASH * (max_sizes[field] + 2) for field in field_names]
    header = [field.center(max_sizes[field]) for field in field_names]
    header = '{} {} {}'.format(PIPE, ' {} '.format(PIPE).join(header), PIPE)
    split_line = PLUS + PLUS.join(dashes) + PLUS

    result = [split_line, header, split_line]
    for row in table_rows:
        values = [
            value.rjust(max_sizes[field_name])
            for field_name, value in zip(field_names, row)
        ]
        row_data = ' {} '.format(PIPE).join(values)
        result.append('{} {} {}'.format(PIPE, row_data, PIPE))
    result.extend([split_line, ''])
    data = '\n'.join(result).encode(encoding)

    return export_data(filename_or_fobj, data)
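
The width bookkeeping above assumes a helper that, for every column, takes the longest serialized value, header included (the values coming out of `serialize()` are already text, so `len()` is enough). A hypothetical equivalent, written only to make that computation explicit and not taken from the library:

def _max_column_sizes_sketch(field_names, table_rows):
    # Start from the header widths, then widen each column to fit every cell.
    sizes = {name: len(name) for name in field_names}
    for row in table_rows:
        for name, value in zip(field_names, row):
            sizes[name] = max(sizes[name], len(value))
    return sizes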
Example #6
File: txt.py  Project: abelthf/rows
def export_to_txt(table, filename_or_fobj=None, encoding='utf-8', *args, **kwargs):
    # TODO: should be able to change DASH, PLUS and PIPE
    # TODO: will work only if table.fields is OrderedDict
    # TODO: should use fobj? What about creating a method like json.dumps?

    kwargs['encoding'] = encoding
    serialized_table = serialize(table, *args, **kwargs)
    field_names = serialized_table.next()
    table_rows = list(serialized_table)
    max_sizes = _max_column_sizes(field_names, table_rows)

    dashes = [DASH * (max_sizes[field] + 2) for field in field_names]
    header = [field.center(max_sizes[field]) for field in field_names]
    header = '{} {} {}'.format(PIPE, ' {} '.format(PIPE).join(header), PIPE)
    split_line = PLUS + PLUS.join(dashes) + PLUS

    result = [split_line, header, split_line]
    for row in table_rows:
        values = [value.rjust(max_sizes[field_name])
                  for field_name, value in zip(field_names, row)]
        row_data = ' {} '.format(PIPE).join(values)
        result.append('{} {} {}'.format(PIPE, row_data, PIPE))
    result.extend([split_line, ''])
    data = '\n'.join(result).encode(encoding)

    return export_data(filename_or_fobj, data)
Example #7
File: _json.py  Project: abelthf/rows
def export_to_json(table, filename_or_fobj=None, encoding='utf-8', indent=None,
                   *args, **kwargs):
    # TODO: will work only if table.fields is OrderedDict

    fields = table.fields
    prepared_table = prepare_to_export(table, *args, **kwargs)
    field_names = prepared_table.next()
    data = [{field_name: _convert(value, fields[field_name], *args, **kwargs)
             for field_name, value in zip(field_names, row)}
            for row in prepared_table]

    result = json.dumps(data, indent=indent)
    if indent is not None:
        result = '\n'.join(line.rstrip() for line in result.splitlines())

    return export_data(filename_or_fobj, result)
Example #8
def export_to_html(table, filename_or_fobj=None, encoding="utf-8", *args, **kwargs):
    """Export and return rows.Table data to HTML file."""
    serialized_table = serialize(table, *args, **kwargs)
    fields = next(serialized_table)
    result = ["<table>\n\n", "  <thead>\n", "    <tr>\n"]
    header = ["      <th> {} </th>\n".format(field) for field in fields]
    result.extend(header)
    result.extend(["    </tr>\n", "  </thead>\n", "\n", "  <tbody>\n", "\n"])
    for index, row in enumerate(serialized_table, start=1):
        css_class = "odd" if index % 2 == 1 else "even"
        result.append('    <tr class="{}">\n'.format(css_class))
        for value in row:
            result.extend(["      <td> ", escape(value), " </td>\n"])
        result.append("    </tr>\n\n")
    result.append("  </tbody>\n\n</table>\n")
    html = "".join(result).encode(encoding)

    return export_data(filename_or_fobj, html, mode="wb")
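
This version passes each cell through `escape()` before embedding it in the markup, so values containing `<`, `>` or `&` render as text instead of being interpreted as tags (some of the older variants further down skip that step). A short usage sketch, assuming the usual top-level `rows` API; the filenames are made up.

import rows  # assumed top-level API

table = rows.import_from_csv('people.csv')  # hypothetical input file

# Export to a path; the markup is encoded with the given encoding before writing.
rows.export_to_html(table, 'people.html', encoding='utf-8')

# With no target, the encoded HTML bytes are returned.
html_bytes = rows.export_to_html(table)
print(html_bytes.decode('utf-8'))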
Example #9
def export_to_html(table, filename_or_fobj=None, encoding='utf-8', *args,
                   **kwargs):
    serialized_table = serialize(table, *args, **kwargs)
    fields = next(serialized_table)
    result = ['<table>\n\n', '  <thead>\n', '    <tr>\n']
    header = ['      <th> {} </th>\n'.format(field) for field in fields]
    result.extend(header)
    result.extend(['    </tr>\n', '  </thead>\n', '\n', '  <tbody>\n', '\n'])
    for index, row in enumerate(serialized_table, start=1):
        css_class = 'odd' if index % 2 == 1 else 'even'
        result.append('    <tr class="{}">\n'.format(css_class))
        for value in row:
            result.extend(['      <td> ', escape(value), ' </td>\n'])
        result.append('    </tr>\n\n')
    result.append('  </tbody>\n\n</table>\n')
    html = ''.join(result).encode(encoding)

    return export_data(filename_or_fobj, html, mode='wb')
Example #10
def export_to_html(table,
                   filename_or_fobj=None,
                   encoding='utf-8',
                   *args,
                   **kwargs):
    serialized_table = serialize(table, *args, **kwargs)
    fields = next(serialized_table)
    result = ['<table>\n\n', '  <thead>\n', '    <tr>\n']
    header = ['      <th> {} </th>\n'.format(field) for field in fields]
    result.extend(header)
    result.extend(['    </tr>\n', '  </thead>\n', '\n', '  <tbody>\n', '\n'])
    for index, row in enumerate(serialized_table, start=1):
        css_class = 'odd' if index % 2 == 1 else 'even'
        result.append('    <tr class="{}">\n'.format(css_class))
        for value in row:
            result.extend(['      <td> ', value, ' </td>\n'])
        result.append('    </tr>\n\n')
    result.append('  </tbody>\n\n</table>\n')
    html = ''.join(result).encode(encoding)

    return export_data(filename_or_fobj, html, mode='wb')
Example #11
File: _json.py  Project: abelthf/rows
def export_to_json(table,
                   filename_or_fobj=None,
                   encoding='utf-8',
                   indent=None,
                   *args,
                   **kwargs):
    # TODO: will work only if table.fields is OrderedDict

    fields = table.fields
    prepared_table = prepare_to_export(table, *args, **kwargs)
    field_names = prepared_table.next()
    data = [{
        field_name: _convert(value, fields[field_name], *args, **kwargs)
        for field_name, value in zip(field_names, row)
    } for row in prepared_table]

    result = json.dumps(data, indent=indent)
    if indent is not None:
        result = '\n'.join(line.rstrip() for line in result.splitlines())

    return export_data(filename_or_fobj, result)
Example #12
def export_to_html(table, filename_or_fobj=None, encoding='utf-8', *args,
                   **kwargs):
    kwargs['encoding'] = encoding
    serialized_table = serialize(table, *args, **kwargs)
    fields = serialized_table.next()
    result = ['<table>\n\n', '  <thead>\n', '    <tr>\n']
    header = ['      <th> {} </th>\n'.format(field) for field in fields]
    result.extend(header)
    result.extend(['    </tr>\n', '  </thead>\n', '\n', '  <tbody>\n', '\n'])
    for index, row in enumerate(serialized_table, start=1):
        css_class = 'odd' if index % 2 == 1 else 'even'
        result.append('    <tr class="{}">\n'.format(css_class))
        for value in row:
            result.extend(['      <td> ', value, ' </td>\n'])
        result.append('    </tr>\n\n')
    result.append('  </tbody>\n\n</table>\n')
    new_result = [value.encode(encoding) if isinstance(value, unicode)
                  else value
                  for value in result]
    html = ''.encode(encoding).join(new_result)

    return export_data(filename_or_fobj, html)
Example #13
def export_to_txt(table, filename_or_fobj=None, encoding=None,
                  *args, **kwargs):
    '''Export a `rows.Table` to text

    This function can return the result as a string or save into a file (via
    filename or file-like object).

    `encoding` may be `None` if no filename/file-like object is specified; in
    that case the return type will be `six.text_type`.
    '''
    # TODO: should be able to change DASH, PLUS and PIPE
    # TODO: will work only if table.fields is OrderedDict

    serialized_table = serialize(table, *args, **kwargs)
    field_names = next(serialized_table)
    table_rows = list(serialized_table)
    max_sizes = _max_column_sizes(field_names, table_rows)

    dashes = [DASH * (max_sizes[field] + 2) for field in field_names]
    header = [field.center(max_sizes[field]) for field in field_names]
    header = '{} {} {}'.format(PIPE, ' {} '.format(PIPE).join(header), PIPE)
    split_line = PLUS + PLUS.join(dashes) + PLUS

    result = [split_line, header, split_line]
    for row in table_rows:
        values = [value.rjust(max_sizes[field_name])
                  for field_name, value in zip(field_names, row)]
        row_data = ' {} '.format(PIPE).join(values)
        result.append('{} {} {}'.format(PIPE, row_data, PIPE))
    result.extend([split_line, ''])
    data = '\n'.join(result)

    if encoding is not None:
        data = data.encode(encoding)

    return export_data(filename_or_fobj, data, mode='wb')
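
With the signature above, leaving both the target and `encoding` as `None` returns the rendered table as text, which is handy for quick inspection at a prompt; pass an `encoding` whenever a filename or file-like object is given, since the data is written in binary mode. A short usage sketch, assuming this newer signature is the one exposed as `rows.export_to_txt`; the filenames are made up.

import rows  # assumed top-level API

table = rows.import_from_csv('people.csv')  # hypothetical input file

# No target and no encoding: the ASCII table comes back as text.
print(rows.export_to_txt(table))

# With a target, pass an encoding so the text is encoded before writing.
rows.export_to_txt(table, 'people.txt', encoding='utf-8')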
Example #14
File: txt.py  Project: turicas/rows
def export_to_txt(
    table,
    filename_or_fobj=None,
    encoding=None,
    frame_style="ASCII",
    safe_none_frame=True,
    *args,
    **kwargs
):
    """Export a `rows.Table` to text.

    This function can return the result as a string or save into a file (via
    filename or file-like object).

    `encoding` may be `None` if no filename/file-like object is specified; in
    that case the return type will be `six.text_type`.
    `frame_style`: selects the frame style to be printed around the data.
    Valid values are ('None', 'ASCII', 'single', 'double'); ASCII is the
    default. Warning: no check is made that the desired encoding supports the
    characters needed by the single and double frame styles.

    `safe_none_frame`: bool, defaults to True. Affects only output with
    frame_style == "None": column titles are left-aligned and have whitespace
    replaced with "_", which keeps the output parseable. Otherwise, the
    generated table will look prettier but cannot be imported back.
    """
    # TODO: will work only if table.fields is OrderedDict

    frame_style = _parse_frame_style(frame_style)
    frame = FRAMES[frame_style.lower()]

    serialized_table = serialize(table, *args, **kwargs)
    field_names = next(serialized_table)
    table_rows = list(serialized_table)
    max_sizes = _max_column_sizes(field_names, table_rows)

    dashes = [frame["HORIZONTAL"] * (max_sizes[field] + 2) for field in field_names]

    if frame_style != "None" or not safe_none_frame:
        header = [field.center(max_sizes[field]) for field in field_names]
    else:
        header = [
            field.replace(" ", "_").ljust(max_sizes[field]) for field in field_names
        ]
    header = "{0} {1} {0}".format(
        frame["VERTICAL"], " {} ".format(frame["VERTICAL"]).join(header)
    )
    top_split_line = (
        frame["DOWN AND RIGHT"]
        + frame["DOWN AND HORIZONTAL"].join(dashes)
        + frame["DOWN AND LEFT"]
    )

    body_split_line = (
        frame["VERTICAL AND RIGHT"]
        + frame["VERTICAL AND HORIZONTAL"].join(dashes)
        + frame["VERTICAL AND LEFT"]
    )

    botton_split_line = (
        frame["UP AND RIGHT"]
        + frame["UP AND HORIZONTAL"].join(dashes)
        + frame["UP AND LEFT"]
    )

    result = []
    if frame_style != "None":
        result += [top_split_line]
    result += [header, body_split_line]

    for row in table_rows:
        values = [
            value.rjust(max_sizes[field_name])
            for field_name, value in zip(field_names, row)
        ]
        row_data = " {} ".format(frame["VERTICAL"]).join(values)
        result.append("{0} {1} {0}".format(frame["VERTICAL"], row_data))

    if frame_style != "None":
        result.append(botton_split_line)
    result.append("")
    data = "\n".join(result)

    if encoding is not None:
        data = data.encode(encoding)

    return export_data(filename_or_fobj, data, mode="wb")
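
The newer signature adds `frame_style` and `safe_none_frame`. The sketch below shows how the two options combine, assuming the same top-level `rows` API as before; the input filename is made up.

import rows  # assumed top-level API

table = rows.import_from_csv('people.csv')  # hypothetical input file

# Unicode box-drawing frame (make sure the output encoding can represent it).
print(rows.export_to_txt(table, frame_style='single'))

# No frame at all; with safe_none_frame=True (the default) column titles are
# left-aligned and spaces become "_", so the output stays parseable.
print(rows.export_to_txt(table, frame_style='None'))

# No frame, prettier centered titles, but the result cannot be imported back.
print(rows.export_to_txt(table, frame_style='None', safe_none_frame=False))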
Example #15
def export_to_txt(table,
                  filename_or_fobj=None,
                  encoding=None,
                  frame_style="ASCII",
                  safe_none_frame=True,
                  *args,
                  **kwargs):
    """Export a `rows.Table` to text.

    This function can return the result as a string or save into a file (via
    filename or file-like object).

    `encoding` may be `None` if no filename/file-like object is specified; in
    that case the return type will be `six.text_type`.
    `frame_style`: selects the frame style to be printed around the data.
    Valid values are ('None', 'ASCII', 'single', 'double'); ASCII is the
    default. Warning: no check is made that the desired encoding supports the
    characters needed by the single and double frame styles.

    `safe_none_frame`: bool, defaults to True. Affects only output with
    frame_style == "None": column titles are left-aligned and have whitespace
    replaced with "_", which keeps the output parseable. Otherwise, the
    generated table will look prettier but cannot be imported back.
    """
    # TODO: will work only if table.fields is OrderedDict

    frame_style = _parse_frame_style(frame_style)
    frame = FRAMES[frame_style.lower()]

    serialized_table = serialize(table, *args, **kwargs)
    field_names = next(serialized_table)
    table_rows = list(serialized_table)
    max_sizes = _max_column_sizes(field_names, table_rows)

    dashes = [
        frame['HORIZONTAL'] * (max_sizes[field] + 2) for field in field_names
    ]

    if frame_style != 'None' or not safe_none_frame:
        header = [field.center(max_sizes[field]) for field in field_names]
    else:
        header = [
            field.replace(" ", "_").ljust(max_sizes[field])
            for field in field_names
        ]
    header = '{0} {1} {0}'.format(
        frame['VERTICAL'], ' {} '.format(frame['VERTICAL']).join(header))
    top_split_line = (frame['DOWN AND RIGHT'] +
                      frame['DOWN AND HORIZONTAL'].join(dashes) +
                      frame['DOWN AND LEFT'])

    body_split_line = (frame['VERTICAL AND RIGHT'] +
                       frame['VERTICAL AND HORIZONTAL'].join(dashes) +
                       frame['VERTICAL AND LEFT'])

    botton_split_line = (frame['UP AND RIGHT'] +
                         frame['UP AND HORIZONTAL'].join(dashes) +
                         frame['UP AND LEFT'])

    result = []
    if frame_style != 'None':
        result += [top_split_line]
    result += [header, body_split_line]

    for row in table_rows:
        values = [
            value.rjust(max_sizes[field_name])
            for field_name, value in zip(field_names, row)
        ]
        row_data = ' {} '.format(frame['VERTICAL']).join(values)
        result.append('{0} {1} {0}'.format(frame['VERTICAL'], row_data))

    if frame_style != 'None':
        result.append(botton_split_line)
    result.append('')
    data = '\n'.join(result)

    if encoding is not None:
        data = data.encode(encoding)

    return export_data(filename_or_fobj, data, mode='wb')