Esempio n. 1
0
File: txt.py Progetto: tilacog/rows
def export_to_txt(table, filename_or_fobj, encoding='utf-8', *args, **kwargs):
    """Write `table` to a file as a plain-text grid drawn with pipes/dashes.

    Column widths come from `_max_column_sizes`; header cells are centered
    and data cells right-justified. The rendered text is encoded with
    `encoding` before writing. Returns the flushed file object.
    """
    # TODO: will work only if table.fields is OrderedDict
    # TODO: should use fobj? What about creating a method like json.dumps?

    filename, fobj = get_filename_and_fobj(filename_or_fobj, mode='w')
    max_sizes = _max_column_sizes(table, encoding, *args, **kwargs)

    field_names = table.fields.keys()
    cell_separator = ' {} '.format(PIPE)

    # Horizontal rule (+----+-----+) sized to each column plus 2 padding chars.
    split_line = PLUS + PLUS.join(
        DASH * (max_sizes[name] + 2) for name in field_names
    ) + PLUS
    header_cells = [name.center(max_sizes[name]) for name in field_names]
    header = '{} {} {}'.format(PIPE, cell_separator.join(header_cells), PIPE)

    lines = [split_line, header, split_line]
    for row in serialize(table):
        cells = [value.rjust(max_sizes[name])
                 for name, value in zip(field_names, row)]
        lines.append('{} {} {}'.format(PIPE, cell_separator.join(cells), PIPE))
    lines.append(split_line)
    lines.append('\n')

    fobj.write('\n'.join(lines).encode(encoding))
    fobj.flush()
    return fobj
Esempio n. 2
0
def export_to_html(table, filename_or_fobj=None, encoding='utf-8'):
    """Serialize `table` as an encoded HTML `<table>`.

    Data rows alternate between "odd" and "even" CSS classes (1-based).
    If `filename_or_fobj` is given, the encoded HTML is written to it and
    the flushed file object is returned; otherwise the encoded bytes are
    returned directly.
    """
    field_names = table.fields.keys()

    pieces = ['<table>\n\n', '  <thead>\n', '    <tr>\n']
    for field in field_names:
        pieces.append('      <th> {} </th>\n'.format(field))
    pieces.extend(['    </tr>\n', '  </thead>\n', '\n', '  <tbody>\n', '\n'])

    for index, row in enumerate(serialize(table, encoding=encoding), start=1):
        css_class = 'even' if index % 2 == 0 else 'odd'
        pieces.append('    <tr class="{}">\n'.format(css_class))
        for value in row:
            pieces.extend(['      <td> ', value, ' </td>\n'])
        pieces.append('    </tr>\n\n')
    pieces.append('  </tbody>\n\n</table>\n')

    # Encode any unicode fragments so the join operates on bytes only.
    encoded = [piece.encode(encoding) if isinstance(piece, unicode) else piece
               for piece in pieces]
    html = ''.encode(encoding).join(encoded)

    if filename_or_fobj is None:
        return html
    filename, fobj = get_filename_and_fobj(filename_or_fobj, mode='w')
    fobj.write(html)
    fobj.flush()
    return fobj
Esempio n. 3
0
def export_to_txt(table, filename_or_fobj, encoding='utf-8', *args, **kwargs):
    """Render `table` as a fixed-width textual grid and write it out.

    Each column is as wide as its longest serialized value (see
    `_max_column_sizes`); headers are centered, values right-aligned.
    Output is encoded with `encoding`; the flushed file object is returned.
    """
    # TODO: will work only if table.fields is OrderedDict
    # TODO: should use fobj? What about creating a method like json.dumps?

    filename, fobj = get_filename_and_fobj(filename_or_fobj, mode='w')
    max_sizes = _max_column_sizes(table, encoding, *args, **kwargs)

    columns = table.fields.keys()
    widths = [max_sizes[column] for column in columns]
    joiner = ' {} '.format(PIPE)

    split_line = PLUS + PLUS.join(DASH * (width + 2) for width in widths) + PLUS
    header_row = '{} {} {}'.format(
        PIPE,
        joiner.join(column.center(width)
                    for column, width in zip(columns, widths)),
        PIPE,
    )

    output = [split_line, header_row, split_line]
    for row in serialize(table):
        padded = (value.rjust(width) for width, value in zip(widths, row))
        output.append('{} {} {}'.format(PIPE, joiner.join(padded), PIPE))
    output.extend([split_line, '\n'])

    fobj.write('\n'.join(output).encode(encoding))
    fobj.flush()
    return fobj
Esempio n. 4
0
def export_to_html(table, filename_or_fobj=None, encoding='utf-8'):
    """Build an encoded HTML `<table>` from `table`.

    Rows get alternating "odd"/"even" CSS classes starting at "odd".
    With a filename/file object the bytes are written and the flushed
    file object returned; with no destination the bytes are returned.
    """
    parts = ['<table>\n\n', '  <thead>\n', '    <tr>\n']
    parts.extend('      <th> {} </th>\n'.format(name)
                 for name in table.fields.keys())
    parts.extend(['    </tr>\n', '  </thead>\n', '\n', '  <tbody>\n', '\n'])

    row_number = 0
    for row in serialize(table, encoding=encoding):
        row_number += 1
        parts.append('    <tr class="{}">\n'.format(
            'odd' if row_number % 2 == 1 else 'even'))
        for cell in row:
            parts.append('      <td> ')
            parts.append(cell)
            parts.append(' </td>\n')
        parts.append('    </tr>\n\n')
    parts.append('  </tbody>\n\n</table>\n')

    # Join as bytes: encode every unicode fragment first.
    empty = ''.encode(encoding)
    html = empty.join(part.encode(encoding) if isinstance(part, unicode)
                      else part
                      for part in parts)

    if filename_or_fobj is not None:
        filename, fobj = get_filename_and_fobj(filename_or_fobj, mode='w')
        fobj.write(html)
        fobj.flush()
        return fobj
    return html
Esempio n. 5
0
File: txt.py Progetto: tilacog/rows
def _max_column_sizes(table, encoding, *args, **kwargs):
    """Return a dict mapping each field name to its widest cell, in chars.

    The width of a column is the maximum of the field name's own length
    and the length of every serialized value in that column.
    """
    field_names = table.fields.keys()
    sizes = dict((name, len(name)) for name in field_names)
    for row in serialize(table, encoding=encoding, *args, **kwargs):
        for name, value in zip(field_names, row):
            sizes[name] = max(sizes[name], len(value))
    return sizes
Esempio n. 6
0
def _max_column_sizes(table, encoding, *args, **kwargs):
    """Compute per-field display widths for a serialized table.

    Starts each width at the field name's length, then grows it to fit
    the longest value seen for that field. Returns {field_name: width}.
    """
    names = list(table.fields.keys())
    widths = {}
    for name in names:
        widths[name] = len(name)
    for row in serialize(table, encoding=encoding, *args, **kwargs):
        for name, value in zip(names, row):
            length = len(value)
            if length > widths[name]:
                widths[name] = length
    return widths
Esempio n. 7
0
def export_to_csv(table, filename_or_fobj, encoding='utf-8'):
    """Write `table` to `filename_or_fobj` as CSV in the given encoding.

    The first row holds the field names; data rows come from
    `serialize`. Returns the flushed file object.
    """
    # TODO: will work only if table.fields is OrderedDict
    # TODO: should use fobj? What about creating a method like json.dumps?

    filename, fobj = get_filename_and_fobj(filename_or_fobj, mode='w')
    writer = unicodecsv.writer(fobj, encoding=encoding)

    writer.writerow(table.fields.keys())
    writer.writerows(serialize(table, encoding=encoding))

    fobj.flush()
    return fobj