コード例 #1
0
ファイル: io.py プロジェクト: deytao/petl
def appendpickle(table, source=None, protocol=-1):
    """
    Append data rows from `table` to an existing pickle file.

    Each data row (the header is skipped) is pickled and written to the
    end of the file identified by `source`, using the given pickle
    `protocol` (highest available protocol by default).

    Note that no attempt is made to check that the fields or row lengths
    are consistent with the existing data; the rows are simply appended
    to the file. See also the :func:`cat` function.

    """

    sink = _write_source_from_arg(source)
    with sink.open_('ab') as fileobj:
        for record in data(table):
            pickle.dump(record, fileobj, protocol)
コード例 #2
0
ファイル: hdf5.py プロジェクト: pombredanne/petlx
def _insert(table, h5table):
    """Copy the data rows of `table` into the HDF5 table `h5table`."""
    names = h5table.colnames
    for values in data(table):  # header not needed
        # relies on the field order matching between the input table and
        # the HDF5 table; the field names themselves need not match
        for idx, name in enumerate(names):
            h5table.row[name] = values[idx]
        h5table.row.append()
    h5table.flush()
コード例 #3
0
ファイル: hdf5.py プロジェクト: hexatonics/petlx
def _insert(table, h5table):
    """Copy the data rows of `table` into the HDF5 table `h5table`."""
    names = h5table.colnames
    for values in data(table):  # header not needed
        # relies on the field order matching between the input table and
        # the HDF5 table; the field names themselves need not match
        for idx, name in enumerate(names):
            h5table.row[name] = values[idx]
        h5table.row.append()
    h5table.flush()
コード例 #4
0
ファイル: csv.py プロジェクト: talwai/petl
def tocsv(table, source=None, dialect=csv.excel, write_header=True, **kwargs):
    """
    Write the table to a CSV file. E.g.::

        >>> from petl import tocsv, look
        >>> look(table)
        +-------+-------+
        | 'foo' | 'bar' |
        +=======+=======+
        | 'a'   | 1     |
        +-------+-------+
        | 'b'   | 2     |
        +-------+-------+
        | 'c'   | 2     |
        +-------+-------+

        >>> tocsv(table, 'test.csv')
        >>> # look what it did
        ... from petl import fromcsv
        >>> look(fromcsv('test.csv'))
        +-------+-------+
        | 'foo' | 'bar' |
        +=======+=======+
        | 'a'   | '1'   |
        +-------+-------+
        | 'b'   | '2'   |
        +-------+-------+
        | 'c'   | '2'   |
        +-------+-------+

    The `filename` argument is the path of the delimited file, and the optional
    `write_header` argument specifies whether to include the field names in the
    delimited file.  All other keyword arguments are passed to
    :func:`csv.writer`. So, e.g., to override the delimiter from the default
    CSV dialect, provide the `delimiter` keyword argument.

    Note that if a file already exists at the given location, it will be
    overwritten.

    Supports transparent writing to ``.gz`` and ``.bz2`` files.

    """

    source = write_source_from_arg(source)
    with source.open_('wb') as f:
        writer = csv.writer(f, dialect=dialect, **kwargs)
        # include the header row only when requested; iterating the table
        # itself yields the header first, data(table) skips it
        rows = table if write_header else data(table)
        for row in rows:
            writer.writerow(row)
コード例 #5
0
ファイル: csv.py プロジェクト: wstranger/petl
def tocsv(table, source=None, dialect=csv.excel, write_header=True, **kwargs):
    """
    Write the table to a CSV file. E.g.::

        >>> from petl import tocsv, look
        >>> look(table)
        +-------+-------+
        | 'foo' | 'bar' |
        +=======+=======+
        | 'a'   | 1     |
        +-------+-------+
        | 'b'   | 2     |
        +-------+-------+
        | 'c'   | 2     |
        +-------+-------+

        >>> tocsv(table, 'test.csv')
        >>> # look what it did
        ... from petl import fromcsv
        >>> look(fromcsv('test.csv'))
        +-------+-------+
        | 'foo' | 'bar' |
        +=======+=======+
        | 'a'   | '1'   |
        +-------+-------+
        | 'b'   | '2'   |
        +-------+-------+
        | 'c'   | '2'   |
        +-------+-------+

    The `filename` argument is the path of the delimited file, and the optional
    `write_header` argument specifies whether to include the field names in the
    delimited file.  All other keyword arguments are passed to
    :func:`csv.writer`. So, e.g., to override the delimiter from the default
    CSV dialect, provide the `delimiter` keyword argument.

    Note that if a file already exists at the given location, it will be
    overwritten.

    Supports transparent writing to ``.gz`` and ``.bz2`` files.

    """

    source = write_source_from_arg(source)
    with source.open_('wb') as f:
        writer = csv.writer(f, dialect=dialect, **kwargs)
        # include the header row only when requested; iterating the table
        # itself yields the header first, data(table) skips it
        rows = table if write_header else data(table)
        for row in rows:
            writer.writerow(row)
コード例 #6
0
ファイル: csv.py プロジェクト: wstranger/petl
 def __iter__(self):
     """Write rows to the CSV sink and re-yield each row as it is written.

     When ``self.write_header`` is false only the data rows are written;
     otherwise the header row is written too.
     """
     source = write_source_from_arg(self.source)
     with source.open_('wb') as f:
         writer = csv.writer(f, dialect=self.dialect, **self.kwargs)
         # iterating the table yields the header first; data() skips it
         rows = self.table if self.write_header else data(self.table)
         for row in rows:
             writer.writerow(row)
             yield row
コード例 #7
0
ファイル: csv.py プロジェクト: wstranger/petl
def appenducsv(table, source=None, dialect=csv.excel, encoding='utf-8',
               **kwargs):
    """
    Append the table to a CSV file via the given encoding. Like
    :func:`appendcsv` but accepts an additional ``encoding`` argument which
    should be one of the Python supported encodings. See also :mod:`codecs`.

    .. versionadded:: 0.19
    """
    sink = write_source_from_arg(source)
    with sink.open_('ab') as fileobj:
        writer = UnicodeWriter(fileobj, dialect=dialect, encoding=encoding,
                               **kwargs)
        # append data rows only; the header is assumed to exist already
        for record in data(table):
            writer.writerow(record)
コード例 #8
0
ファイル: csv.py プロジェクト: talwai/petl
 def __iter__(self):
     """Write rows to the CSV sink and re-yield each row as it is written.

     When ``self.write_header`` is false only the data rows are written;
     otherwise the header row is written too.
     """
     source = write_source_from_arg(self.source)
     with source.open_('wb') as f:
         writer = csv.writer(f, dialect=self.dialect, **self.kwargs)
         # iterating the table yields the header first; data() skips it
         rows = self.table if self.write_header else data(self.table)
         for row in rows:
             writer.writerow(row)
             yield row
コード例 #9
0
ファイル: csv.py プロジェクト: talwai/petl
def appenducsv(table, source=None, dialect=csv.excel, encoding='utf-8',
               **kwargs):
    """
    Append the table to a CSV file via the given encoding. Like
    :func:`appendcsv` but accepts an additional ``encoding`` argument which
    should be one of the Python supported encodings. See also :mod:`codecs`.

    .. versionadded:: 0.19
    """
    sink = write_source_from_arg(source)
    with sink.open_('ab') as fileobj:
        writer = UnicodeWriter(fileobj, dialect=dialect, encoding=encoding,
                               **kwargs)
        # append data rows only; the header is assumed to exist already
        for record in data(table):
            writer.writerow(record)
コード例 #10
0
def tojsonarrays(table, source=None, prefix=None, suffix=None,
                 output_header=False, *args, **kwargs):
    """
    Write a table in JSON format, with rows output as JSON arrays.

    By default only the data rows are serialized; pass
    ``output_header=True`` to include the header row as the first array.
    Optional `prefix` and `suffix` strings are written verbatim before and
    after the JSON document. Extra positional and keyword arguments are
    forwarded to :class:`JSONEncoder`.

    Note that this is currently not streaming, all data is loaded into
    memory before being written to the file.

    Supports transparent writing to ``.gz`` and ``.bz2`` files.

    .. versionadded:: 0.11

    """

    encoder = JSONEncoder(*args, **kwargs)
    sink = write_source_from_arg(source)
    # materialize the rows up front (not streaming)
    if output_header:
        rows = list(table)
    else:
        rows = list(data(table))
    with sink.open_('wb') as fileobj:
        if prefix is not None:
            fileobj.write(prefix)
        for fragment in encoder.iterencode(rows):
            fileobj.write(fragment)
        if suffix is not None:
            fileobj.write(suffix)
コード例 #11
0
def itercrossjoin(sources, prefix):
    """Yield the cross (Cartesian) join of the given source tables.

    The header is the concatenation of all source headers; when `prefix`
    is truthy each field name is prefixed with a one-based source number.
    Every combination of one data row per source is then yielded as a
    single merged row.
    """

    # build the combined header row
    fields = []
    for idx, src in enumerate(sources):
        if prefix:
            # one-based numbering of the source tables
            fields.extend('%s_%s' % (idx + 1, f) for f in header(src))
        else:
            fields.extend(header(src))
    yield tuple(fields)

    # emit every combination of one data row from each source
    row_iters = [data(src) for src in sources]
    for combo in itertools.product(*row_iters):
        merged = []
        for part in combo:
            merged.extend(part)
        yield tuple(merged)
コード例 #12
0
ファイル: csv.py プロジェクト: wstranger/petl
def toucsv(table, source=None, dialect=csv.excel, encoding='utf-8',
           write_header=True, **kwargs):
    """
    Write the table to a CSV file via the given encoding. Like :func:`tocsv` but
    accepts an additional ``encoding`` argument which should be one of the
    Python supported encodings. See also :mod:`codecs`.

    .. versionadded:: 0.19
    """
    source = write_source_from_arg(source)
    with source.open_('wb') as f:
        writer = UnicodeWriter(f, dialect=dialect, encoding=encoding, **kwargs)
        # include the header row only when requested; iterating the table
        # itself yields the header first, data(table) skips it
        rows = table if write_header else data(table)
        for row in rows:
            writer.writerow(row)
コード例 #13
0
ファイル: csv.py プロジェクト: talwai/petl
def toucsv(table, source=None, dialect=csv.excel, encoding='utf-8',
           write_header=True, **kwargs):
    """
    Write the table to a CSV file via the given encoding. Like :func:`tocsv` but
    accepts an additional ``encoding`` argument which should be one of the
    Python supported encodings. See also :mod:`codecs`.

    .. versionadded:: 0.19
    """
    source = write_source_from_arg(source)
    with source.open_('wb') as f:
        writer = UnicodeWriter(f, dialect=dialect, encoding=encoding, **kwargs)
        # include the header row only when requested; iterating the table
        # itself yields the header first, data(table) skips it
        rows = table if write_header else data(table)
        for row in rows:
            writer.writerow(row)
コード例 #14
0
ファイル: csv.py プロジェクト: wstranger/petl
def appendcsv(table, source=None, dialect=csv.excel, **kwargs):
    """
    Append data rows from `table` to an existing CSV file.

    The `filename` argument is the path of the delimited file, all other
    keyword arguments are passed to :func:`csv.writer`. So, e.g., to
    override the delimiter from the default CSV dialect, provide the
    `delimiter` keyword argument.

    Note that no attempt is made to check that the fields or row lengths
    are consistent with the existing data, the data rows from the table
    are simply appended to the file. See also the :func:`cat` function.

    Supports transparent writing to ``.gz`` and ``.bz2`` files.

    """

    sink = write_source_from_arg(source)
    with sink.open_('ab') as fileobj:
        writer = csv.writer(fileobj, dialect=dialect, **kwargs)
        # the header row is skipped; the file is assumed to have one already
        for record in data(table):
            writer.writerow(record)
コード例 #15
0
ファイル: reshape.py プロジェクト: podpearson/petl
 def __iter__(self):
     """Yield each value of each data row of the wrapped table in turn."""
     for record in data(self.table):
         for item in record:
             yield item
コード例 #16
0
ファイル: io.py プロジェクト: deytao/petl
def _insert(cursor, tablename, placeholders, table):
    """Execute one parameterized INSERT per data row of `table`.

    Row values are bound as query parameters, but note that `tablename`
    and `placeholders` are interpolated directly into the SQL text and so
    must come from a trusted source.
    """
    sql = 'INSERT INTO %s VALUES (%s)' % (tablename, placeholders)
    for record in data(table):
        cursor.execute(sql, record)
コード例 #17
0
ファイル: test_util.py プロジェクト: wstranger/petl
def test_data():
    """data() should yield the rows of a table minus its header."""
    tbl = (('foo', 'bar'), ('a', 1), ('b', 2))
    expect = (('a', 1), ('b', 2))
    ieq(expect, data(tbl))
コード例 #18
0
ファイル: reshape.py プロジェクト: talwai/petl
 def __iter__(self):
     """Yield each value of each data row of the wrapped table in turn."""
     for record in data(self.table):
         for item in record:
             yield item
コード例 #19
0
ファイル: csv.py プロジェクト: talwai/petl
def appendcsv(table, source=None, dialect=csv.excel, **kwargs):
    """
    Append data rows from `table` to an existing CSV file.

    The `filename` argument is the path of the delimited file, all other
    keyword arguments are passed to :func:`csv.writer`. So, e.g., to
    override the delimiter from the default CSV dialect, provide the
    `delimiter` keyword argument.

    Note that no attempt is made to check that the fields or row lengths
    are consistent with the existing data, the data rows from the table
    are simply appended to the file. See also the :func:`cat` function.

    Supports transparent writing to ``.gz`` and ``.bz2`` files.

    """

    sink = write_source_from_arg(source)
    with sink.open_('ab') as fileobj:
        writer = csv.writer(fileobj, dialect=dialect, **kwargs)
        # the header row is skipped; the file is assumed to have one already
        for record in data(table):
            writer.writerow(record)