          ('F', 1))
table2 = (('foo', 'baz'),
          ('B', 3),
          ('D', 10))
from petl import annex, look
look(table1)
look(table2)
table3 = annex(table1, table2)
look(table3)


# progress

from petl import dummytable, progress, tocsv
d = dummytable(100500)
p = progress(d, 10000)
tocsv(p, 'output.csv')


# clock

from petl import dummytable, clock, convert, progress, tocsv
t1 = dummytable(100000)
c1 = clock(t1)
t2 = convert(c1, 'foo', lambda v: v**2)
c2 = clock(t2)
p = progress(c2, 10000)
tocsv(p, 'dummy.csv')

# time consumed retrieving rows from t1
c1.time
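
# (Added sketch, not part of the original example.) One way to check the
# 'output.csv' file written by the progress pipeline above is to read it back
# with petl's functional API; fromcsv, look and nrows are all standard petl
# functions. This assumes the pipeline has already been executed.

from petl import fromcsv, look, nrows
check = fromcsv('output.csv')
look(check)    # preview the first few rows read back from disk
nrows(check)   # should report 100500 data rows, matching dummytable(100500)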
def test_from_petl(self):
    nrows = 10
    ptbl = petl.dummytable(numrows=nrows)
    tbl = Table(ptbl)
    self.assertEqual(tbl.num_rows, nrows)
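
# (Added sketch, not part of the original test suite; assumed to sit in the
# same TestCase as test_from_petl above.) The same row count can be verified
# with petl alone, without the Table wrapper class: petl.nrows counts data
# rows, excluding the header.
def test_petl_nrows(self):
    nrows = 10
    ptbl = petl.dummytable(numrows=nrows)
    self.assertEqual(petl.nrows(ptbl), nrows)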
from __future__ import division, print_function, absolute_import


# randomtable()
###############

import petl as etl
table = etl.randomtable(3, 100, seed=42)
table


# dummytable()
##############

import petl as etl
table1 = etl.dummytable(100, seed=42)
table1

# customise fields
import random
from functools import partial
fields = [('foo', random.random),
          ('bar', partial(random.randint, 0, 500)),
          ('baz', partial(random.choice,
                          ['chocolate', 'strawberry', 'vanilla']))]
table2 = etl.dummytable(100, fields=fields, seed=42)
table2
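
# (Added sketch, not part of the original example.) The dummy tables above are
# lazy; to materialise and inspect the generated data, preview it with look()
# or write it out, e.g. to CSV. The filename 'dummy_fields.csv' is an
# arbitrary example, not from the original.

import petl as etl
etl.look(table2)                       # preview the first few generated rows
etl.tocsv(table2, 'dummy_fields.csv')  # evaluating the table writes 100 rows plus header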
from __future__ import division, print_function, absolute_import


# progress()
############

import petl as etl
table = etl.dummytable(100000)
table.progress(10000).tocsv('example.csv')


# clock()
#########

import petl as etl
t1 = etl.dummytable(100000)
c1 = etl.clock(t1)
t2 = etl.convert(c1, 'foo', lambda v: v**2)
c2 = etl.clock(t2)
p = etl.progress(c2, 10000)
etl.tocsv(p, 'example.csv')

# time consumed retrieving rows from t1
c1.time

# time consumed retrieving rows from t2
c2.time

# actual time consumed by the convert step
c2.time - c1.time
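
# (Added sketch, not part of the original example.) The .time attribute on a
# clock() wrapper holds the cumulative time (assumed here to be in seconds)
# spent retrieving rows from the wrapped table, so once the pipeline has run
# the figures gathered above can be reported like this:

print('rows from t1 took %.3fs' % c1.time)
print('rows from t2 took %.3fs' % c2.time)
print('convert step alone took %.3fs' % (c2.time - c1.time))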