def synthesis(prepare_res, slices, job):
    dw_passed, _ = prepare_res
    # Using set_slice on a dataset that was written in analysis is not
    # actually supported, but since it currently works (as long as that
    # particular slice wasn't written in analysis) let's test it.
    dw_passed.set_slice(0)
    dw_passed.write(**{k: v[0] for k, v in test_data.data.items()})
    dw_synthesis_split = DatasetWriter(name="synthesis_split", hashlabel="a")
    dw_synthesis_split.add("a", "int32")
    dw_synthesis_split.add("b", "unicode")
    dw_synthesis_split.get_split_write()(1, "a")
    dw_synthesis_split.get_split_write_list()([2, "b"])
    dw_synthesis_split.get_split_write_dict()({"a": 3, "b": "c"})
    dw_synthesis_manual = job.datasetwriter(name="synthesis_manual",
                                            columns={"sliceno": "int32"})
    dw_nonetest = job.datasetwriter(name="nonetest",
                                    columns={t: t
                                             for t in test_data.data})
    for sliceno in range(slices):
        dw_synthesis_manual.set_slice(sliceno)
        dw_synthesis_manual.write(sliceno)
        dw_nonetest.set_slice(sliceno)
        dw_nonetest.write(
            **{
                k: v[0] if k in test_data.not_none_capable else None
                for k, v in test_data.data.items()
            })
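A possible follow-up check (not part of the original example; the helper name is made up): since dw_synthesis_split was created with hashlabel="a", the rows written through the three split writers should be routed to the slice chosen by hashing column "a", so collecting every slice should give back exactly those rows.

def check_split_written_rows(dw_synthesis_split, slices):
    # finish() closes the writer and returns the resulting Dataset
    ds = dw_synthesis_split.finish()
    got = []
    for sliceno in range(slices):
        # iterate(sliceno) yields tuples in column add order: (a, b)
        got.extend(ds.iterate(sliceno))
    assert sorted(got) == [(1, "a"), (2, "b"), (3, "c")]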
Example #2
def prepare(params):
    d = datasets.source
    caption = options.caption % dict(caption=d.caption,
                                     hashlabel=options.hashlabel)
    if len(d.chain(stop_ds={datasets.previous: 'source'},
                   length=options.length)) == 1:
        filename = d.filename
    else:
        filename = None
    dws = []
    previous = datasets.previous
    for sliceno in range(params.slices):
        if options.as_chain and sliceno == params.slices - 1:
            name = "default"
        else:
            name = str(sliceno)
        dw = DatasetWriter(
            caption="%s (slice %d)" % (caption, sliceno),
            hashlabel=options.hashlabel,
            filename=filename,
            previous=previous,
            name=name,
            for_single_slice=sliceno,
        )
        previous = (params.jobid, name)
        dws.append(dw)
    names = []
    for n, c in d.columns.items():
        # names has to be in the same order as the add calls
        # so the iterator returns the same order the writer expects.
        names.append(n)
        for dw in dws:
            dw.add(n, c.type)
    return dws, names, caption, filename
Example #3
def prepare(params):
    assert params.slices >= 2, "Hashing won't do anything with just one slice"
    dws = DotDict()
    # all the numeric types should hash the same (for values they have in common)
    for name, hashlabel, typ in (
        ("unhashed_manual", None, "int32"),  # manually interlaved
        ("unhashed_split", None, "int64"),  # split_write interlaved
        ("up_checked", "up", "float32"),  # hashed on up using dw.hashcheck
        ("up_split", "up", "float64"),  # hashed on up using split_write
        ("down_checked", "down",
         "bits32"),  # hashed on down using dw.hashcheck
        ("down_discarded", "down",
         "bits64"),  # hashed on down using discarding writes
        ("down_discarded_list", "down",
         "number"),  # hashed on down using discarding list writes
        ("down_discarded_dict", "down",
         "complex32"),  # hashed on down using discarding dict writes
            # we have too many types, so we need more datasets
        ("unhashed_complex64", None, "complex64"),
        ("unhashed_bytes", None, "bytes"),
        ("up_ascii", "up", "ascii"),
        ("down_unicode", "down", "unicode"),
            # datetime on 1970-01-01 hashes like time
        ("up_datetime", "up", "datetime"),
        ("down_time", "down", "time"),
            # date doesn't hash the same as anything else, so compare it to itself
        ("up_date", "up", "date"),
        ("down_date", "down", "date"),
    ):
        dw = DatasetWriter(name=name, hashlabel=hashlabel)
        dw.add("up", typ)
        dw.add("down", typ)
        dws[name] = dw
    return dws
def prepare(job, slices):
    assert slices >= test_data.value_cnt
    dw_default = DatasetWriter()
    dw_default.add("a", "number")
    dw_default.add("b", "ascii")
    DatasetWriter(name="named", columns={"c": "bool", "d": "date"})
    dw_passed = job.datasetwriter(name="passed", columns=test_data.columns)
    return dw_passed, 42
Example #5
def prepare(params):
    dws = {}
    prev = None
    for name in "abcdefgh":
        dw = DatasetWriter(name=name, previous=prev)
        dw.add("ds", "ascii")
        dw.add("num", "number")
        dws[name] = dw
        prev = dw
    return dws
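A minimal sketch (not from the original) of how the chained writers returned above could be filled and finished in synthesis; the written values are invented for illustration.

def synthesis(prepare_res):
    dws = prepare_res
    for num, name in enumerate(sorted(dws)):
        dw = dws[name]
        # columns were added in the order ("ds", "num")
        dw.get_split_write()(name, num)
        ds = dw.finish()
    # the writers were chained through previous=, so the last dataset
    # should see all eight datasets in its chain
    assert len(ds.chain()) == len(dws)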
Example #6
def test_filter_bad_with_rename_and_chain():
    dw = DatasetWriter(name="filter bad with rename",
                       allow_missing_slices=True)
    dw.add('a', 'ascii')
    dw.add('b', 'bytes')
    dw.add('c', 'unicode')
    dw.set_slice(0)
    dw.write('0', b'1', '2')
    dw.write('9', b'A', 'B')
    dw.write('C', b'D', 'E')
    source_ds = dw.finish()
    jid = subjobs.build(
        'dataset_type',
        column2type=dict(b='int32_10', c='int64_16', d='int32_16'),
        filter_bad=True,
        rename=dict(a='b', b='c', c='d'),
        source=source_ds,
    )
    typed_ds = jid.dataset()
    coltypes = sorted(
        (name, col.type) for name, col in typed_ds.columns.items())
    assert coltypes == [('b', 'int32'), ('c', 'int64'),
                        ('d', 'int32')], coltypes
    assert list(typed_ds.iterate(0)) == [(0, 1, 2), (9, 10, 11)]
    bad_ds = jid.dataset('bad')
    coltypes = sorted((name, col.type) for name, col in bad_ds.columns.items())
    assert coltypes == [('b', 'ascii'), ('c', 'bytes'),
                        ('d', 'unicode')], coltypes
    assert list(bad_ds.iterate(0)) == [('C', b'D', 'E')]

    dw = DatasetWriter(name="filter bad with rename chain",
                       allow_missing_slices=True,
                       previous=source_ds)
    dw.add('a', 'ascii')
    dw.add('b', 'ascii')
    dw.add('c', 'ascii')
    dw.set_slice(0)
    dw.write('3', '4', '5')
    dw.write('6', '7', 'eight')
    source_ds = dw.finish()
    jid = subjobs.build(
        'dataset_type',
        column2type=dict(a='number', b='int32_10', c='int64_10'),
        defaults=dict(a='8'),
        filter_bad=True,
        rename=dict(a='b', b='c', c='a'),
        source=source_ds,
    )
    typed_ds = jid.dataset()
    coltypes = sorted(
        (name, col.type) for name, col in typed_ds.columns.items())
    assert coltypes == [('a', 'number'), ('b', 'int32'),
                        ('c', 'int64')], coltypes
    assert list(typed_ds.iterate(0)) == [(2, 0, 1), (5, 3, 4), (8, 6, 7)]
    bad_ds = jid.dataset('bad')
    coltypes = sorted((name, col.type) for name, col in bad_ds.columns.items())
    assert coltypes == [('a', 'unicode'), ('b', 'ascii'),
                        ('c', 'bytes')], coltypes
    assert list(bad_ds.iterate(0)) == [('B', '9', b'A'), ('E', 'C', b'D')]
def test_column_discarding():
	dw = DatasetWriter(name='column discarding')
	dw.add('a', 'bytes')
	dw.add('b', 'bytes')
	dw.add('c', 'bytes')
	w = dw.get_split_write()
	w(b'a', b'b', b'c')
	source = dw.finish()

	# Discard b because it's not typed
	ac_implicit = subjobs.build(
		'dataset_type',
		source=source,
		column2type=dict(a='ascii', c='ascii'),
		discard_untyped=True,
	).dataset()
	assert sorted(ac_implicit.columns) == ['a', 'c'], '%s: %r' % (ac_implicit, sorted(ac_implicit.columns),)
	assert list(ac_implicit.iterate(None)) == [('a', 'c',)], ac_implicit

	# Discard b explicitly
	ac_explicit = subjobs.build(
		'dataset_type',
		source=source,
		column2type=dict(a='ascii', c='ascii'),
		rename=dict(b=None),
	).dataset()
	assert sorted(ac_explicit.columns) == ['a', 'c'], '%s: %r' % (ac_explicit, sorted(ac_explicit.columns),)
	assert list(ac_explicit.iterate(None)) == [('a', 'c',)], ac_explicit

	# Discard c by overwriting it with b. Keep untyped b.
	ac_bASc = subjobs.build(
		'dataset_type',
		source=source,
		column2type=dict(a='ascii', c='ascii'),
		rename=dict(b='c'),
	).dataset()
	assert sorted(ac_bASc.columns) == ['a', 'b', 'c'], '%s: %r' % (ac_bASc, sorted(ac_bASc.columns),)
	assert list(ac_bASc.iterate(None)) == [('a', b'b', 'b',)], ac_bASc

	# Discard c by overwriting it with b. Also type b as a different type.
	abc_bASc = subjobs.build(
		'dataset_type',
		source=source,
		column2type=dict(a='ascii', b='strbool', c='ascii'),
		rename=dict(b='c'),
	).dataset()
	assert sorted(abc_bASc.columns) == ['a', 'b', 'c'], '%s: %r' % (abc_bASc, sorted(abc_bASc.columns),)
	assert list(abc_bASc.iterate(None)) == [('a', True, 'b',)], abc_bASc
Example #8
def test_rehash_with_empty_slices():
    dw = DatasetWriter(name='rehash with empty slices', hashlabel='a')
    dw.add('a', 'ascii')
    dw.add('b', 'ascii')
    w = dw.get_split_write()
    w('a', '42')
    w('42', 'b')
    source = dw.finish()
    hashfunc = typed_writer('int32').hash

    def verify_hashing(caption, want_values, **kw):
        ds = subjobs.build('dataset_type',
                           source=source,
                           column2type=dict(a='int32_10'),
                           caption=caption,
                           **kw).dataset()
        got_values = set()
        for sliceno in range(g.slices):
            for got in ds.iterate(sliceno):
                assert hashfunc(got[0]) % g.slices == sliceno
                assert got not in got_values
                got_values.add(got)
        assert want_values == got_values

    verify_hashing('with discard', {(42, 'b')}, filter_bad=True)
    # using defaults uses some different code paths
    verify_hashing('with default=0 (probably two slices)',
                   {(0, '42'), (42, 'b')}, defaults=dict(a='0'))
    verify_hashing('with default=42 (one slice)',
                   {(42, '42'), (42, 'b')}, defaults=dict(a='42'))
def synthesis(job):
    manual_chain = [Dataset(jobids.selfchain, name) for name in "abcdefgh"]
    manual_abf = [manual_chain[0], manual_chain[1], manual_chain[5]]
    # build a local abf chain
    prev = None
    for ix, ds in enumerate(manual_abf):
        name = "abf%d" % (ix, )
        prev = ds.link_to_here(name, override_previous=prev)
    manual_abf_data = list(Dataset.iterate_list(None, None, manual_abf))
    local_abf_data = list(Dataset(job, "abf2").iterate_chain(None, None))
    assert manual_abf_data == local_abf_data
    # disconnect h, verify there is no chain
    manual_chain[-1].link_to_here("alone", override_previous=None)
    assert len(Dataset(job, "alone").chain()) == 1
    # check that the original chain is unhurt
    assert manual_chain == manual_chain[-1].chain()

    # So far so good, now make a chain long enough to have a cache.
    prev = None
    ix = 0
    going = True
    while going:
        if prev and "cache" in prev._data:
            going = False
        name = "longchain%d" % (ix, )
        dw = DatasetWriter(name=name, previous=prev)
        dw.add("ix", "number")
        dw.get_split_write()(ix)
        prev = dw.finish()
        ix += 1
    # we now have a chain that goes one past the first cache point
    full_chain = Dataset(prev).chain()
    assert "cache" in full_chain[
        -2]._data  # just to check the above logic is correct
    assert "cache" not in full_chain[-1]._data  # just to be sure..
    full_chain[-2].link_to_here("nocache", override_previous=None)
    full_chain[-1].link_to_here("withcache", override_previous=full_chain[-3])
    assert "cache" not in Dataset(job, "nocache")._data
    assert "cache" in Dataset(job, "withcache")._data
    # And make sure they both get the right data too.
    assert list(Dataset(prev).iterate_chain(None, "ix")) == list(range(ix))
    assert list(Dataset(job, "nocache").iterate_chain(None, "ix")) == [ix - 2]
    assert list(Dataset(job, "withcache").iterate_chain(
        None, "ix")) == list(range(ix - 2)) + [ix - 1]
Example #10
def prepare(params):
    assert params.slices >= 2, "Hashing won't do anything with just one slice"
    dws = DotDict()
    for name, hashlabel in (
        ("unhashed_manual", None),  # manually interlaved
        ("unhashed_split", None),  # split_write interlaved
        ("up_checked", "up"),  # hashed on up using dw.hashcheck
        ("up_split", "up"),  # hashed on up using split_write
        ("down_checked", "down"),  # hashed on down using dw.hashcheck
        ("down_discarded", "down"),  # hashed on down using discarding writes
        ("down_discarded_list",
         "down"),  # hashed on down using discarding list writes
        ("down_discarded_dict",
         "down"),  # hashed on down using discarding dict writes
    ):
        dw = DatasetWriter(name=name, hashlabel=hashlabel)
        dw.add("up", "int32")
        dw.add("down", "int32")
        dws[name] = dw
    return dws
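A hedged sketch (not part of the original) of how one of these writers might be used from analysis, following the "hashed on up using dw.hashcheck" comment above; the value range is invented.

def analysis(sliceno, prepare_res):
    dws = prepare_res
    dw = dws.up_checked
    for up in range(100):
        down = 99 - up
        # hashcheck tells whether this hashlabel value belongs to the
        # slice this writer is currently writing; skip it otherwise so
        # the dataset stays correctly hashed on "up".
        if dw.hashcheck(up):
            dw.write(up, down)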
def test_filter_bad_across_types():
    columns = {
        'bytes': 'bytes',
        'float64': 'bytes',
        'int32_10': 'ascii',
        'json': 'unicode',
        'number:int': 'unicode',
        'unicode:utf-8': 'bytes',
    }
    # all_good, *values
    # Make sure all those types (except bytes) can filter other lines,
    # and be filtered by other lines. And that several filtering values
    # is not a problem (line 11).
    data = [
        [True, b'first', b'1.1', '1', '"a"', '001', b'ett'],
        [True, b'second', b'2.2', '2', '"b"', '02', b'tv\xc3\xa5'],
        [True, b'third', b'3.3', '3', '["c"]', '3.0', b'tre'],
        [False, b'fourth', b'4.4', '4', '"d"', '4.4', b'fyra'],  # number:int bad
        [False, b'fifth', b'5.5', '-', '"e"', '5', b'fem'],  # int32_10 bad
        [False, b'sixth', b'6.b', '6', '"f"', '6', b'sex'],  # float64 bad
        [False, b'seventh', b'7.7', '7', '{"g"}', '7', b'sju'],  # json bad
        [False, b'eigth', b'8.8', '8', '"h"', '8', b'\xa5\xc3tta'],  # unicode:utf-8 bad
        [True, b'ninth', b'9.9', '9', '"i"', '9', b'nio'],
        [True, b'tenth', b'10', '10', '"j"', '10', b'tio'],
        [False, b'eleventh', b'11a', '1-', '"k",', '1,', b'elva'],  # float64, int32_10 and number:int bad
        [True, b'twelfth', b'12', '12', '"l"', '12', b'tolv'],
    ]
    dw = DatasetWriter(name="filter bad across types", columns=columns)
    cols_to_check = ['int32_10', 'bytes', 'json', 'unicode:utf-8']
    if PY3:
        # z so it sorts last.
        dw.add('zpickle', 'pickle')
        cols_to_check.append('zpickle')
        for ix in range(len(data)):
            data[ix].append({ix})
    dw.set_slice(0)
    want = []

    def add_want(ix):
        v = data[ix]
        want.append((
            int(v[3]),
            v[1],
            json.loads(v[4]),
            v[6].decode('utf-8'),
        ))
        if PY3:
            want[-1] = want[-1] + (v[7], )

    for ix, v in enumerate(data):
        if v[0]:
            add_want(ix)
        dw.write(*v[1:])
    for sliceno in range(1, g.slices):
        dw.set_slice(sliceno)
    source_ds = dw.finish()
    # Once with just filter_bad, once with some defaults too.
    defaults = {}
    for _ in range(2):
        jid = subjobs.build(
            'dataset_type',
            datasets=dict(source=source_ds),
            options=dict(
                column2type={t: t for t in columns},
                filter_bad=True,
                defaults=defaults,
            ),
        )
        typed_ds = Dataset(jid)
        got = list(typed_ds.iterate(0, cols_to_check))
        assert got == want, "Exptected %r, got %r from %s (from %r%s)" % (
            want, got, typed_ds, source_ds,
            ' with defaults' if defaults else '')
        # make more lines "ok" for the second lap
        defaults = {'number:int': '0', 'float64': '0', 'json': '"replacement"'}
        add_want(3)
        add_want(5)
        data[6][4] = '"replacement"'
        add_want(6)
        want.sort()  # adding them out of order, int32_10 sorts correctly.
def mk_dw(name, cols, **kw):
    dw = DatasetWriter(name=name, **kw)
    for colname in cols:
        dw.add(colname, "unicode")
    return dw
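Hypothetical usage of the helper above (the dataset name, column names and hashlabel are made up for illustration):

dw = mk_dw("people", ["name", "city"], hashlabel="name")
w = dw.get_split_write()
w("alice", "london")
w("bob", "berlin")
people_ds = dw.finish()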
Example #13
def prepare(job, slices):
	# use 256 as a marker value, because that's not a possible char value (assuming 8 bit chars)
	lf_char = char2int("newline", 256)
	# separator uses lf_char or \n as the empty value, because memchr might mishandle 256.
	separator = char2int("separator", 10 if lf_char == 256 else lf_char)
	comment_char = char2int("comment", 256)
	if options.quotes == 'True':
		quote_char = 256
	elif options.quotes == 'False':
		quote_char = 257
	else:
		quote_char = char2int("quotes", 257, "True/False/empty")
	filename = os.path.join(job.source_directory, options.filename)
	orig_filename = filename
	assert 1 <= options.compression <= 9

	fds = [os.pipe() for _ in range(slices)]
	read_fds = [t[0] for t in fds]
	write_fds = [t[1] for t in fds]

	if options.labelsonfirstline:
		labels_rfd, labels_wfd = os.pipe()
	else:
		labels_wfd = -1
	success_rfd, success_wfd = os.pipe()
	status_rfd, status_wfd = os.pipe()

	p = Process(target=reader_process, name="reader", args=(slices, filename, write_fds, labels_wfd, success_wfd, status_wfd, comment_char, lf_char))
	p.start()
	for fd in write_fds:
		os.close(fd)
	os.close(success_wfd)
	os.close(status_wfd)

	if options.labelsonfirstline:
		os.close(labels_wfd)
		# re-use import logic
		out_fns = ["labels"]
		r_num = cstuff.mk_uint64(3)
		res = cstuff.backend.import_slice(*cstuff.bytesargs(labels_rfd, -1, -1, -1, out_fns, b"wb1", separator, r_num, quote_char, lf_char, 0))
		os.close(labels_rfd)
		assert res == 0, "c backend failed in label parsing"
		with typed_reader("bytes")("labels") as fh:
			labels_from_file = [lab.decode("utf-8", "backslashreplace") for lab in fh]
		os.unlink("labels")
	else:
		labels_from_file = None

	labels = options.labels or labels_from_file
	assert labels, "No labels"
	labels = [options.rename.get(x, x) for x in labels]
	assert '' not in labels, "Empty label for column %d" % (labels.index(''),)
	assert len(labels) == len(set(labels)), "Duplicate labels: %r" % (labels,)

	dw = DatasetWriter(
		columns={n: 'bytes' for n in labels if n not in options.discard},
		filename=orig_filename,
		caption='csvimport of ' + orig_filename,
		previous=datasets.previous,
		meta_only=True,
	)
	if options.lineno_label:
		dw.add(options.lineno_label, "int64")

	if options.allow_bad:
		bad_dw = DatasetWriter(
			name="bad",
			columns=dict(lineno="int64", data="bytes"),
			caption='bad lines from csvimport of ' + orig_filename,
			meta_only=True,
		)
	else:
		bad_dw = None

	if options.comment or options.skip_lines:
		skipped_dw = DatasetWriter(
			name="skipped",
			columns=dict(lineno="int64", data="bytes"),
			caption='skipped lines from csvimport of ' + orig_filename,
			meta_only=True,
		)
	else:
		skipped_dw = None

	return separator, quote_char, lf_char, filename, orig_filename, labels, dw, bad_dw, skipped_dw, read_fds, success_rfd, status_rfd,
def prepare():
    dw = DatasetWriter()
    dw.add("str", "ascii")
    dw.add("num", "number")
    return dw
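A matching analysis is not shown in the original; a minimal sketch could look like this, assuming (as elsewhere in the Accelerator API) that a writer used from analysis targets the current slice without an explicit set_slice call.

def analysis(sliceno, prepare_res):
    dw = prepare_res
    # columns were added as ("str", "ascii") and ("num", "number")
    dw.write("slice %d" % (sliceno,), sliceno)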