def test_leveldb_log_small(spec_log, data_log_small):
    """A small log: no head blocks, two records in the tail block."""
    board = model.Board()
    board.add_spec('Log', spec_log)
    board.add_data_source('data', data_log_small)
    dtree = board.eval_expr('data <> Log.LogFile')
    assert model.make_python_object(dtree.head_blocks) == []
    records = dtree.tail_block.records
    # (checksum, payload length, total record size incl. 7-byte header)
    expected_records = [
        (0xC227CC1B, 33, 40),
        (0x6EC2C495, 39, 46),
    ]
    assert len(records) == len(expected_records)
    for idx, (checksum, length, total_size) in enumerate(expected_records):
        record = records[idx]
        assert record.checksum == checksum
        assert record.length == length
        assert record.rtype == 1
        assert len(record.data) == length
        assert dtree.eval_expr(
            'sizeof(tail_block.records[{0}])'.format(idx)) == total_size
    assert dtree.eval_expr('sizeof(tail_block.records)') == 86
    assert dtree.eval_expr('sizeof(tail_block)') == 86
    # out-of-bounds access fails from both the native API and expressions
    with pytest.raises(IndexError):
        dummy = records[2]
    with pytest.raises(ValueError):
        dtree.eval_expr('tail_block.records[2]')
def test_invalid_bp(params_invalid_bp):
    """Loading an invalid spec must fail with OSError."""
    with pytest.raises(OSError):
        board = model.Board()
        board.add_spec('Spec', params_invalid_bp['spec'])
def test_bp(params_bp):
    """Smoke-test: a valid spec evaluates against sample data without error."""
    spec = params_bp['spec']
    # 30 bytes of repeating payload
    data = 'foobar' * 5
    board = model.Board()
    board.add_spec('Spec', spec)
    board.add_data_source('data', data)
    dtree = board.eval_expr('data <> Spec.Schema')
def test_tinydb(spec, data_ok1):
    """Exercise sequence- and mapping-style access on a parsed TinyDB document."""
    board = model.Board()
    board.add_spec('Spec', spec)
    board.add_data_source('data', data_ok1)
    dom = board.eval_expr('data <> Spec.Schema')
    assert len(dom['values']) == 3
    values = dom['values']
    assert str(values[1].key_value) == 'size'
    # out-of-range index raises IndexError; negative indices are rejected
    # with TypeError (not supported by the array wrapper)
    with pytest.raises(IndexError):
        a = values[3].key_value
    with pytest.raises(TypeError):
        b = values[-1].key_value
    # iterating the array yields items in document order
    expected_keys = ['color', 'size', 'description']
    niter = 0
    for item, expected_key in zip(values, expected_keys):
        assert str(item.key_value) == expected_key
        niter += 1
    assert niter == 3
    expected_attr_list = [
        'flags', 'key_size', 'key_value', 'value_size', 'value'
    ]
    expected_first_item_items = [('flags', 0),
                                 ('key_size', 5),
                                 ('key_value', 'color'),
                                 ('value_size', 3),
                                 ('value', 'red')]
    first_item = True
    for item in values:
        # item is a mapping because TinyDBValue is a 'struct'
        assert list(item.iter_keys()) == expected_attr_list
        if first_item:
            #FIXME
            #assert model.make_python_object(
            #    item.iter_items())) == expected_first_item_items
            first_item = False
    # comprehension exercises iteration + filtering; the result itself
    # is not used further
    description = [
        item for item in values
        if str(item.key_value) == 'description'
    ][0]
    item = values[1]
    # both attribute-style and key-style access must work on struct items
    assert 'key_value' in item
    assert str(item.key_value) == 'size'
    assert str(item['key_value']) == 'size'
    assert model.make_python_object(item.value) == 'two feet'
    assert model.make_python_object(item['value']) == 'two feet'
    # missing members raise the error type matching the access style
    assert 'foo' not in item
    with pytest.raises(AttributeError):
        print(item.foo)
    with pytest.raises(KeyError):
        print(item['foo'])
    with pytest.raises(IndexError):
        print(item[42])
def test_syntax_errors():
    """Each malformed spec must be rejected when added to a board."""
    #FIXME use custom error for syntax errors
    board = model.Board()
    bad_specs = (
        spec_unknown_type_name_1,
        spec_unknown_scope_left_operand,
        spec_unknown_scoped_member,
    )
    for bad_spec in bad_specs:
        with pytest.raises(OSError):
            board.add_spec('Spec', bad_spec)
def __init__(self):
    """Initialize the CLI: config, history file, prompt and an empty board."""
    self._init_config()
    # persistent command history lives under the user's config directory
    self.history_file = os.path.join(self.config_dir, HISTORY_FILE_NAME)
    super(CLI, self).__init__()
    self.intro = '*** BitPunch command-line interface ***'
    self.prompt = 'bitpunch> '
    # characters treated as token delimiters when completing expressions
    self.expr_operators_delim = (' ', '(', ')', '&', '<>')
    self.board = model.Board()
    # path of the spec file loaded via "use", if any (None until then)
    self.using_spec_file = None
def test_ldb(spec_ldb):
    """Browse a real LevelDB .ldb table file through the LDB spec.

    Navigates from the index block down to child data blocks and checks
    offsets, sizes and entry counts against known values for test1.ldb.
    """
    nb_entries = 237
    board = model.Board()
    board.add_spec('LDB', spec_ldb)
    # the fixture file lives next to this test module
    ldb_dir = os.path.dirname(os.path.realpath(__file__))
    board.add_data_source('data', path='{0}/test1.ldb'.format(ldb_dir))
    dtree = board.eval_expr('data <> LDB.LDBFile')
    index = dtree.eval_expr('?index')
    assert index.offset == 265031
    assert index.size == 5676
    index_block = index['?stored_block']
    # # 5 more bytes than the stored size because block size includes
    # # the trailer
    # assert index_block.get_location() == (265031, 5681)
    assert len(index_block.entries) == nb_entries
    # full iteration must visit exactly nb_entries items
    last_index = None
    for i, entry in enumerate(index_block.entries):
        last_index = i
    assert last_index == nb_entries - 1
    # restart offsets are 32-bit integers, hence 4 bytes each
    # NOTE(review): the next two asserts are the same check with the
    # multiplication operands swapped
    assert len(index_block.restarts) == index_block.nb_restarts
    assert index_block.restarts.get_size() == index_block.nb_restarts * 4
    assert index_block.restarts.get_size() == 4 * index_block.nb_restarts
    # get a heading child block
    child_handle = index_block.eval_expr('entries[1].value <> LDB.BlockHandle')
    assert child_handle.offset == 959
    assert child_handle.size == 1423
    child_block = child_handle['?stored_block']
    assert child_block.get_location() == (959, 1428)
    assert child_block.trailer.blocktype == 1 # compressed
    assert len(child_block.entries) == 5
    assert len(child_block.entries[2].value) == 1022
    # get an intermediate child block
    child_handle = index_block.eval_expr('entries[42].value <> LDB.BlockHandle')
    assert child_handle.offset == 33953
    assert child_handle.size == 821
    child_block = child_handle['?stored_block']
    assert child_block.get_location() == (33953, 826)
    assert child_block.trailer.blocktype == 1 # compressed
    assert len(child_block.entries) == 9
    assert len(child_block.entries[4].value) == 479
    # location is relative to the uncompressed block
    assert child_block.entries[4].value.get_location() == (1990, 479)
    # some more complex expression tests
    assert index_block.eval_expr(
        '(entries[42].value <> LDB.BlockHandle).offset') == 33953
    # this used to trigger a SEGV because of missing compilation step
    # for "[] byte" filter
    assert index_block.eval_expr(
        '(entries[42].value <> [] byte <> LDB.BlockHandle).offset') == 33953
def test_circular_dependency(params_circular_dependency):
    """Specs with circular type dependencies are rejected with OSError."""
    params = params_circular_dependency
    board = model.Board()
    if not params['circular']:
        # sanity check: the non-circular variant loads fine
        board.add_spec('Spec', params['spec'])
    else:
        with pytest.raises(OSError):
            board.add_spec('Spec', params['spec'])
def test_span_lazy_eval_error():
    """Fields independent of a broken span size must remain accessible."""
    data = conftest.to_bytes(data_span_lazy_eval_error)
    board = model.Board()
    board.add_data_source('data', data)
    board.add_spec('Spec', spec_span_lazy_eval)
    board.add_expr('dtree', 'data <> Spec.Schema')
    # lazy span size evaluation should allow accessing fields that do
    # not depend on the entry size, even if the entry size is
    # incorrect (formatted size is "foo")
    assert board.eval_expr('dtree.entries[0].header.magic') == 'magic'
    with pytest.raises(model.DataError):
        print board.eval_expr('dtree.entries[0].header.size')
    # '^' presumably accesses the raw (unfiltered) field value, which
    # succeeds even though the filtered value above raised -- TODO confirm
    assert board.eval_expr('^dtree.entries[0].header.size') == 'foo'
def test_user_filter_extern_use_spec():
    """An externally registered filter class is usable from spec expressions."""
    board = model.Board()
    board.add_data_source('data', conftest.to_bytes(data_file_user_filter_extern))
    board.use_spec(spec_file_user_filter_extern)
    # register the Python-implemented filter under the name the spec uses
    board.register_filter('godify', Godify)
    dtree = board.eval_expr('data <> Schema')
    print model.make_python_object(dtree.eval_expr('contents'))
    assert model.make_python_object(
        dtree.eval_expr('contents')) == \
        ["Do as she wants", "Do as He wants", "Do as thou want"]
def test_leveldb_log_empty(spec_log, data_log_empty):
    """An empty log yields no head blocks and an empty tail block."""
    board = model.Board()
    board.add_spec('Log', spec_log)
    board.add_data_source('data', data_log_empty)
    dtree = board.eval_expr('data <> Log.LogFile')
    head_blocks = dtree.head_blocks
    tail_records = dtree.tail_block.records
    assert model.make_python_object(head_blocks) == []
    assert model.make_python_object(tail_records) == []
    assert len(head_blocks) == 0
    assert len(tail_records) == 0
    assert dtree.eval_expr('sizeof(head_blocks)') == 0
    assert dtree.eval_expr('sizeof(tail_block.records)') == 0
    # indexing an empty array fails from both the native API and expressions
    with pytest.raises(IndexError):
        tail_records[0]
    with pytest.raises(ValueError):
        dtree.eval_expr('tail_block.records[0]')
def make_testcase(param):
    """Build a test case from a param dict holding 'data' and 'spec'.

    Loads the data and spec into a fresh board, evaluates the schema
    against the data, then stores 'dtree', 'data' and 'board' back into
    param and returns it. If evaluation fails with a known error type,
    prints the error description (when available) before re-raising.
    Setting BITPUNCH_TEST_ENABLE_CLI in the environment drops into the
    interactive CLI for manual inspection.
    """
    data = to_bytes(param['data'])
    board = model.Board()
    board.add_data_source('data', data)
    board.add_spec('Spec', param['spec'])
    try:
        param['dtree'] = board.eval_expr('data <> Spec.Schema')
    except (model.DataError, model.OutOfBoundsError, ValueError) as e:
        # BUGFIX: this used to read "if 'args' in e ...", but membership
        # tests on exception objects raise TypeError, so the description
        # was never printed and the original error got masked. Inspect
        # e.args directly, and only subscript when args[0] is a dict.
        if (len(e.args) > 0 and isinstance(e.args[0], dict)
                and 'description' in e.args[0]):
            print(e.args[0]['description'])
        raise
    param['data'] = data
    param['board'] = board
    if 'BITPUNCH_TEST_ENABLE_CLI' in os.environ:
        cli = CLI()
        cli.attach_board(board)
        cli.cmdloop()
    return param
def test_leveldb_log_multiblock(spec_log, data_log_multiblock):
    """A log spanning multiple 32KB blocks: one full head block plus a tail."""
    board = model.Board()
    board.add_spec('Log', spec_log)
    board.add_data_source('data', data_log_multiblock)
    dtree = board.eval_expr('data <> Log.LogFile')
    assert len(dtree.head_blocks) == 1
    assert len(dtree.head_blocks[0].records) == 762
    assert len(dtree.tail_block.records) == 3
    # expected byte sizes of the various spans
    expected_sizes = {
        'sizeof(head_blocks)': 32768,
        'sizeof(head_blocks[0])': 32768,
        'sizeof(head_blocks[0].records)': 32766,
        'sizeof(head_blocks[0].trailer)': 2,
        'sizeof(tail_block.records)': 43 * 3,
    }
    for size_expr, expected in expected_sizes.items():
        assert dtree.eval_expr(size_expr) == expected
    assert model.make_python_object(
        dtree.eval_expr('head_blocks[0].trailer')) == '\x00\x00'
    records = dtree.head_blocks[0].records
    # last valid index is fine; one past the end raises
    dummy = records[761]
    with pytest.raises(IndexError):
        dummy = records[762]
def test_leveldb_log_browse(spec_log, data_log_empty, data_log_small):
    """Iterating blocks and records must agree with len() on every input."""
    def iter_count(iterable):
        # count items by exhausting the iterator, mirroring user browsing
        total = 0
        for _ in iterable:
            total += 1
        return total

    for data in (data_log_empty, data_log_small):
        board = model.Board()
        board.add_spec('Log', spec_log)
        board.add_data_source('data', data)
        dtree = board.eval_expr('data <> Log.LogFile')
        head_count = 0
        for block in dtree.head_blocks:
            head_count += 1
            assert iter_count(block.records) == len(block.records)
        assert head_count == len(dtree.head_blocks)
        tail = dtree.tail_block
        assert iter_count(tail.records) == len(tail.records)
def make_testcase(param):
    """Prepare a board and data tree from param and return the enriched param."""
    update_test_file(param['file_data'])
    board = model.Board()
    board.add_spec('Spec', param['spec'])
    if 'data' not in param:
        # data is defined in the spec as "data" item
        param['dtree'] = board.eval_expr('Spec.data')
    else:
        # data is provided externally to the spec (other data sources
        # may be defined in the spec but not top-level)
        data = conftest.to_bytes(param['data'])
        board.add_data_source('data', data)
        param['dtree'] = board.eval_expr('data <> Spec.Schema')
        param['data'] = data
    if 'BITPUNCH_TEST_ENABLE_CLI' in os.environ:
        cli = CLI()
        cli.attach_data_tree(param['dtree'])
        cli.cmdloop()
    return param
def test_user_function_extern_use_spec():
    """An externally registered function is callable from spec expressions."""
    board = model.Board()
    board.use_spec(spec_file_user_function_extern)
    board.register_function('get_answer_to_universe', get_answer_to_universe)
    answer = board.eval_expr('the_answer')
    assert answer == 42
def test_filter_invalid(params_filter_invalid):
    """An invalid filter definition makes spec loading fail with OSError."""
    board = model.Board()
    with pytest.raises(OSError):
        board.add_spec('Spec', params_filter_invalid['spec'])
def test_named_exprs_invalid(params_named_exprs_invalid):
    """Invalid named expressions make spec loading fail with OSError."""
    spec = params_named_exprs_invalid['spec']
    board = model.Board()
    with pytest.raises(OSError):
        board.add_spec('Spec', spec)
def test_resolve_types_and_expr(spec):
    """Smoke-test: the spec loads onto a fresh board without raising."""
    model.Board().add_spec('Spec', spec)
# --- command-line driver: dump LDB records as JSON ---
# NOTE(review): 'ap' (the ArgumentParser) plus records_dumper/records_extractor
# are defined earlier in the file, outside this view.
ap.add_argument('input_file', type=argparse.FileType('r'), nargs=1,
                help='LDB input file')
ap.add_argument('output_file', type=argparse.FileType('wb'), nargs='?',
                help='JSON output file')
args = ap.parse_args()
input_file = args.input_file[0]
if args.output_file is not None:
    output_file = args.output_file
else:
    # default to standard output when no output file is given
    output_file = sys.stdout
sys.stderr.write('dumping LDB entries from {inf} to {outf}\n'.format(
    inf=input_file.name, outf=output_file.name))
script_dir = os.path.dirname(sys.argv[0])
# locate the LevelDB spec handler shipped with the bitpunch model package
leveldb_bp = model.find_handler(path='database.leveldb')
board = model.Board()
board.add_spec('LevelDB', leveldb_bp)
board.add_data_source('db', input_file)
ldb_model = board.eval_expr('db <> LevelDB.SSTFile')
# here goes the magic :)
n_dumped_records = records_dumper(records_extractor(ldb_model), output_file)
sys.stderr.write('dumped {0} records\n'.format(n_dumped_records))