def test_rf32_to_ri16(self):
    """Reading an rf32 file as ri16 must match an explicit float->short conversion."""
    path = self.make_file("test_source", "rf32", 1)

    # Reference pipeline: read natively as rf32, then convert with an
    # explicit float_to_short block.
    expected_source = sigmf.source(path, "rf32", False)
    convert = blocks.float_to_short(1, 1)
    expected_sink = blocks.vector_sink_s(1)

    # Pipeline under test: the source block converts to ri16 internally.
    actual_source = sigmf.source(path, "ri16", False)
    actual_sink = blocks.vector_sink_s(1)

    reference_tb = gr.top_block()
    reference_tb.connect(expected_source, convert)
    reference_tb.connect(convert, expected_sink)
    reference_tb.run()
    reference_tb.wait()

    test_tb = gr.top_block()
    test_tb.connect(actual_source, actual_sink)
    test_tb.run()
    test_tb.wait()

    e = expected_sink.data()
    a = actual_sink.data()
    np.testing.assert_almost_equal(e, a)
def test_ri8_to_ri32(self):
    """Reading an ri8 file as ri32 must match a char->float->int conversion chain."""
    path = self.make_file("test_source", "ri8", 1)

    # Reference pipeline: native ri8 read widened via float to int.
    expected_source = sigmf.source(path, "ri8", False)
    to_float = blocks.char_to_float(1, 1)
    to_int = blocks.float_to_int(1, 1)
    expected_sink = blocks.vector_sink_i(1)

    # Pipeline under test: the source block converts to ri32 internally.
    actual_source = sigmf.source(path, "ri32", False)
    actual_sink = blocks.vector_sink_i(1)

    reference_tb = gr.top_block()
    reference_tb.connect(expected_source, to_float)
    reference_tb.connect(to_float, to_int)
    reference_tb.connect(to_int, expected_sink)
    reference_tb.run()
    reference_tb.wait()

    test_tb = gr.top_block()
    test_tb.connect(actual_source, actual_sink)
    test_tb.run()
    test_tb.wait()

    e = expected_sink.data()
    a = actual_sink.data()
    np.testing.assert_almost_equal(e, a)
def test_bad_metafile(self):
    """A metafile containing invalid JSON must raise RuntimeError on init."""
    data, meta_json, filename, meta_file = self.make_file("bad_meta")

    # Corrupt the first byte of the metadata so it no longer parses.
    with open(meta_file, "r+") as f:
        f.seek(0)
        f.write("A")

    with self.assertRaises(RuntimeError):
        sigmf.source(filename, "cf32_le")
def test_annotations_to_tags(self):
    """Annotations in the metafile must surface as stream tags at their offsets."""
    data, meta_json, filename, meta_file = self.make_file(
        "annotation_tags")

    # Inject two annotations into the existing metadata, then rewrite
    # the file in place.
    with open(meta_file, "r+") as f:
        meta = json.load(f)
        meta['annotations'].append({
            "core:sample_start": 5,
            "test:string": "This is some string data",
            "test:more_data": True,
        })
        meta['annotations'].append({
            "core:sample_start": 10,
            "test:rating": 12,
        })
        f.seek(0)
        json.dump(meta, f, indent=4)
        f.truncate()

    # Run the file through a tag collector.
    file_source = sigmf.source(filename, "cf32_le")
    sink = blocks.vector_sink_c()
    collector = tag_collector()
    tb = gr.top_block()
    tb.connect(file_source, collector)
    tb.connect(collector, sink)
    tb.run()

    collector.assertTagExists(5, "test:string", "This is some string data")
    collector.assertTagExists(5, "test:more_data", True)
    collector.assertTagExists(10, "test:rating", 12)
def test_multiple_work_calls_tag_offsets(self):
    '''Test that if the work is called multiple times, tags still
    end up in the right places'''
    # A file large enough that the scheduler needs several work calls.
    num_samps = 4000000
    data, meta_json, filename, meta_file = self.make_file(
        "multi_work", N=num_samps)

    # BUG FIX: use floor division. On Python 3 "num_samps / 2" is the
    # float 2000000.0, which gets serialized into the JSON metadata and
    # compared against an integer tag offset below.
    mid_point = num_samps // 2

    # Add a capture in the middle of the file...
    meta_json["captures"].append({
        "core:sample_start": mid_point,
        "test:a": 1
    })
    # ...and on the last sample.
    meta_json["captures"].append({
        "core:sample_start": num_samps - 1,
        "test:b": 2
    })
    with open(meta_file, "w") as f:
        json.dump(meta_json, f)

    file_source = sigmf.source(filename, "cf32_le")
    sink = blocks.vector_sink_c()
    collector = tag_collector()
    tb = gr.top_block()
    tb.connect(file_source, collector)
    tb.connect(collector, sink)
    tb.start()
    tb.wait()
    print(collector.tags)
    collector.assertTagExistsMsg(mid_point, "test:a", 1,
                                 "missing tag!", self)
    collector.assertTagExistsMsg(num_samps - 1, "test:b", 2,
                                 "missing tag!", self)
def test_nonexistant_files(self):
    """Creating a source whose meta or data file is missing must raise."""
    # Missing metadata file.
    data, meta_json, filename, meta_file = self.make_file("no_meta")
    os.remove(meta_file)
    # IMPROVED: assertRaises instead of a bare "except:", which would
    # also swallow SystemExit/KeyboardInterrupt and hide real failures.
    with self.assertRaises(Exception):
        sigmf.source(filename, "cf32_le")

    # Missing data file.
    data, meta_json, filename, meta_file = self.make_file("no_data")
    os.remove(filename)
    with self.assertRaises(Exception):
        sigmf.source(filename)
def test_small_file(self):
    """A one-sample file should round-trip through the source intact."""
    data, meta_json, filename, meta_file = self.make_file("small_files",
                                                          N=1)

    # Run the tiny file through the flowgraph.
    file_source = sigmf.source(filename, "cf32_le")
    sink = blocks.vector_sink_c()
    tb = gr.top_block()
    tb.connect(file_source, sink)
    tb.run()

    # The emitted samples must equal what was written.
    self.assertComplexTuplesAlmostEqual(data, sink.data())
def test_json_types(self):
    """Every JSON value type in an annotation must round-trip into a tag."""
    data, meta_json, filename, meta_file = self.make_file("json_types")

    # Rewrite the metafile with one annotation exercising each JSON type.
    with open(meta_file, "r+") as f:
        meta = json.load(f)
        meta['annotations'].append({
            "core:sample_start": 1,
            "core:sample_count": 2,
            "test:int": -2,
            "test:int64": 278202993021,
            "test:uint": 2,
            "test:uint2": 2**32 + 2,
            "test:double": 2.2,
            "test:neg_double": -2.2,
            "test:bool1": True,
            "test:bool2": False,
            "test:null": None,
            "test:string": "foo",
        })
        f.seek(0)
        json.dump(meta, f, indent=4)
        f.truncate()

    # Collect the tags emitted by the source.
    file_source = sigmf.source(filename, "cf32_le")
    sink = blocks.vector_sink_c()
    collector = tag_collector()
    tb = gr.top_block()
    tb.connect(file_source, collector)
    tb.connect(collector, sink)
    tb.start()
    tb.wait()

    # Verify every type came through with its value preserved.
    collector.assertTagExists(1, "test:int", -2)
    collector.assertTagExists(1, "test:int64", 278202993021)
    collector.assertTagExists(1, "test:uint", 2)
    collector.assertTagExists(1, "test:uint2", 2**32 + 2)
    collector.assertTagExists(1, "test:double", 2.2)
    collector.assertTagExists(1, "test:neg_double", -2.2)
    collector.assertTagExists(1, "test:bool1", True)
    collector.assertTagExists(1, "test:bool2", False)
    collector.assertTagExists(1, "test:null", None)
    collector.assertTagExists(1, "test:string", "foo")
def test_normal_run(self):
    """A plain read of a normal file must reproduce the written samples."""
    data, meta_json, filename, meta_file = self.make_file("normal")

    # Stream the file straight into a vector sink.
    file_source = sigmf.source(filename, "cf32_le")
    sink = blocks.vector_sink_c()
    tb = gr.top_block()
    tb.connect(file_source, sink)
    tb.run()

    # Everything written should come back unchanged.
    self.assertComplexTuplesAlmostEqual(data, sink.data())
def test_begin_tags(self):
    """A configured begin tag must appear exactly once at offset zero."""
    data, meta_json, filename, meta_file = self.make_file("begin")

    file_source = sigmf.source(filename, "cf32_le")
    file_source.set_begin_tag(pmt.to_pmt("TEST"))
    sink = blocks.vector_sink_c()
    collector = tag_collector()
    tb = gr.top_block()
    tb.connect(file_source, collector)
    tb.connect(collector, sink)
    tb.run()

    # Exactly one "TEST" tag should sit at offset 0.
    matches = [
        t for t in collector.tags
        if t["offset"] == 0 and t["key"] == "TEST"
    ]
    self.assertEqual(len(matches), 1)
def test_cf32_to_ci8(self):
    """Reading cf32 data as ci8 and re-sinking it should quantize to int8."""
    path = self.make_file("test_source", "cf32", 10)

    # Route the converted stream into a ci8 sink file.
    out_path = os.path.join(self.test_dir, "test_sink")
    converted_source = sigmf.source(path, "ci8", False)
    converted_sink = sigmf.sink("ci8", out_path)
    tb = gr.top_block()
    tb.connect(converted_source, converted_sink)
    tb.run()
    tb.wait()

    # Compare the float originals to the int8 output; decimal=0 allows
    # for quantization rounding.
    e = self.read_complex_data(path, 'f', 4)
    a = self.read_complex_data(out_path, 'b', 1)
    np.testing.assert_almost_equal(e, a, decimal=0)
def test_roundtrip_offset_initial_capture(self):
    """Test that if the first capture segment has an offset, then it
    gets correctly offset and output when roundtripped from a source
    to a sink"""
    data, meta_json, filename, meta_file = self.make_file("offset")

    # Shift the first capture segment so the initial 4 samples are skipped,
    # and tag it with a frequency to track through the roundtrip.
    adjust_size = 4
    with open(meta_file, "r+") as f:
        fdata = json.load(f)
        fdata['captures'][0]["core:sample_start"] = adjust_size
        fdata['captures'][0]["core:frequency"] = 2.4e9
        f.seek(0)
        json.dump(fdata, f, indent=4)
        f.truncate()

    data_start_size = os.path.getsize(filename)
    out_data_file, out_json_file = self.temp_file_names()

    # Source -> sink roundtrip, with a tag_debug tap for diagnostics.
    file_source = sigmf.source(filename, "cf32_le")
    file_sink = sigmf.sink("cf32_le", out_data_file)
    tagd = blocks.tag_debug(gr.sizeof_gr_complex, "test")
    tb = gr.top_block()
    tb.connect(file_source, file_sink)
    tb.connect(file_source, tagd)
    tb.start()
    tb.wait()

    # Output must shrink by exactly the skipped samples:
    # 4 samples * 2 floats * 4 bytes.
    data_end_size = os.path.getsize(out_data_file)
    dropped_samples = adjust_size * 2 * 4
    self.assertEqual(data_start_size - dropped_samples, data_end_size,
                     "Wrong data size")

    with open(out_json_file, "r") as f:
        meta = json.load(f)
    print(meta)
    self.assertEqual(len(meta["annotations"]), 0,
                     "Shouldn't be any annotations in file")
    self.assertEqual(len(meta["captures"]), 1,
                     "Should only be 1 capture segment in file")
    self.assertEqual(meta["captures"][0]["core:frequency"], 2.4e9,
                     "frequency tag is missing")
def test_first_capture_segment_non_zero_start(self):
    '''Test to check that if the first capture segment has a non-zero
    start index, then we should skip that part of the file'''
    filename_data = os.path.join(self.test_dir,
                                 "capture_not_zero.sigmf-data")
    filename_meta = os.path.join(self.test_dir,
                                 "capture_not_zero.sigmf-meta")

    skip_samples = 500
    normal_samples = 500
    # Interleaved I/Q floats: 1.0 in the to-be-skipped region, 2.0 after.
    a = array.array("f")
    for i in range(skip_samples * 2):
        a.append(1)
    for i in range(normal_samples * 2):
        a.append(2)
    # BUG FIX: array.tofile() requires a binary-mode file object;
    # opening with "w" (text mode) raises TypeError on Python 3.
    with open(filename_data, "wb") as f:
        a.tofile(f)

    metadata = {
        "global": {
            "core:datatype": "cf32_le",
            "core:version": "0.0.1"
        },
        "captures": [{
            "core:sample_start": skip_samples
        }]
    }
    with open(filename_meta, "w") as f:
        json.dump(metadata, f)

    file_source = sigmf.source(filename_data, "cf32_le", repeat=False)
    sink = blocks.vector_sink_c()
    tb = gr.top_block()
    tb.connect(file_source, sink)
    tb.start()
    tb.wait()

    # Only the post-skip samples (value 2 + 2j) should be emitted.
    written_data = sink.data()
    for sample in written_data:
        self.assertEqual(sample, 2 + 2j)
def test_command_message(self):
    """A set_begin_tag command message must take effect on a running source."""
    data, meta_json, filename, meta_file = self.make_file("begin")

    # Repeat forever so the command has time to land mid-stream.
    file_source = sigmf.source(filename, "cf32_le", repeat=True)
    msg = {"command": "set_begin_tag", "tag": "test"}
    generator = message_generator(msg)
    sink = blocks.vector_sink_c()
    collector = tag_collector()

    tb = gr.top_block()
    tb.msg_connect((generator, 'messages'), (file_source, 'command'))
    tb.connect(file_source, collector)
    tb.connect(collector, sink)
    tb.start()
    sleep(1)
    tb.stop()
    tb.wait()

    # Apart from rx_time, every tag should carry the commanded key.
    for tag in collector.tags:
        if tag["key"] != "rx_time":
            self.assertEqual(tag["key"], "test")
def test_capture_segments_to_tags(self):
    """Capture segments must become rx_freq tags at their sample offsets."""
    data, meta_json, filename, meta_file = self.make_file("capture_segs")

    # Append two capture segments with distinct frequencies.
    with open(meta_file, "r+") as f:
        meta = json.load(f)
        meta['captures'].append({
            "core:sample_start": 5,
            "core:frequency": 2.4e9,
        })
        meta['captures'].append({
            "core:sample_start": 10,
            "core:frequency": 2.44e9,
        })
        f.seek(0)
        json.dump(meta, f, indent=4)
        f.truncate()

    # Run through a collector with a begin tag set as well.
    file_source = sigmf.source(filename, "cf32_le")
    file_source.set_begin_tag(pmt.to_pmt("TEST"))
    sink = blocks.vector_sink_c()
    collector = tag_collector()
    tb = gr.top_block()
    tb.connect(file_source, collector)
    tb.connect(collector, sink)
    tb.run()

    # There should be 3 tags
    print(collector.tags)
    begin_tags = [
        t for t in collector.tags
        if t["offset"] == 0 and t["key"] == "TEST"
    ]
    self.assertEqual(len(begin_tags), 1)
    collector.assertTagExists(5, "rx_freq", 2.4e9)
    collector.assertTagExists(10, "rx_freq", 2.44e9)
def test_tag_roundtrip(self):
    """Tags written through a sink must survive a read back via a source."""
    # Values to inject as capture metadata (offset 0) and annotations.
    freq = 2.4e9
    samp_rate = 100e6
    test_index = 1000
    time = tuple([1222277384, .0625])
    test_a = 22.3125
    test_b = "asdf"
    test_c = True
    test_index_2 = 2000
    test_d = 18.125
    test_e = "jkl;"
    test_f = False
    injector = advanced_tag_injector([
        (0, {"rx_time": time}),
        (0, {"rx_freq": freq}),
        (0, {"rx_rate": samp_rate}),
        (test_index, {"test:a": test_a, "test:b": test_b, "test:c": test_c}),
        (test_index_2, {"test_d": test_d, "test_e": test_e, "test_f": test_f}),
    ])
    src = analog.sig_source_c(0, analog.GR_CONST_WAVE, 0, 0, (1 + 1j))
    num_samps = int(1e6)
    head = blocks.head(gr.sizeof_gr_complex, num_samps)
    data_file, json_file = self.temp_file_names()
    file_sink = sigmf.sink("cf32_le", data_file)

    write_tb = gr.top_block()
    write_tb.connect(src, head)
    write_tb.connect(head, injector)
    write_tb.connect(injector, file_sink)
    write_tb.start()
    write_tb.wait()

    # Make sure the data file got written, completely.
    self.assertTrue(os.path.exists(data_file), "Data file missing")
    self.assertEqual(os.path.getsize(data_file),
                     gr.sizeof_gr_complex * num_samps,
                     "Data file incomplete")

    # Ensure the metadata on disk matches what was injected.
    with open(json_file, "r") as f:
        meta_str = f.read()
        meta = json.loads(meta_str)
    print(meta)
    self.assertEqual(meta["captures"][0]["core:frequency"], freq,
                     "Bad metadata, frequency")
    # Should only be one capture segment
    self.assertEqual(len(meta["captures"]), 1)
    self.assertEqual(meta["global"]["core:sample_rate"], samp_rate,
                     "Bad metadata, samp_rate")
    self.assertEqual(meta["annotations"][0]["test:a"], test_a,
                     "bad test_a value")
    self.assertEqual(meta["annotations"][0]["test:b"], test_b,
                     "bad test_b value")
    self.assertEqual(meta["annotations"][0]["test:c"], test_c,
                     "bad test_c value")
    self.assertEqual(meta["annotations"][0]["core:sample_start"],
                     test_index, "Bad test index")
    self.assertEqual(meta["annotations"][1]["unknown:test_d"], test_d,
                     "bad test_d value")
    self.assertEqual(meta["annotations"][1]["unknown:test_e"], test_e,
                     "bad test_e value")
    self.assertEqual(meta["annotations"][1]["unknown:test_f"], test_f,
                     "bad test_f value")
    self.assertEqual(meta["annotations"][1]["core:sample_start"],
                     test_index_2, "Bad test index")

    # Read the file back and verify the tags reappear on the stream.
    file_source = sigmf.source(data_file, "cf32_le")
    collector = tag_collector()
    sink = blocks.vector_sink_c()
    read_tb = gr.top_block()
    read_tb.connect(file_source, collector)
    read_tb.connect(collector, sink)
    read_tb.start()
    read_tb.wait()
    collector.assertTagExists(0, "rx_rate", samp_rate)
    collector.assertTagExists(0, "rx_time", time)
    collector.assertTagExists(0, "rx_freq", freq)
    collector.assertTagExists(test_index, "test:a", test_a)
    collector.assertTagExists(test_index, "test:b", test_b)
    collector.assertTagExists(test_index, "test:c", test_c)
    collector.assertTagExists(test_index_2, "test_d", test_d)
    collector.assertTagExists(test_index_2, "test_e", test_e)
    collector.assertTagExists(test_index_2, "test_f", test_f)
def test_repeat(self):
    """Test to ensure that repeat works correctly"""
    N = 1000
    annos = [{
        "core:sample_start": 1,
        "core:sample_count": 1,
        "test:foo": "a",
    }]
    data, meta_json, filename, meta_file = self.make_file(
        "repeat", N=N, annotations=annos)
    file_source = sigmf.source(filename, "cf32_le", repeat=True)
    # Add begin tag to test
    begin_tag_val = pmt.to_pmt("BEGIN")
    file_source.set_begin_tag(begin_tag_val)
    sink = blocks.vector_sink_c()
    tb = gr.top_block()
    tb.connect(file_source, sink)
    tb.start()
    # sleep for a very short amount of time to allow for some repeats
    # to happen
    sleep(.005)
    tb.stop()
    tb.wait()

    data_len = len(data)
    written_data = sink.data()
    written_len = len(written_data)
    # BUG FIX: use floor division. On Python 3 "/" yields a float, which
    # makes the range() calls below raise TypeError and breaks the slice
    # index for the trailing partial repetition.
    num_reps = written_len // data_len
    # check that repeats occurred at all
    self.assertGreater(num_reps, 1, "No repeats occurred to test!")

    # Each repetition must start with exactly one BEGIN tag.
    for i in range(num_reps):
        tags_for_offset = [
            t for t in sink.tags()
            if t.offset == i * N
            and pmt.to_python(t.key) == pmt.to_python(begin_tag_val)
        ]
        self.assertEqual(len(tags_for_offset), 1,
                         "begin tag missing in repeat")

    def check_for_tag(l, key, val, err):
        # Assert exactly one tag with this key/value exists in l.
        tags = [
            t for t in l
            if pmt.to_python(t.key) == key and pmt.to_python(t.value) == val
        ]
        self.assertEqual(len(tags), 1, err)

    # The annotation tags must reappear at offset 1 of every repetition.
    for i in range(num_reps):
        tags_for_offset = [
            t for t in sink.tags() if t.offset == (i * N) + 1
        ]
        self.assertEqual(len(tags_for_offset), 2,
                         "Wrong number of tags for offset")
        check_for_tag(tags_for_offset, "core:sample_count", 1,
                      "core:sample_count missing")
        check_for_tag(tags_for_offset, "test:foo", "a",
                      "test:foo missing")

    # check that the data is correctly repeated
    for rep in range(num_reps):
        self.assertComplexTuplesAlmostEqual(
            data, written_data[rep * data_len:(rep + 1) * data_len])
    # Any trailing partial repetition must match a prefix of the data.
    last_partial = written_data[num_reps * data_len:]
    partial_data = data[:len(last_partial)]
    self.assertComplexTuplesAlmostEqual(last_partial, partial_data)