import os

# Assumed module-level setup for this excerpt: the directory holding the
# test data and BUFR tables.
test_dir = os.path.dirname(os.path.abspath(__file__))


def test_bufr_read(monkeypatch):
    """Test reading data and data quality on a Metop-A MHS BUFR file."""
    monkeypatch.setenv("BUFR_TABLES", os.path.join(test_dir, "bufrtables"))
    monkeypatch.setenv("BUFR_TABLES_TYPE", "bufrdc")
    from trollbufr import load_file
    from trollbufr.bufr import Bufr
    test_file = os.path.join(test_dir, "metop_mhs.bufr")
    bufr = Bufr(os.environ["BUFR_TABLES_TYPE"], os.environ["BUFR_TABLES"])
    # load the test file and iterate over the BUFR messages in it
    for blob, size, header in load_file.next_bufr(test_file):
        # test header and size of the first BUFR
        assert header == "IEMX01 EUMP 150722"
        assert size == 48598
        # decode the BUFR message
        bufr.decode(blob)
        # iterate over the subsets
        for report in bufr.next_subset():
            i = 0
            # iterate over all descriptor/data sets
            for k, m, (v, q) in report.next_data():
                i += 1
                if i > 4:
                    # after the first 4 descriptor/data sets just count
                    continue
                if i <= 3:
                    # the type-marker for the first 3 descriptors is not None
                    assert m is not None
                    continue
                # 4th entry is plain data: assert descriptor, value, quality
                assert m is None
                assert k == 8070
                assert v == 3
                assert q is None
                # look up and assert name and unit
                kn, ku = bufr.get_tables().lookup_elem(k)
                assert kn.strip() == "TOVS/ATOVS PRODUCT QUALIFIER"
                assert ku.strip() == "CODE TABLE 8070"
            # assert there were 88 descriptors in the subset
            assert i == 88
            # leave the for-loops, all tests are done
            break
        break
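
# The test above exercises the full read path: next_bufr() splits a file into
# messages, Bufr.decode() decodes one, and next_subset()/next_data() walk the
# decoded values. A minimal stand-alone sketch of that same flow, outside
# pytest, could look as follows; the table path/type arguments and the input
# file are placeholder assumptions, not fixed by the test above.

def dump_bufr_values(path, tables_path, tables_type="bufrdc"):
    """Print descriptor/value/quality of every subset in every BUFR in `path`."""
    from trollbufr import load_file
    from trollbufr.bufr import Bufr
    bufr = Bufr(tables_type, tables_path)
    for blob, size, header in load_file.next_bufr(path):
        bufr.decode(blob)
        for report in bufr.next_subset():
            for k, m, (v, q) in report.next_data():
                if m is not None:
                    # structural marker, no plain data value
                    continue
                print(k, v, q)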
import logging
import os
from datetime import datetime
from gzip import open as gzip_open
from json import dump

import trollbufr.load_file
from trollbufr.bufr import Bufr
# NOTE: the import path of TabBType is assumed; adjust it to the installed
# trollbufr version if it differs.
from trollbufr.coder.bufr_types import TabBType

logger = logging.getLogger(__name__)


def runner(args):
    """Decode all BUFR in a file and write the reports as gzipped GeoJSON."""
    bufr = Bufr(os.environ["BUFR_TABLES_TYPE"], os.environ["BUFR_TABLES"])
    with open(args.filename[0], "rb") as fh_in:
        bufr_data = fh_in.read()
    # Table B descriptors accepted as station identifier
    if args.amtl:
        station_descr = (1002,)
    else:
        station_descr = (1002, 1018)
    try:
        with gzip_open("%s.geojson.gz" % args.filename[0], "wt") as fh_out:
            i = 0
            if args.jsonp:
                fh_out.write('appendData( ')
            fh_out.write('{ "type" : "FeatureCollection",\n')
            fh_out.write('"datetime_current" : "%s",\n'
                         % datetime.utcnow().strftime("%Y-%m-%d %H:%M"))
            fh_out.write('"features" : [')
            for blob, size, header in trollbufr.load_file.next_bufr(
                    bin_data=bufr_data):
                bufr.decode_meta(blob)
                tabl = bufr.get_tables()
                for report in bufr.next_subset():
                    station_accepted = False
                    feature_set = {
                        "type": "Feature",
                        "geometry": {"type": "Point", "coordinates": []},
                        "properties": {},
                    }
                    feature_coordinates = [0, 0, 0]
                    feature_properties = {"abbreviated_heading": header}
                    try:
                        j = 0
                        for descr_entry in report.next_data():
                            # skip structural markers (loops, sequences, ...)
                            if descr_entry.mark is not None:
                                continue
                            # latitude
                            if descr_entry.descr in (5001, 5002, 27001, 27002):
                                feature_coordinates[1] = descr_entry.value
                                continue
                            # longitude
                            if descr_entry.descr in (6001, 6002, 28001, 28002):
                                feature_coordinates[0] = descr_entry.value
                                continue
                            # height
                            if (descr_entry.descr in (7001, 7002, 7007, 7030, 10007)
                                    and descr_entry.value):
                                feature_coordinates[2] = descr_entry.value
                                continue
                            # accept the report once a station identifier is present
                            if (descr_entry.descr in station_descr
                                    and descr_entry.value is not None):
                                station_accepted = True
                            # d_info provides name, unit, type, shortname
                            d_info = tabl.lookup_elem(descr_entry.descr)
                            if d_info.unit.upper() in ("CCITT IA5", "NUMERIC",
                                                       "CODE TABLE", "FLAG TABLE"):
                                d_unit = None
                            else:
                                d_unit = d_info.unit
                            if descr_entry.value is None or d_info.type in (
                                    TabBType.NUMERIC, TabBType.LONG, TabBType.DOUBLE):
                                d_value = descr_entry.value
                            elif d_info.type in (TabBType.CODE, TabBType.FLAG):
                                # resolve code/flag table entries to their meaning
                                d_value = tabl.lookup_codeflag(descr_entry.descr,
                                                               descr_entry.value)
                            elif isinstance(descr_entry.value, bytes):
                                # character strings (CCITT IA5) may arrive as bytes
                                d_value = descr_entry.value.decode("latin1")
                            else:
                                d_value = str(descr_entry.value)
                            feature_properties["data_%03d" % j] = {
                                "name": d_info.name,
                                "value": d_value,
                            }
                            if d_info.shortname is not None:
                                feature_properties["data_%03d" % j][
                                    "shortname"] = d_info.shortname
                            if d_unit is not None:
                                feature_properties["data_%03d" % j][
                                    "unit"] = str(d_unit)
                            j += 1
                    except Exception as e:
                        # skip reports with unknown descriptors, re-raise anything else
                        station_accepted = False
                        if "Unknown descriptor" not in str(e):
                            raise
                    if station_accepted:
                        if i:
                            fh_out.write(",\n")
                        i += 1
                        feature_set["geometry"]["coordinates"] = feature_coordinates
                        feature_set["properties"] = feature_properties
                        dump(feature_set, fh_out, indent=3, separators=(',', ': '))
            fh_out.write(']\n}\n')
            if args.jsonp:
                fh_out.write(');\n')
    except Exception as e:
        logger.info(e, exc_info=1)
    return 0
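
# A sketch of the command-line wiring that would drive runner(). Only the
# attribute names filename, amtl and jsonp are fixed by the code above; the
# option spellings and help texts are hypothetical. runner() additionally
# expects BUFR_TABLES and BUFR_TABLES_TYPE to be set in the environment.

def main():
    import argparse
    parser = argparse.ArgumentParser(
        description="Convert BUFR reports to gzipped GeoJSON.")
    parser.add_argument("filename", nargs=1,
                        help="file containing BUFR messages")
    parser.add_argument("--amtl", action="store_true",
                        help="match stations by WMO station number (001002) only")
    parser.add_argument("--jsonp", action="store_true",
                        help="wrap the output in a JSON-P callback")
    return runner(parser.parse_args())


if __name__ == "__main__":
    import sys
    sys.exit(main())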