def test_prepare_read():
    expect(prepare_read(open('tests/data/real_file'))) == \
        ['This is a test file-type object\n']
    test_list = ['This is a test list-type object', 'with two elements']
    expect(prepare_read(test_list)) == \
        ['This is a test list-type object', 'with two elements']
    expect(prepare_read(open('tests/data/real_file'), 'read')) == \
        'This is a test file-type object\n'
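# The tests above treat prepare_read() as a helper that normalises file-like,
# list and string inputs into line data, with an optional second argument
# naming the reader method to call on file-like objects ('read' in the last
# assertion).  A minimal sketch of such a helper is shown below; the default
# method name, the string-as-path branch and the error message are
# assumptions for illustration, not the library's actual implementation.
def _prepare_read_sketch(data, method='readlines'):
    """Normalise assorted input types into line data (illustrative only)."""
    if hasattr(data, method):
        # File-like object: call the requested reader method.
        return getattr(data, method)()
    elif isinstance(data, list):
        # Already a list of lines, return it unchanged.
        return data
    elif isinstance(data, str):
        # Assumption: treat a bare string as a path and read the named file.
        with open(data) as handle:
            return getattr(handle, method)()
    else:
        raise TypeError('Unable to handle data of type %r' % type(data))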
def import_locations(self, marker_file):
    """Parse Xearth data files

    ``import_locations()`` returns a dictionary with keys containing the
    xearth_ name, and values consisting of a :class:`Xearth` object and a
    string containing any comment found in the marker file.

    It expects Xearth marker files in the following format::

        # Comment

        52.015     -0.221 "Home"          # James Rowe's home
        52.6333    -2.5   "Telford"

    Any empty line or line starting with a '#' is ignored.  All data lines
    are whitespace-normalised, so actual layout should have no effect.

    The above file processed by ``import_locations()`` will return the
    following ``dict`` object::

        {'Home': point.Point(52.015, -0.221, "James Rowe's home"),
         'Telford': point.Point(52.6333, -2.5, None)}

    .. note::
       This function also handles the extended xplanet_ marker files whose
       points can optionally contain added xplanet specific keywords for
       defining colours and fonts.

    >>> markers = Xearths(open("xearth"))
    >>> for key, value in sorted(markers.items()):
    ...     print("%s - %s" % (key, value))
    Home - James Rowe's home (N52.015°; W000.221°)
    Telford - N52.633°; W002.500°

    :type marker_file: ``file``, ``list`` or ``str``
    :param marker_file: Xearth marker data to read
    :rtype: ``dict``
    :return: Named locations with optional comments

    .. _xearth: http://www.cs.colorado.edu/~tuna/xearth/
    .. _xplanet: http://xplanet.sourceforge.net/
    """
    self._marker_file = marker_file
    data = utils.prepare_read(marker_file)

    for line in data:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        chunk = line.split("#")
        payload = chunk[0]
        comment = chunk[1].strip() if len(chunk) == 2 else None

        # Need maximum split of 2, because name may contain whitespace
        latitude, longitude, name = payload.split(None, 2)
        name = name.strip()
        # Find matching start and end quote, and keep only the contents
        name = name[1:name.find(name[0], 1)]
        self[name.strip()] = Xearth(latitude, longitude, comment)
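# A short usage sketch for the Xearth reader above.  Marker data may be
# passed as a list of strings (see the ``:type marker_file:`` note), so no
# file on disk is required.  The ``Xearths`` class name is taken from the
# doctest, and this assumes its constructor forwards the argument to
# ``import_locations()`` as the doctest suggests; the sample markers are
# illustrative only.
def _example_xearth_import():
    sample_markers = [
        '# Comment',
        '52.015     -0.221 "Home"          # James Rowe\'s home',
        '52.6333    -2.5   "Telford"',
    ]
    markers = Xearths(sample_markers)
    # Expected keys: 'Home' and 'Telford', with any trailing comment stored
    # on the Xearth value.
    return markers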
def import_locations(self, data, index="WMO"): """Parse NOAA weather station data files ``import_locations()`` returns a dictionary with keys containing either the WMO or ICAO identifier, and values that are ``Station`` objects that describes the large variety of data exported by NOAA_. It expects data files in one of the following formats:: 00;000;PABL;Buckland, Buckland Airport;AK;United States;4;65-58-56N;161-09-07W;;;7;; 01;001;ENJA;Jan Mayen;;Norway;6;70-56N;008-40W;70-56N;008-40W;10;9;P 01;002;----;Grahuken;;Norway;6;79-47N;014-28E;;;;15; or:: AYMD;94;014;Madang;;Papua New Guinea;5;05-13S;145-47E;05-13S;145-47E;3;5;P AYMO;--;---;Manus Island/Momote;;Papua New Guinea;5;02-03-43S;147-25-27E;;;4;; AYPY;94;035;Moresby;;Papua New Guinea;5;09-26S;147-13E;09-26S;147-13E;38;49;P Files containing the data in this format can be downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)`'s site in their `station location page`_. WMO indexed files downloaded from the :abbr:`NOAA (National Oceanographic and Atmospheric Administration)` site when processed by ``import_locations()`` will return ``dict`` object of the following style:: {'00000': Station('PABL', 'Buckland, Buckland Airport', 'AK', 'United States', 4, 65.982222. -160.848055, None, None, 7, False), '01001'; Station('ENJA', Jan Mayen, None, 'Norway', 6, 70.933333, -7.333333, 70.933333, -7.333333, 10, 9, True), '01002': Station(None, 'Grahuken', None, 'Norway', 6, 79.783333, 13.533333, None, None, 15, False)} And ``dict`` objects such as the following will be created when ICAO indexed data files are processed:: {'AYMD': Station("94", "014", "Madang", None, "Papua New Guinea", 5, -5.216666, 145.783333, -5.216666, 145.78333333333333, 3, 5, True, 'AYMO': Station(None, None, "Manus Island/Momote", None, "Papua New Guinea", 5, -2.061944, 147.424166, None, None, 4, False, 'AYPY': Station("94", "035", "Moresby", None, "Papua New Guinea", 5, -9.433333, 147.216667, -9.433333, 147.216667, 38, 49, True} >>> stations = Stations(open("WMO_stations")) >>> for key, value in sorted(stations.items()): ... print("%s - %s" % (key, value)) 00000 - Buckland, Buckland Airport (PABL - N65.982°; W161.152°) 01001 - Jan Mayen (ENJA - N70.933°; W008.667°) 01002 - Grahuken (N79.783°; E014.467°) >>> stations = Stations(open("ICAO_stations"), "ICAO") >>> for key, value in sorted(stations.items()): ... print("%s - %s" % (key, value)) AYMD - Madang (94014 - S05.217°; E145.783°) AYMO - Manus Island/Momote (S02.062°; E147.424°) AYPY - Moresby (94035 - S09.433°; E147.217°) >>> stations = Stations(open("broken_WMO_stations")) >>> for key, value in sorted(stations.items()): ... print("%s - %s" % (key, value)) 71046 - Komakuk Beach, Y. T. (CWKM - N69.617°; W140.200°) 71899 - Langara, B. C. (CWLA - N54.250°; W133.133°) >>> stations = Stations(open("broken_ICAO_stations"), "ICAO") >>> for key, value in sorted(stations.items()): ... print("%s - %s" % (key, value)) KBRX - Bordeaux (N41.933°; W104.950°) KCQB - Chandler, Chandler Municipal Airport (N35.724°; W096.820°) KTYR - Tyler, Tyler Pounds Field (N32.359°; W095.404°) :type data: ``file``, ``list`` or ``str`` :param data: NOAA station data to read :type index: ``str`` :param index: The identifier type used in the file :rtype: ``dict`` :return: WMO locations with `Station` objects :raise FileFormatError: Unknown file format .. _NOAA: http://weather.noaa.gov/ .. 
_station location page: http://weather.noaa.gov/tg/site.shtml """ self._data = data data = utils.prepare_read(data) for line in data: line = line.strip() chunk = line.split(";") if not len(chunk) == 14: if index == "ICAO": # Some entries only have 12 or 13 elements, so we assume 13 # and 14 are None. Of the entries I've hand checked this # assumption would be correct. logging.debug("Extending ICAO `%s' entry, because it is " "too short to process" % line) chunk.extend(["", ""]) elif index == "WMO" and len(chunk) == 13: # A few of the WMO indexed entries are missing their RBSN # fields, hand checking the entries for 71046 and 71899 # shows that they are correct if we just assume RBSN is # false. logging.debug("Extending WMO `%s' entry, because it is " "too short to process" % line) chunk.append("") else: raise utils.FileFormatError("NOAA") if index == "WMO": identifier = "".join(chunk[:2]) alt_id = chunk[2] elif index == "ICAO": identifier = chunk[0] alt_id = "".join(chunk[1:3]) else: raise ValueError("Unknown format `%s'" % index) if alt_id in ("----", "-----"): alt_id = None name = chunk[3] state = chunk[4] if chunk[4] else None country = chunk[5] wmo = int(chunk[6]) if chunk[6] else None point_data = [] for i in chunk[7:11]: if not i: point_data.append(None) continue # Some entries in nsd_cccc.txt are of the format "DD-MM- # N", so we just take the spaces to mean 0 seconds. if " " in i: logging.debug("Fixing unpadded location data in `%s' entry" % line) i = i.replace(" ", "0") values = map(int, i[:-1].split("-")) if i[-1] in ("S", "W"): values = [-i for i in values] point_data.append(point.utils.to_dd(*values)) latitude, longitude, ua_latitude, ua_longitude = point_data altitude = int(chunk[11]) if chunk[11] else None ua_altitude = int(chunk[12]) if chunk[12] else None rbsn = False if not chunk[13] else True self[identifier] = Station( alt_id, name, state, country, wmo, latitude, longitude, ua_latitude, ua_longitude, altitude, ua_altitude, rbsn, )
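# A short usage sketch for the NOAA reader above.  Station data may be passed
# as a list of strings, so a documented sample record can be parsed directly.
# The ``Stations`` class name is taken from the doctests, and this assumes
# its constructor forwards the data (and optional index) to
# ``import_locations()``; the record below is the WMO sample line from the
# docstring.
def _example_noaa_import():
    wmo_records = [
        "00;000;PABL;Buckland, Buckland Airport;AK;United States;4;"
        "65-58-56N;161-09-07W;;;7;;",
    ]
    stations = Stations(wmo_records)
    # The key is the concatenated WMO block and station numbers ('00000'),
    # and the value is a Station built from the remaining fields.
    return stations['00000']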
def import_locations(self, gpsdata_file, checksum=True):
    r"""Import GPS NMEA-formatted data files

    ``import_locations()`` returns a list of positional objects (``Fix``,
    ``Position``, ``Waypoint`` and ``LoranPosition``) representing the
    supported sentences found in the GPS data.

    It expects data files in NMEA 0183 format, as specified in `the
    official documentation`_, which is ASCII text such as::

        $GPGSV,6,6,21,32,65,170,35*48
        $GPGGA,142058,5308.6414,N,00300.9257,W,1,04,5.6,1374.6,M,34.5,M,,*6B
        $GPRMC,142058,A,5308.6414,N,00300.9257,W,109394.7,202.9,191107,5,E,A*2C
        $GPGSV,6,1,21,02,76,044,43,03,84,156,49,06,89,116,51,08,60,184,30*7C
        $GPGSV,6,2,21,09,87,321,50,10,77,243,44,11,85,016,49,12,89,100,52*7A
        $GPGSV,6,3,21,13,70,319,39,14,90,094,52,16,85,130,49,17,88,136,51*7E
        $GPGSV,6,4,21,18,57,052,27,24,65,007,34,25,62,142,32,26,88,031,51*73
        $GPGSV,6,5,21,27,64,343,33,28,45,231,16,30,84,198,49,31,90,015,52*7C
        $GPGSV,6,6,21,32,65,170,34*49
        $GPWPL,5200.9000,N,00013.2600,W,HOME*5E
        $GPGGA,142100,5200.9000,N,00316.6600,W,1,04,5.6,1000.0,M,34.5,M,,*68
        $GPRMC,142100,A,5200.9000,N,00316.6600,W,123142.7,188.1,191107,5,E,A*21

    The reader currently imports the GGA, GLL, RMC and WPL sentences; other
    sentence types, such as the GSV satellite data above, are skipped.
    Other than that the data is out of scope for ``upoints``.

    The above file when processed by ``import_locations()`` will return the
    following ``list`` object::

        [Fix(datetime.time(14, 20, 58), 53.1440233333, -3.01542833333, 1,
             4, 5.6, 1374.6, 34.5, None, None),
         Position(datetime.time(14, 20, 58), True, 53.1440233333,
                  -3.01542833333, 109394.7, 202.9,
                  datetime.date(2007, 11, 19), 5.0, 'A'),
         Waypoint(52.015, -0.221, 'Home'),
         Fix(datetime.time(14, 21), 52.015, -3.27766666667, 1, 4, 5.6,
             1000.0, 34.5, None, None),
         Position(datetime.time(14, 21), True, 52.015, -3.27766666667,
                  123142.7, 188.1, datetime.date(2007, 11, 19), 5.0, 'A')]

    .. note::
       The standard is quite specific in that sentences *must* be less than
       82 bytes.  While it would be nice to add yet another validity check,
       it isn't all that uncommon for devices to break this requirement in
       their "extensions" to the standard.

    .. todo:: Add optional check for message length, on by default

    :type gpsdata_file: ``file``, ``list`` or ``str``
    :param gpsdata_file: NMEA data to read
    :param bool checksum: Whether checksums should be tested
    :rtype: ``list``
    :return: Series of locations taken from the data

    .. _the official documentation: http://en.wikipedia.org/wiki/NMEA_0183
    """
    self._gpsdata_file = gpsdata_file
    data = utils.prepare_read(gpsdata_file)

    parsers = {
        'GPGGA': Fix,
        'GPRMC': Position,
        'GPWPL': Waypoint,
        'GPGLL': LoranPosition,
        'LCGLL': LoranPosition,
    }

    if not checksum:
        logging.warning('Disabling the checksum tests should only be used '
                        'when the device is incapable of emitting the '
                        'correct values!')
    for line in data:
        # The standard tells us lines should end in \r\n, even though some
        # devices break this, but Python's standard file object solves this
        # for us anyway.  However, be careful if you implement your own
        # opener.
        if line[1:6] not in parsers:
            continue
        if checksum:
            values, line_checksum = line[1:].split('*')
            if calc_checksum(values) != int(line_checksum, 16):
                raise ValueError('Sentence has invalid checksum')
        else:
            values = line[1:].split('*')[0]
        elements = values.split(',')
        parser = getattr(parsers[elements[0]], 'parse_elements')
        self.append(parser(elements[1:]))
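# The importer above relies on a ``calc_checksum()`` helper.  In NMEA 0183
# the checksum is the bitwise XOR of every character between the leading '$'
# and the '*' delimiter, compared against the two hex digits after the '*'.
# A minimal sketch of such a helper follows; the real upoints function may
# differ in name, signature and error handling.
def _calc_checksum_sketch(sentence_body):
    """Return the XOR checksum of an NMEA sentence body (illustrative)."""
    checksum = 0
    for char in sentence_body:
        checksum ^= ord(char)
    return checksum

# e.g. the body of '$GPWPL,5200.9000,N,00013.2600,W,HOME*5E' is
# 'GPWPL,5200.9000,N,00013.2600,W,HOME', which XORs to 0x5e:
assert _calc_checksum_sketch('GPWPL,5200.9000,N,00013.2600,W,HOME') == 0x5e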
def test_prepare_read_read():
    assert prepare_read(open('tests/data/real_file', 'r')) == \
        ['This is a test file-type object\n']
def test_prepare_read(data, result):
    assert prepare_read(data) == result
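# The test above expects ``data`` and ``result`` to be supplied by a
# ``pytest.mark.parametrize`` decorator (or equivalent fixtures), which is
# missing here.  A possible wiring is sketched below using the same sample
# data as the other prepare_read tests; the exact cases used by the real
# test suite are an assumption.
import pytest

@pytest.mark.parametrize('data, result', [
    (open('tests/data/real_file'), ['This is a test file-type object\n']),
    (['This is a test list-type object', 'with two elements'],
     ['This is a test list-type object', 'with two elements']),
])
def test_prepare_read_parametrized(data, result):
    assert prepare_read(data) == result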