def request_data():
    """Fetch January 2016 met data for three Hemsedal-area stations from
    eKlima and dump each raw XML response to ../Test/Data/.

    Side effects: writes three XML files, prints each output file name and
    the request URL. Returns None.
    """
    from_date = '2016-01-01'
    to_date = '2016-01-31'

    def _fetch_and_save(station, elements, fname):
        # One getMetData request per station; the response body is written
        # verbatim so the files can later be used as parser test fixtures.
        wr = wsKlimaRequest('getMetData',
                            {'timeserietypeID': 2,
                             'format': "",
                             'from': from_date,
                             'to': to_date,
                             'stations': [station],
                             'elements': elements,
                             'hours': range(0, 24),
                             'months': "",
                             'username': ""}).get()
        # Context manager guarantees the file is closed even if write fails
        # (original used open/close pairs with no error handling).
        with open(fname, 'w') as f:
            f.write(wr.text)
        print('Data written to %s' % fname)
        print(wr.url)

    # Hollekolten: air temperature, wind speed and wind direction.
    _fetch_and_save(25112, ['TA', 'FF', 'DD'],
                    '../Test/Data/hemsedal_hollekolten_jan2016.xml')
    # Hoelto: hourly/daily precipitation and snow depth.
    _fetch_and_save(25100, ['RR_1', 'RR_24', 'SA'],
                    '../Test/Data/hemsedal_hoelto_jan2016.xml')
    # Hemsedal II: hourly/daily precipitation and snow depth.
    _fetch_and_save(25110, ['RR_1', 'RR_24', 'SA'],
                    '../Test/Data/hemsedal_II_jan2016.xml')
def hourly_params():
    """Dump the eKlima element catalogue for hourly series
    (timeserietypeID 2) to two CSV files: Norwegian (service default
    language) and English.

    Side effects: writes eklima_param_list_no.csv and
    eklima_param_list_en.csv in the current working directory and prints a
    completion message. Returns None.
    """
    # Column order shared by both CSV files.
    tags = ('elemGroup', 'name', 'elemCode', 'unit', 'elemNo', 'description')
    row_fmt = "{0},{1},{2},{3},{4},{5}\n"

    # Request the element list using the default (Norwegian) language.
    wr = wsKlimaRequest('getElementsFromTimeserieType',
                        {'timeserietypeID': 2}).get()
    root = etree.fromstring(wr.content)

    # `with` guarantees the handle is closed on error (original left the
    # file open if a write raised).
    with open('eklima_param_list_no.csv', 'w') as fid:
        fid.write('#Gruppe,#Undergruppe,#Kode,#Enhet,#Nummer,#Beskrivelse\n')
        # Plain loop instead of a list comprehension used only for its
        # fid.write side effects.
        for element in root.iter('item'):
            fid.write(row_fmt.format(
                *(element.find(t).text.encode('utf-8') for t in tags)))

    # Element codes reused for the English-language request below.
    elem_list = [element.find('elemCode').text
                 for element in root.iter('item')]

    # getElementsProperties returns the same item structure, here requested
    # in English for the same element codes.
    wr = wsKlimaRequest('getElementsProperties',
                        {'language': 'en', 'elem_codes': elem_list}).get()
    root = etree.fromstring(wr.content)

    with open('eklima_param_list_en.csv', 'w') as fid:
        fid.write('#Group,#Subgroup,#Code,#Unit,#Number,#Description\n')
        for element in root.iter('item'):
            fid.write(row_fmt.format(
                *(element.find(t).text.encode('utf-8') for t in tags)))

    # print() call form runs on both Python 2 and 3 (original used the
    # Python-2-only print statement).
    print('...files written')
def hourly_params():
    """Write CSV listings of all eKlima elements available for hourly
    series (timeserietypeID 2), once in Norwegian and once in English.

    Side effects: creates eklima_param_list_no.csv and
    eklima_param_list_en.csv in the working directory and prints a
    completion message. Returns None.
    """

    def _dump_items(root, path, header):
        # Shared writer for both language variants. `with` replaces the
        # original's manual close, so the handle is released on errors too.
        with open(path, 'w') as fid:
            fid.write(header)
            # Plain for-loop replaces the original side-effect-only list
            # comprehension.
            for item in root.iter('item'):
                fid.write("{0},{1},{2},{3},{4},{5}\n".format(
                    item.find('elemGroup').text.encode('utf-8'),
                    item.find('name').text.encode('utf-8'),
                    item.find('elemCode').text.encode('utf-8'),
                    item.find('unit').text.encode('utf-8'),
                    item.find('elemNo').text.encode('utf-8'),
                    item.find('description').text.encode('utf-8')))

    # Norwegian (service default language) element catalogue.
    rsp = wsKlimaRequest('getElementsFromTimeserieType',
                         {'timeserietypeID': 2}).get()
    root_no = etree.fromstring(rsp.content)
    _dump_items(root_no, 'eklima_param_list_no.csv',
                '#Gruppe,#Undergruppe,#Kode,#Enhet,#Nummer,#Beskrivelse\n')

    # Same element codes requested again in English; getElementsProperties
    # returns the same item structure.
    codes = [item.find('elemCode').text for item in root_no.iter('item')]
    rsp = wsKlimaRequest('getElementsProperties',
                         {'language': 'en', 'elem_codes': codes}).get()
    root_en = etree.fromstring(rsp.content)
    _dump_items(root_en, 'eklima_param_list_en.csv',
                '#Group,#Subgroup,#Code,#Unit,#Number,#Description\n')

    # print() call form is valid on Python 2 and 3 alike (original used the
    # Python-2-only print statement).
    print('...files written')
def hourly_rr_ta_uu_ff_dd_po():
    """List all currently operative eKlima stations reporting hourly
    (timeserietypeID 2) precipitation, temperature, humidity, wind and
    pressure elements, and write them to a tab-separated text file.

    Side effects: writes stations_hourly_rr_ta_uu_ff_dd_po.txt and prints
    the number of stations found. Returns None.
    """
    wr = wsKlimaRequest(
        'getStationsFromTimeserieTypeStationsElemCode',
        {'stations': [],
         'timeserietypeID': 2,
         'elem_codes': ['RR_1', 'RR_24', 'TA', 'UU', 'FF', 'DD', 'PO'],
         'username': ""})
    rsp = wr.get()
    root = etree.fromstring(rsp.content)

    station_list = []
    outfile = 'stations_hourly_rr_ta_uu_ff_dd_po.txt'
    # Context manager replaces the manual close, so the handle is released
    # even if a write raises mid-loop.
    with open(outfile, 'w') as fid:
        fid.write('#\tSNR\tSTNR\tLAT_DEC\tLON_DEC\tAMSL\tST_NAME\tDepartment\n')
        for element in root.iter('item'):
            # toYear == 0 marks a station that is still operative.
            if int(element.find('toYear').text) == 0:
                station_list.append(int(element.find('stnr').text))
                fid.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\n".format(
                    element.find('wmoNo').text.encode('utf-8'),
                    element.find('stnr').text,
                    element.find('latDec').text,
                    element.find('lonDec').text,
                    element.find('amsl').text,
                    element.find('name').text.encode('utf-8'),
                    element.find('department').text.encode('utf-8')))
    print("Found {0} stations.\nWritten to {1}".format(len(station_list),
                                                       outfile))
def crocus_station_list():
    """Load the 2016 Crocus station list, fetch station properties from
    eKlima and insert every station into the local SQLite station DB.

    Side effects: reads Test/Data/crocus_stations.json, prints diagnostics,
    writes to ./Test/Data/stations.db. Returns None.
    """
    # Close the JSON file deterministically (original json.load(open(...))
    # leaked the handle).
    with open('Test/Data/crocus_stations.json', 'r') as f:
        stat = json.load(f)
    station_list = stat['crocus_stations_2016']

    wr = wsKlimaRequest('getStationsProperties',
                        {'stations': station_list, 'username': ''})
    rsp = wr.get()
    print(type(rsp.content))
    sd = parse_get_stations_properties(rsp.content)

    db = CrocusStationDB('./Test/Data/stations.db')
    # db.create_station_db()  # only needed to (re)initialise the schema
    try:
        # Redundant iter() wrapper dropped; dict values iterate directly.
        for s in sd.values():
            print(s)
            db.insert_station(s)
    finally:
        # Close the DB even if an insert raises.
        db.close()
def crocus_station_list():
    """Populate the local Crocus station database.

    Reads the 2016 Crocus station numbers from
    Test/Data/crocus_stations.json, requests their properties from eKlima
    and inserts each parsed station into ./Test/Data/stations.db.

    Side effects: file read, console prints, database writes. Returns None.
    """
    # `with` closes the JSON file promptly; the original
    # json.load(open(...)) pattern leaked the handle.
    with open('Test/Data/crocus_stations.json', 'r') as fh:
        stations = json.load(fh)['crocus_stations_2016']

    rsp = wsKlimaRequest('getStationsProperties',
                         {'stations': stations, 'username': ''}).get()
    print(type(rsp.content))
    parsed = parse_get_stations_properties(rsp.content)

    db = CrocusStationDB('./Test/Data/stations.db')
    # db.create_station_db()  # uncomment on first run to create the schema
    try:
        # Dict values iterate directly; no iter() wrapper needed.
        for station in parsed.values():
            print(station)
            db.insert_station(station)
    finally:
        # Guarantee the connection is released even on insert failure.
        db.close()
def hourly_rr_ta_uu_ff_dd_po():
    """Write a tab-separated list of operative eKlima stations that provide
    hourly (timeserietypeID 2) RR_1/RR_24/TA/UU/FF/DD/PO elements.

    Side effects: writes stations_hourly_rr_ta_uu_ff_dd_po.txt and prints a
    summary line. Returns None.
    """

    def _row(item):
        # One tab-separated line per station item.
        return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\n".format(
            item.find('wmoNo').text.encode('utf-8'),
            item.find('stnr').text,
            item.find('latDec').text,
            item.find('lonDec').text,
            item.find('amsl').text,
            item.find('name').text.encode('utf-8'),
            item.find('department').text.encode('utf-8'))

    rsp = wsKlimaRequest(
        'getStationsFromTimeserieTypeStationsElemCode',
        {'stations': [],
         'timeserietypeID': 2,
         'elem_codes': ['RR_1', 'RR_24', 'TA', 'UU', 'FF', 'DD', 'PO'],
         'username': ""}).get()
    root = etree.fromstring(rsp.content)

    found = []
    outfile = 'stations_hourly_rr_ta_uu_ff_dd_po.txt'
    # `with` releases the handle even if a write fails (original relied on
    # a trailing fid.close()).
    with open(outfile, 'w') as fid:
        fid.write('#\tSNR\tSTNR\tLAT_DEC\tLON_DEC\tAMSL\tST_NAME\tDepartment\n')
        for item in root.iter('item'):
            # A toYear of 0 means the station is still in operation.
            if int(item.find('toYear').text) == 0:
                found.append(int(item.find('stnr').text))
                fid.write(_row(item))
    print("Found {0} stations.\nWritten to {1}".format(len(found), outfile))
def request_data():
    """Download January 2016 met data for three Hemsedal-area stations and
    save each raw XML response under ../Test/Data/.

    Side effects: writes three XML files, prints each file name and request
    URL. Returns None.
    """
    from_date = "2016-01-01"
    to_date = "2016-01-31"
    # (station number, element codes, output file) for each request; this
    # replaces three copy-pasted request/write stanzas in the original.
    jobs = [
        (25112, ["TA", "FF", "DD"],
         "../Test/Data/hemsedal_hollekolten_jan2016.xml"),
        (25100, ["RR_1", "RR_24", "SA"],
         "../Test/Data/hemsedal_hoelto_jan2016.xml"),
        (25110, ["RR_1", "RR_24", "SA"],
         "../Test/Data/hemsedal_II_jan2016.xml"),
    ]
    for station, elements, fname in jobs:
        wr = wsKlimaRequest(
            "getMetData",
            {
                "timeserietypeID": 2,
                "format": "",
                "from": from_date,
                "to": to_date,
                "stations": [station],
                "elements": elements,
                "hours": range(0, 24),
                "months": "",
                "username": "",
            },
        ).get()
        # Context manager closes the file even if the write raises.
        with open(fname, "w") as f:
            f.write(wr.text)
        print("Data written to %s" % fname)
        print(wr.url)
__author__ = 'kmu' ''' stnr = 63705 stations = [stnr,] t_from = '2015-12-04' t_to = '2015-12-07' tt_id = 2 ''' STEP 1 ''' rsp = wsKlimaRequest('getStationsProperties', {'stations': stations, 'username': ""}).get() st_props = parse_get_stations_properties(rsp.content) print(st_props[str(stations[0])]) rsp = wsKlimaRequest('getElementsFromTimeserieTypeStation', {'stations': stations, 'timeserietypeID': tt_id}).get() st_elems = parse_get_elements_from_timeserie_type_station(rsp.content) relevant_sensors = ['TA', 'RR_1', 'RR_6', 'RRINTENS', 'DAGRRRR00', 'DAGRRSS00', 'X1UU', 'X1UM', 'UU', 'FX_6', 'FX_12', 'X1FX_1', 'X1FF', 'DD', 'FF', 'SAM', 'SA', 'RTS_1', 'SS_1', 'SS_24',