Code example #1
0
def read_individually_downloaded_sw_file(file_name):
	"""Parse one manually-downloaded surface-water CSV export.

	Parameters
	----------
	file_name : str
		Path to the downloaded CSV file.

	Returns
	-------
	tuple
		(site_id, site_info, lat, lng, elev, dates, levels, flows) where
		dates/levels/flows are numpy arrays over the data rows, with missing
		level/flow cells filled in by utils.interpolate.
	"""
	with open(file_name) as f:
		reader = csv.reader(f)
		rows = [r for r in reader]

	# Row 13 holds a one-line site description of the form
	# "Site <id> <info> Lat:<lat> Long:<lng> Elev:<elev>".
	details = rows[13][0]
	# Raw string so \d is a regex character class, not a (deprecated)
	# string escape sequence.
	site_id, site_info, lat, lng, elev = re.match(r'Site (\d+) (.+) Lat:(.*) Long:(.*) Elev:(.*)', details).groups()
	lat = float(lat)
	lng = float(lng)
	elev = float(elev)

	headers = rows[15]

	date_i = 0; assert headers[date_i] == "Datetime"
	level_i = 1; assert headers[level_i] == "Water Level (m) Mean"
	flow_i = 3; assert headers[flow_i] == "Discharge (Ml/d) Mean"
	# NOTE(review): column 5 is asserted to carry the same header text as
	# column 3 — presumably a superseded discharge series; confirm against
	# an actual export.
	old_flow_i = 5; assert headers[old_flow_i] == "Discharge (Ml/d) Mean"

	# Skip leading data rows whose water-level cell is empty.
	start_i = 16
	while rows[start_i][level_i] == '':
		start_i += 1

	# Trim trailing completely-empty rows.
	end_i = len(rows)
	while len(rows[end_i-1]) == 0:
		end_i -= 1

	dates = numpy.array([datetime.datetime.strptime(r[date_i], "%d/%m/%Y %H:%M:%S") for r in rows[start_i:end_i]])
	levels = utils.interpolate( numpy.array([utils.as_float(r[level_i]) for r in rows[start_i:end_i]]) )
	flows = utils.interpolate( numpy.array([utils.as_float(r[flow_i]) for r in rows[start_i:end_i]]) )

	return site_id, site_info, lat, lng, elev, dates, levels, flows
Code example #2
0
def read_bulk_downloaded_sw_file(dir_name, site_id, data_types):
	"""Load bulk-downloaded surface-water series for one site.

	Reads "Site Details.csv" for per-site metadata, then one
	"<site_id>.<data_type>.csv" file per requested data type.

	Returns (details_row_for_site, {data_type: {"type", "dates", "data"}}).
	"""
	# Metadata for every site in the download, keyed by its "Site Id" column.
	with open(os.path.join(dir_name, "Site Details.csv")) as f:
		site_details = {row["Site Id"]: row for row in csv.DictReader(f)}

	series_by_type = {}
	for data_type in data_types:
		csv_path = os.path.join(dir_name, site_id+"."+data_type+".csv")

		with open(csv_path) as f:
			all_rows = list(csv.reader(f))

		# Row 2 carries the column headers; data starts on row 3.
		header = all_rows[2]
		date_col = 0; assert header[date_col] == "Date"
		value_col = 1; assert header[value_col] == "Mean"

		data_rows = all_rows[3:]

		dates = numpy.array([
			datetime.datetime.strptime(r[date_col], "%H:%M:%S %d/%m/%Y")
			for r in data_rows
		])
		values = utils.interpolate(
			numpy.array([utils.as_float(r[value_col]) for r in data_rows])
		)

		series_by_type[data_type] = {
			"type": data_type,
			"dates": dates,
			"data": values,
		}

	return site_details[site_id], series_by_type
Code example #3
0
File: bom.py  Project: mjasher/aus-hydro-data
def get_bom_climate(zipped_sites_dir, chosen_id):
    """Extract BOM climate observations for one station from zipped downloads.

    Scans `zipped_sites_dir` for archives named "<station>_<obscode>.zip";
    for each archive whose station id equals `chosen_id`, reads the CSV
    inside and collects its observation series.

    Returns {obs_code: {"type": obs_type, "dates": ndarray, "values": ndarray}}.
    """
    import io  # local import: only needed to decode the zipped CSV stream

    # File names look like "066062_136.zip": 6-digit station id and 3-digit
    # observation code. Raw string so \d is a regex class; "." before "zip"
    # escaped so it no longer matches an arbitrary character.
    bom_re = re.compile(r"(\d{6})_(\d{3})\.zip")

    zipped_files = [f for f in os.listdir(zipped_sites_dir) if bom_re.match(f)]

    climate_data = {}

    for zipped_f in zipped_files:
        # Match once and reuse both groups (original matched twice per file).
        m = bom_re.match(zipped_f)
        site_id = m.group(1)
        obs_code = m.group(2)
        obs_type = bom_obs_types[obs_code]

        if site_id != chosen_id:
            continue

        # Close the archive deterministically (original leaked the handle).
        with zipfile.ZipFile(os.path.join(zipped_sites_dir, zipped_f)) as archive:
            # BUG FIX: filter() returns an iterator on Python 3, so the
            # original filter(...)[0] raised TypeError; take the first CSV
            # member with next() instead.
            csvfile = next(name for name in archive.namelist() if name.endswith(".csv"))
            # ZipFile.open yields bytes on Python 3; wrap it so csv.DictReader
            # receives text.
            reader = csv.DictReader(io.TextIOWrapper(archive.open(csvfile)))
            raw_rows = [row for row in reader]

        dates = numpy.array(
            [datetime.datetime(int(row["Year"]), int(row["Month"]), int(row["Day"])) for row in raw_rows]
        )
        values = numpy.array([utils.as_float(row[obs_type]) for row in raw_rows])

        climate_data[obs_code] = {"type": obs_type, "dates": dates, "values": values}

    return climate_data