def load_regrid_file(regrid_file):
    """Load a regridded data_store from regrid_file.

    Prints a status message and returns the populated data_store on
    success; prints an error and exits with status 2 if the file does
    not exist.
    """
    ds = data_store()
    if not os.path.exists(regrid_file):
        print("Regridded file: " + regrid_file + " not found")
        sys.exit(2)
    # context manager guarantees the handle is closed even if load() raises
    with open(regrid_file, 'rb') as fh:
        ds.load(fh)
    # print(...) with a single pre-concatenated argument behaves the same
    # under Python 2 and Python 3 (original used the py2 print statement)
    print("Loaded regridded data file " + regrid_file)
    return ds
def average_tri_data(input_file, output_file, avg_period):
    """Average a triangular-grid data file over fixed time periods.

    Loads a data_store from input_file, averages every avg_period
    consecutive time steps into a single step, and saves the result to
    output_file.  avg_period == 30 is treated as "monthly": if the
    record does not hold exactly 30 steps the whole record is averaged
    into one step.  avg_period <= 1 passes the data through unchanged.
    """
    ds = data_store()
    with open(input_file, 'rb') as fh_in:
        ds.load(fh_in)
    n_ts = ds.get_n_t_steps()
    n_idxs = ds.get_n_idxs()

    if avg_period > 1:
        # "monthly" request on a record that is not 30 steps long:
        # fall back to averaging the entire record into one step
        if avg_period == 30 and n_ts != 30:
            avg_period = n_ts
        # each source step contributes 1/avg_period of the mean
        scale = 1.0 / avg_period
        mean_ds = data_store()
        mean_ds.mv = ds.mv
        # floor division (//) keeps integer semantics on both py2 and py3;
        # the original used "/" which yields a float under Python 3
        n_out = n_ts // avg_period
        mean_ds.set_size(n_out, n_idxs)
        # only consume complete periods so dest_pos never indexes past
        # the sized store when n_ts is not a multiple of avg_period
        for t in range(0, n_out * avg_period):
            dest_pos = t // avg_period
            for i in range(0, n_idxs):
                # accumulate the scaled source value onto the running mean
                mean_ds[dest_pos, i] = mean_ds[dest_pos, i] + scale * ds[t, i]
    else:
        # no averaging requested - write the input data straight through
        mean_ds = ds

    with open(output_file, 'wb') as fh_out:
        mean_ds.save(fh_out)
def ens_avg_tri_data(input_list, output_file):
    """Compute the equal-weight ensemble mean of a list of data files.

    input_list is a text file naming one data file per line.  Each
    member is loaded into a data_store, accumulated with weight
    1/n_members, and the mean is saved to output_file.  The accumulator
    is sized (and its missing value copied) from the first member read.
    """
    with open(input_list) as fh_list:
        # strip() rather than strip('\n') so CRLF line endings and stray
        # whitespace do not corrupt the file names; skip blank lines so
        # a trailing newline in the list cannot cause open('') to fail
        ens_files = [line.strip() for line in fh_list if line.strip()]
    scaler = 1.0 / len(ens_files)

    mean_ds = data_store()
    for fname in ens_files:
        ds = data_store()
        with open(fname, 'rb') as fh_in:
            ds.load(fh_in)
        n_ts = ds.get_n_t_steps()
        n_idxs = ds.get_n_idxs()
        # size the accumulator on the first load only
        if mean_ds.get_n_t_steps() == 0:
            mean_ds.set_size(n_ts, n_idxs)
            mean_ds.mv = ds.mv
        for t in range(0, n_ts):
            for i in range(0, n_idxs):
                # add this member's scaled value onto the running mean
                mean_ds[t, i] = mean_ds[t, i] + scaler * ds[t, i]

    with open(output_file, 'wb') as fh_out:
        mean_ds.save(fh_out)
# NOTE(review): duplicate definition of ens_avg_tri_data - this later
# definition shadows the earlier identical one; one copy should be removed.
def ens_avg_tri_data(input_list, output_file):
    """Compute the equal-weight ensemble mean of a list of data files.

    input_list is a text file naming one data file per line.  Each
    member is loaded into a data_store, accumulated with weight
    1/n_members, and the mean is saved to output_file.  The accumulator
    is sized (and its missing value copied) from the first member read.
    """
    with open(input_list) as fh_list:
        # strip() rather than strip('\n') so CRLF line endings and stray
        # whitespace do not corrupt the file names; skip blank lines so
        # a trailing newline in the list cannot cause open('') to fail
        ens_files = [line.strip() for line in fh_list if line.strip()]
    scaler = 1.0 / len(ens_files)

    mean_ds = data_store()
    for fname in ens_files:
        ds = data_store()
        with open(fname, 'rb') as fh_in:
            ds.load(fh_in)
        n_ts = ds.get_n_t_steps()
        n_idxs = ds.get_n_idxs()
        # size the accumulator on the first load only
        if mean_ds.get_n_t_steps() == 0:
            mean_ds.set_size(n_ts, n_idxs)
            mean_ds.mv = ds.mv
        for t in range(0, n_ts):
            for i in range(0, n_idxs):
                # add this member's scaled value onto the running mean
                mean_ds[t, i] = mean_ds[t, i] + scaler * ds[t, i]

    with open(output_file, 'wb') as fh_out:
        mean_ds.save(fh_out)
# NOTE(review): duplicate definition of average_tri_data - this later
# definition shadows the earlier one (it differed only by stray
# semicolons); one copy should be removed.
def average_tri_data(input_file, output_file, avg_period):
    """Average a triangular-grid data file over fixed time periods.

    Loads a data_store from input_file, averages every avg_period
    consecutive time steps into a single step, and saves the result to
    output_file.  avg_period == 30 is treated as "monthly": if the
    record does not hold exactly 30 steps the whole record is averaged
    into one step.  avg_period <= 1 passes the data through unchanged.
    """
    ds = data_store()
    with open(input_file, 'rb') as fh_in:
        ds.load(fh_in)
    n_ts = ds.get_n_t_steps()
    n_idxs = ds.get_n_idxs()

    if avg_period > 1:
        # "monthly" request on a record that is not 30 steps long:
        # fall back to averaging the entire record into one step
        if avg_period == 30 and n_ts != 30:
            avg_period = n_ts
        # each source step contributes 1/avg_period of the mean
        scale = 1.0 / avg_period
        mean_ds = data_store()
        mean_ds.mv = ds.mv
        # floor division (//) keeps integer semantics on both py2 and py3;
        # the original used "/" which yields a float under Python 3
        n_out = n_ts // avg_period
        mean_ds.set_size(n_out, n_idxs)
        # only consume complete periods so dest_pos never indexes past
        # the sized store when n_ts is not a multiple of avg_period
        for t in range(0, n_out * avg_period):
            dest_pos = t // avg_period
            for i in range(0, n_idxs):
                # accumulate the scaled source value onto the running mean
                mean_ds[dest_pos, i] = mean_ds[dest_pos, i] + scale * ds[t, i]
    else:
        # no averaging requested - write the input data straight through
        mean_ds = ds

    with open(output_file, 'wb') as fh_out:
        mean_ds.save(fh_out)