Example #1
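All four snippets assume the standard imports below; GagesModel and the project helpers (master_test_1by1, master_train_1by1, _basin_norm, serialize_numpy, plot_loss_early_stop) come from this project's own packages, whose import paths are not shown here.

import os
from functools import reduce

import numpy as np
import torch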
def test_test_gages_iter(self):
    # Load the serialized test-period data model: data source, statistics,
    # flow/forcing/attribute arrays, and the factorize/attribute/time-space dicts.
    data_model = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    with torch.cuda.device(1):
        obs_lst = []
        pred_lst = []
        # Test one trained model per basin (data_flow has one row per site).
        for i in range(0, data_model.data_flow.shape[0]):
            print("\n", "Testing model", str(i + 1), ":\n")
            data_models_i = GagesModel.which_data_model(data_model, i)
            pred, obs = master_test_1by1(data_models_i)
            # Basin attributes needed to undo the area/precipitation normalization.
            basin_area = data_models_i.data_source.read_attr(
                data_models_i.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                is_return_dict=False)
            mean_prep = data_models_i.data_source.read_attr(
                data_models_i.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                is_return_dict=False)
            # Presumably converts mean annual precipitation (cm/yr) to mm/day.
            mean_prep = mean_prep / 365 * 10
            # Transform the normalized outputs back to streamflow.
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            obs_lst.append(obs.flatten())
            pred_lst.append(pred.flatten())
        # Stack the per-basin series and persist them for later analysis.
        preds = np.array(pred_lst)
        obss = np.array(obs_lst)
        flow_pred_file = os.path.join(
            data_model.data_source.data_config.data_path['Temp'], 'flow_pred')
        flow_obs_file = os.path.join(
            data_model.data_source.data_config.data_path['Temp'], 'flow_obs')
        serialize_numpy(preds, flow_pred_file)
        serialize_numpy(obss, flow_obs_file)
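For context, _basin_norm with to_norm=False maps area- and precipitation-normalized runoff back to streamflow. The project's actual implementation is not shown; a minimal sketch consistent with the call signature, assuming flow in ft³/s, basin area in km², and mean precipitation in mm/day, might look like:

import numpy as np

def _basin_norm_sketch(x, basin_area, mean_prep, to_norm):
    # Hypothetical stand-in for the project's _basin_norm.
    # x: (n_sites, n_times); basin_area: (n_sites, 1) in km^2;
    # mean_prep: (n_sites, 1) in mm/day.
    area = np.tile(basin_area, (1, x.shape[1]))
    prep = np.tile(mean_prep, (1, x.shape[1]))
    if to_norm:
        # ft^3/s -> dimensionless runoff ratio
        return (x * 0.0283168 * 3600 * 24) / ((area * 1e6) * (prep * 1e-3))
    # dimensionless runoff ratio -> ft^3/s
    return x * ((area * 1e6) * (prep * 1e-3)) / (0.0283168 * 3600 * 24)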
Example #2
def test_compact_data_model(self):
    # Parent of the configured temp dir, then the "exp1" experiment dir.
    data_dir_temp = '/'.join(
        self.config_data.data_path['Temp'].split('/')[:-1])
    data_dir = os.path.join(data_dir_temp, "exp" + str(1))
    data_model = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    # Split off the first two per-site models; the full range would be
    # data_model.data_flow.shape[0].
    data_model_lst = []
    for j in range(0, 2):
        data_models_j = GagesModel.which_data_model(data_model, j)
        data_model_lst.append(data_models_j)
    gages_model_test = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    # Merge the per-site models back into one model bound to the test data source.
    data_model_test = GagesModel.compact_data_model(
        data_model_lst, gages_model_test.data_source)
    print(data_model_test)
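A small aside: '/'.join(path.split('/')[:-1]) is, for POSIX paths without a trailing slash, equivalent to the more portable os.path.dirname:

data_dir_temp = os.path.dirname(self.config_data.data_path['Temp'])
data_dir = os.path.join(data_dir_temp, "exp1")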
Example #3
def test_test_gages_iter(self):
    data_config = self.config_data.read_data_config()
    regions = data_config["regions"]
    data_model_test_lst = []
    with torch.cuda.device(1):
        obs_lsts = []
        pred_lsts = []
        # One experiment directory ("exp1", "exp2", ...) per region.
        for i in range(1, len(regions) + 1):
            data_dir_i_temp = '/'.join(
                self.config_data.data_path['Temp'].split('/')[:-1])
            data_dir_i = os.path.join(data_dir_i_temp, "exp" + str(i))
            data_model_i = GagesModel.load_datamodel(
                data_dir_i,
                data_source_file_name='test_data_source.txt',
                stat_file_name='test_Statistics.json',
                flow_file_name='test_flow.npy',
                forcing_file_name='test_forcing.npy',
                attr_file_name='test_attr.npy',
                f_dict_file_name='test_dictFactorize.json',
                var_dict_file_name='test_dictAttribute.json',
                t_s_dict_file_name='test_dictTimeSpace.json')
            data_model_test_lst.append(data_model_i)
            obs_lst = []
            pred_lst = []
            # Test one model per basin within the region.
            for j in range(0, data_model_i.data_flow.shape[0]):
                print("\n", "Testing model", str(j + 1), "of",
                      regions[i - 1], "region", ":\n")
                data_models_j = GagesModel.which_data_model(
                    data_model_i, j)
                pred, obs = master_test_1by1(data_models_j)
                # Basin attributes needed to undo the normalization.
                basin_area = data_models_j.data_source.read_attr(
                    data_models_j.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                    is_return_dict=False)
                mean_prep = data_models_j.data_source.read_attr(
                    data_models_j.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                    is_return_dict=False)
                # Presumably converts mean annual precipitation (cm/yr) to mm/day.
                mean_prep = mean_prep / 365 * 10
                pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
                obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
                obs_lst.append(obs.flatten())
                pred_lst.append(pred.flatten())
            preds = np.array(pred_lst)
            obss = np.array(obs_lst)
            obs_lsts.append(obss)
            pred_lsts.append(preds)

        # Stack all regions' per-basin arrays into single matrices and persist them.
        obs_final = reduce(lambda a, b: np.vstack((a, b)), obs_lsts)
        pred_final = reduce(lambda a, b: np.vstack((a, b)), pred_lsts)

        serialize_numpy(pred_final, self.flow_pred_file)
        serialize_numpy(obs_final, self.flow_obs_file)
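Since np.vstack already accepts a sequence of arrays, the two reduce calls above are equivalent to the simpler:

obs_final = np.vstack(obs_lsts)
pred_final = np.vstack(pred_lsts)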
Example #4
def test_train_gages_iter(self):
    # Load the serialized training-period data model.
    data_model = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    # Hold out 20% of the training data for early-stopping validation.
    valid_size = 0.2
    with torch.cuda.device(1):
        # Start at basin index 145, presumably resuming an interrupted run.
        for i in range(145, data_model.data_flow.shape[0]):
            print("\n", "Training model", str(i + 1), ":\n")
            data_models_i = GagesModel.which_data_model(data_model, i)
            model, train_loss, valid_loss = master_train_1by1(data_models_i, valid_size=valid_size)
            fig = plot_loss_early_stop(train_loss, valid_loss)
            out_dir = data_models_i.data_source.data_config.data_path["Out"]
            # Save one loss curve per basin; a fixed filename would overwrite
            # the previous basin's plot on every iteration.
            fig.savefig(os.path.join(out_dir, 'loss_plot_' + str(i + 1) + '.png'),
                        bbox_inches='tight')
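plot_loss_early_stop is a project helper whose implementation is not shown; a hypothetical stand-in that produces a comparable figure (name and styling are assumptions) could be:

import numpy as np
import matplotlib.pyplot as plt

def plot_loss_early_stop_sketch(train_loss, valid_loss):
    # Hypothetical stand-in: plot both per-epoch loss curves and mark the
    # epoch with minimum validation loss, which is where early stopping
    # would restore the model from.
    fig, ax = plt.subplots()
    ax.plot(train_loss, label='training loss')
    ax.plot(valid_loss, label='validation loss')
    best_epoch = int(np.argmin(valid_loss))
    ax.axvline(best_epoch, linestyle='--', color='gray',
               label='early stop (min valid loss)')
    ax.set_xlabel('epoch')
    ax.set_ylabel('loss')
    ax.legend()
    return fig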