Example #1
 def test_dam_test(self):
     with torch.cuda.device(0):
         gages_input = GagesModel.load_datamodel(
             self.config_data.data_path["Temp"],
             data_source_file_name='test_data_source.txt',
             stat_file_name='test_Statistics.json',
             flow_file_name='test_flow.npy',
             forcing_file_name='test_forcing.npy',
             attr_file_name='test_attr.npy',
             f_dict_file_name='test_dictFactorize.json',
             var_dict_file_name='test_dictAttribute.json',
             t_s_dict_file_name='test_dictTimeSpace.json')
         pred, obs = master_test(gages_input, epoch=cfg.TEST_EPOCH)
         basin_area = gages_input.data_source.read_attr(
             gages_input.t_s_dict["sites_id"], ['DRAIN_SQKM'],
             is_return_dict=False)
         mean_prep = gages_input.data_source.read_attr(
             gages_input.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
             is_return_dict=False)
         # PPTAVG_BASIN is mean annual precipitation in cm; convert to mm/day
         mean_prep = mean_prep / 365 * 10
         pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
         obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
         save_result(gages_input.data_source.data_config.data_path['Temp'],
                     cfg.TEST_EPOCH, pred, obs)
         plot_we_need(gages_input,
                      obs,
                      pred,
                      id_col="STAID",
                      lon_col="LNG_GAGE",
                      lat_col="LAT_GAGE")
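Every example in this listing denormalizes the network output with `_basin_norm(..., to_norm=False)` before saving or plotting. As rough orientation only, the sketch below shows what a basin-area normalization of this shape typically computes; `basin_norm_sketch` is a hypothetical stand-in under stated assumptions, not this repository's `_basin_norm`.

    import numpy as np

    def basin_norm_sketch(x, basin_area, mean_prep, to_norm):
        # Hypothetical stand-in: runoff is divided by each basin's area times
        # its mean precipitation so basins of different sizes are comparable;
        # to_norm=False multiplies instead, inverting the normalization.
        scale = basin_area * mean_prep  # shape (n_basins, 1); broadcasts over time
        if x.ndim == 3 and x.shape[2] == 1:
            x = x[:, :, 0]
        return x / scale if to_norm else x * scale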
Example #2
 def test_test_camels(self):
     data_model = CamelsModel.load_datamodel(
         self.config_data.data_path["Temp"],
         data_source_file_name='test_data_source.txt',
         stat_file_name='test_Statistics.json',
         flow_file_name='test_flow.npy',
         forcing_file_name='test_forcing.npy',
         attr_file_name='test_attr.npy',
         f_dict_file_name='test_dictFactorize.json',
         var_dict_file_name='test_dictAttribute.json',
         t_s_dict_file_name='test_dictTimeSpace.json')
     with torch.cuda.device(2):
         pred, obs = master_test(data_model)
         basin_area = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['area_gages2'],
             is_return_dict=False)
         mean_prep = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['p_mean'],
             is_return_dict=False)
         pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
         obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
         plot_we_need(data_model,
                      obs,
                      pred,
                      id_col="id",
                      lon_col="lon",
                      lat_col="lat")
Example #3
    def test_test_gages_better_lstm(self):
        data_model = GagesModel.load_datamodel(
            self.config_data.data_path["Temp"],
            data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json',
            flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy',
            attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')
        data_set = StreamflowInputDataset(data_model, train_mode=False)
        # data_set = GagesInputDataset(data_model_train, train_mode=False)
        pred, obs = master_test_easier_lstm(data_set, load_epoch=25)
        basin_area = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['DRAIN_SQKM'],
            is_return_dict=False)
        mean_prep = data_model.data_source.read_attr(
            data_model.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
            is_return_dict=False)
        mean_prep = mean_prep / 365 * 10
        pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
        obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)

        plot_we_need(data_model, obs, pred)
Example #4
 def test_test_gages_wo_attr(self):
     config_dir = definitions.CONFIG_DIR
     config_file = os.path.join(config_dir, "susquehanna/config_exp2.ini")
     subdir = r"susquehanna/exp2"
     config_data = GagesConfig.set_subdir(config_file, subdir)
     data_model = GagesModelWoBasinNorm.load_datamodel(
         config_data.data_path["Temp"],
         data_source_file_name='test_data_source.txt',
         stat_file_name='test_Statistics.json',
         flow_file_name='test_flow.npy',
         forcing_file_name='test_forcing.npy',
         attr_file_name='test_attr.npy',
         f_dict_file_name='test_dictFactorize.json',
         var_dict_file_name='test_dictAttribute.json',
         t_s_dict_file_name='test_dictTimeSpace.json')
     with torch.cuda.device(2):
         pred, obs = master_test(data_model, epoch=self.test_epoch)
         save_result(data_model.data_source.data_config.data_path['Temp'],
                     self.test_epoch, pred, obs)
         plot_we_need(data_model,
                      obs,
                      pred,
                      id_col="STAID",
                      lon_col="LNG_GAGE",
                      lat_col="LAT_GAGE")
Example #5
 def test_test_gages(self):
     data_model = GagesModel.load_datamodel(self.config_data.data_path["Temp"],
                                            data_source_file_name='test_data_source.txt',
                                            stat_file_name='test_Statistics.json', flow_file_name='test_flow.npy',
                                            forcing_file_name='test_forcing.npy', attr_file_name='test_attr.npy',
                                            f_dict_file_name='test_dictFactorize.json',
                                            var_dict_file_name='test_dictAttribute.json',
                                            t_s_dict_file_name='test_dictTimeSpace.json')
     with torch.cuda.device(1):
         data_models = GagesModel.every_model(data_model)
         obs_lst = []
         pred_lst = []
         for i in range(len(data_models)):
             print("\n", "Testing model", str(i + 1), ":\n")
             pred, obs = master_test(data_models[i])
             basin_area = data_models[i].data_source.read_attr(data_models[i].t_s_dict["sites_id"], ['area_gages2'],
                                                               is_return_dict=False)
             mean_prep = data_models[i].data_source.read_attr(data_models[i].t_s_dict["sites_id"], ['p_mean'],
                                                              is_return_dict=False)
             pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
             obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
             obs_lst.append(obs.flatten())
             pred_lst.append(pred.flatten())
         preds = np.array(pred_lst)
         obss = np.array(obs_lst)
         plot_we_need(data_model, obss, preds, id_col="id", lon_col="lon", lat_col="lat")
Example #6
    def test_plot_1by1(self):
        data_config = self.config_data.read_data_config()
        regions = data_config["regions"]
        data_model_test_lst = []
        obs_lsts = []
        pred_lsts = []
        for i in range(1, len(regions) + 1):
            data_dir_i_temp = '/'.join(
                self.config_data.data_path['Temp'].split('/')[:-1])
            data_dir_i = os.path.join(data_dir_i_temp, "exp" + str(i))
            data_model_i = GagesModel.load_datamodel(
                data_dir_i,
                data_source_file_name='test_data_source.txt',
                stat_file_name='test_Statistics.json',
                flow_file_name='test_flow.npy',
                forcing_file_name='test_forcing.npy',
                attr_file_name='test_attr.npy',
                f_dict_file_name='test_dictFactorize.json',
                var_dict_file_name='test_dictAttribute.json',
                t_s_dict_file_name='test_dictTimeSpace.json')
            data_model_test_lst.append(data_model_i)

            flow_pred_file_i = os.path.join(data_dir_i, 'flow_pred.npy')
            flow_obs_file_i = os.path.join(data_dir_i, 'flow_obs.npy')
            preds = unserialize_numpy(flow_pred_file_i)
            obss = unserialize_numpy(flow_obs_file_i)
            obs_lsts.append(obss)
            pred_lsts.append(preds)

        # pred_final = unserialize_numpy(self.flow_pred_file)
        # obs_final = unserialize_numpy(self.flow_obs_file)
        obs_final = reduce(lambda a, b: np.vstack((a, b)), obs_lsts)
        pred_final = reduce(lambda a, b: np.vstack((a, b)), pred_lsts)
        gages_model_test = GagesModel.load_datamodel(
            self.config_data.data_path["Temp"],
            data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json',
            flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy',
            attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')

        data_model_test = GagesModel.compact_data_model(
            data_model_test_lst, gages_model_test.data_source)
        plot_we_need(data_model_test,
                     obs_final,
                     pred_final,
                     id_col="STAID",
                     lon_col="LNG_GAGE",
                     lat_col="LAT_GAGE")
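A small aside on the aggregation step above: for a list of 2-D arrays, the `reduce` over `np.vstack` is equivalent to a single `np.vstack` call on the whole list, which stacks everything in one pass instead of reallocating at every step. A self-contained check with dummy per-region arrays:

    import numpy as np
    from functools import reduce

    obs_lsts = [np.ones((2, 4)), np.zeros((3, 4))]  # dummy per-region arrays
    a = reduce(lambda x, y: np.vstack((x, y)), obs_lsts)
    b = np.vstack(obs_lsts)  # same result, one pass
    assert np.array_equal(a, b)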
Example #7
 def test_test_susquehanna(self):
     t_test = self.config_data.model_dict["data"]["tRangeTest"]
     source_data = SusquehannaSource(self.config_data, t_test)
     # build the input data model object
     data_model = SusquehannaModel(source_data)
     with torch.cuda.device(1):
         # pred, obs = master_test(data_model)
         pred, obs = master_test(data_model, epoch=self.test_epoch)
         basin_area = data_model.data_attr[:, 0:1]
         mean_prep = data_model.ppt_avg_basin
         pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
         obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
         save_result(data_model.data_source.data_config.data_path['Temp'],
                     self.test_epoch, pred, obs)
         plot_we_need(data_model,
                      obs,
                      pred,
                      id_col="id",
                      lon_col="lon",
                      lat_col="lat")
Example #8
 def test_Susquehanna(self):
     t_test = self.config_data.model_dict["data"]["tRangeTest"]
     source_data = SusquehannaSource(self.config_data, t_test)
     # build the input data model object
     data_model = SusquehannaModel(source_data)
     with torch.cuda.device(1):
         # pred, obs = master_test(data_model)
         pred, obs = master_test(data_model, epoch=300)
         flow_pred_file = os.path.join(
             data_model.data_source.data_config.data_path['Temp'],
             'flow_pred')
         flow_obs_file = os.path.join(
             data_model.data_source.data_config.data_path['Temp'],
             'flow_obs')
         serialize_numpy(pred, flow_pred_file)
         serialize_numpy(obs, flow_obs_file)
         plot_we_need(data_model,
                      obs,
                      pred,
                      id_col="id",
                      lon_col="lon",
                      lat_col="lat")
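Note the extension-free names ('flow_pred', 'flow_obs') here versus the '.npy'-suffixed names read back in Example #6. If `serialize_numpy` simply wraps `np.save` (an assumption about this codebase), the mismatch is harmless, because NumPy appends the suffix itself:

    import numpy as np

    pred = np.zeros((3, 10))
    np.save('flow_pred', pred)            # written to disk as flow_pred.npy
    pred_back = np.load('flow_pred.npy')  # load with the suffix spelled out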
Example #9
 def test_test_gages(self):
     data_model_origin = GagesModel.load_datamodel(
         self.config_data.data_path["Temp"],
         data_source_file_name='test_data_source.txt',
         stat_file_name='test_Statistics.json',
         flow_file_name='test_flow.npy',
         forcing_file_name='test_forcing.npy',
         attr_file_name='test_attr.npy',
         f_dict_file_name='test_dictFactorize.json',
         var_dict_file_name='test_dictAttribute.json',
         t_s_dict_file_name='test_dictTimeSpace.json')
     warmup_len = 120
     t_range_all = data_model_origin.t_s_dict["t_final_range"]
     t_range_lst = hydro_time.t_range_days(t_range_all)
     t_range_warmup = hydro_time.t_days_lst2range(t_range_lst[:warmup_len])
     t_range_test = hydro_time.t_days_lst2range(t_range_lst[warmup_len:])
     data_model_warmup, data_model = GagesModel.data_models_of_train_test(
         data_model_origin, t_range_warmup, t_range_test)
     data_model.stat_dict = data_model_origin.stat_dict
     with torch.cuda.device(0):
         pred, obs = master_test(data_model, epoch=self.test_epoch)
         basin_area = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['DRAIN_SQKM'],
             is_return_dict=False)
         mean_prep = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
             is_return_dict=False)
         mean_prep = mean_prep / 365 * 10
         pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
         obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
         save_result(data_model.data_source.data_config.data_path['Temp'],
                     self.test_epoch, pred, obs)
         plot_we_need(data_model,
                      obs,
                      pred,
                      id_col="STAID",
                      lon_col="LNG_GAGE",
                      lat_col="LAT_GAGE")
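The warm-up handling at the top of this example splits the first 120 days off the test period, presumably so the model state can spin up before evaluation. A self-contained sketch of that split, assuming `hydro_time.t_range_days` expands a [start, end) pair into one timestamp per day (the stand-in below is hypothetical):

    import numpy as np

    def t_range_days(t_range):
        # hypothetical stand-in for hydro_time.t_range_days
        return np.arange(t_range[0], t_range[1], dtype='datetime64[D]')

    days = t_range_days(['1995-10-01', '2000-10-01'])
    warmup_len = 120
    t_range_warmup = [str(days[0]), str(days[warmup_len])]     # first 120 days
    t_range_test = [str(days[warmup_len]), str(days[-1] + 1)]  # the remainder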
Example #10
 def test_test_gages4susquehanna(self):
     config_dir = definitions.CONFIG_DIR
     config_file = os.path.join(config_dir, "susquehanna/config_exp4.ini")
     subdir = r"susquehanna/exp4"
     config_data = GagesConfig.set_subdir(config_file, subdir)
     dor = -0.02
     gages_model = GagesModels(config_data,
                               screen_basin_area_huc4=False,
                               DOR=dor)
     save_datamodel(gages_model.data_model_test,
                    data_source_file_name='test_data_source.txt',
                    stat_file_name='test_Statistics.json',
                    flow_file_name='test_flow',
                    forcing_file_name='test_forcing',
                    attr_file_name='test_attr',
                    f_dict_file_name='test_dictFactorize.json',
                    var_dict_file_name='test_dictAttribute.json',
                    t_s_dict_file_name='test_dictTimeSpace.json')
     data_model = gages_model.data_model_test
     with torch.cuda.device(2):
         pred, obs = master_test(data_model, epoch=self.test_epoch)
         basin_area = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['DRAIN_SQKM'],
             is_return_dict=False)
         mean_prep = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
             is_return_dict=False)
         mean_prep = mean_prep / 365 * 10
         pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
         obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
         save_result(data_model.data_source.data_config.data_path['Temp'],
                     self.test_epoch, pred, obs)
         plot_we_need(data_model,
                      obs,
                      pred,
                      id_col="STAID",
                      lon_col="LNG_GAGE",
                      lat_col="LAT_GAGE")
Example #11
 def test_dam_test(self):
     with torch.cuda.device(1):
         quick_data_dir = os.path.join(self.config_data.data_path["DB"],
                                       "quickdata")
         data_dir = os.path.join(quick_data_dir,
                                 "allnonref-dam_95-05_nan-0.1_00-1.0")
         data_model_test = GagesModel.load_datamodel(
             data_dir,
             data_source_file_name='test_data_source.txt',
             stat_file_name='test_Statistics.json',
             flow_file_name='test_flow.npy',
             forcing_file_name='test_forcing.npy',
             attr_file_name='test_attr.npy',
             f_dict_file_name='test_dictFactorize.json',
             var_dict_file_name='test_dictAttribute.json',
             t_s_dict_file_name='test_dictTimeSpace.json')
         gages_input = GagesModel.update_data_model(self.config_data,
                                                    data_model_test)
         pred, obs = master_test(gages_input, epoch=self.test_epoch)
         basin_area = gages_input.data_source.read_attr(
             gages_input.t_s_dict["sites_id"], ['DRAIN_SQKM'],
             is_return_dict=False)
         mean_prep = gages_input.data_source.read_attr(
             gages_input.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
             is_return_dict=False)
         mean_prep = mean_prep / 365 * 10
         pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
         obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
         save_result(gages_input.data_source.data_config.data_path['Temp'],
                     self.test_epoch, pred, obs)
         plot_we_need(gages_input,
                      obs,
                      pred,
                      id_col="STAID",
                      lon_col="LNG_GAGE",
                      lat_col="LAT_GAGE")
Example #12
 def test_test_camels_easy_lstm(self):
     data_model = CamelsModel.load_datamodel(
         self.config_data.data_path["Temp"],
         data_source_file_name='test_data_source.txt',
         stat_file_name='test_Statistics.json',
         flow_file_name='test_flow.npy',
         forcing_file_name='test_forcing.npy',
         attr_file_name='test_attr.npy',
         f_dict_file_name='test_dictFactorize.json',
         var_dict_file_name='test_dictAttribute.json',
         t_s_dict_file_name='test_dictTimeSpace.json')
     data_model_train = CamelsModel.load_datamodel(
         self.config_data.data_path["Temp"],
         data_source_file_name='data_source.txt',
         stat_file_name='Statistics.json',
         flow_file_name='flow.npy',
         forcing_file_name='forcing.npy',
         attr_file_name='attr.npy',
         f_dict_file_name='dictFactorize.json',
         var_dict_file_name='dictAttribute.json',
         t_s_dict_file_name='dictTimeSpace.json')
     # pred, obs = master_test(data_model)
     with torch.cuda.device(0):
         data_set = StreamflowInputDataset(data_model, train_mode=False)
         # data_set = StreamflowInputDataset(data_model_train, train_mode=False)
         pred, obs = master_test_easier_lstm(data_set, load_epoch=1)
         # pred, obs = master_test_better_lstm(data_set)
         basin_area = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['area_gages2'],
             is_return_dict=False)
         mean_prep = data_model.data_source.read_attr(
             data_model.t_s_dict["sites_id"], ['p_mean'],
             is_return_dict=False)
         pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
         obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
         plot_we_need(data_set.data_model, obs, pred)
Example #13
    def test_dam_test(self):
        quick_data_dir = os.path.join(self.config_data.data_path["DB"],
                                      "quickdata")
        sim_data_dir = os.path.join(quick_data_dir,
                                    "allref_85-05_nan-0.1_00-1.0")
        data_dir = os.path.join(quick_data_dir,
                                "allnonref_85-05_nan-0.1_00-1.0")
        data_model_sim8595 = GagesModel.load_datamodel(
            sim_data_dir,
            data_source_file_name='data_source.txt',
            stat_file_name='Statistics.json',
            flow_file_name='flow.npy',
            forcing_file_name='forcing.npy',
            attr_file_name='attr.npy',
            f_dict_file_name='dictFactorize.json',
            var_dict_file_name='dictAttribute.json',
            t_s_dict_file_name='dictTimeSpace.json')
        data_model_8595 = GagesModel.load_datamodel(
            data_dir,
            data_source_file_name='data_source.txt',
            stat_file_name='Statistics.json',
            flow_file_name='flow.npy',
            forcing_file_name='forcing.npy',
            attr_file_name='attr.npy',
            f_dict_file_name='dictFactorize.json',
            var_dict_file_name='dictAttribute.json',
            t_s_dict_file_name='dictTimeSpace.json')
        data_model_sim9505 = GagesModel.load_datamodel(
            sim_data_dir,
            data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json',
            flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy',
            attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')
        data_model_9505 = GagesModel.load_datamodel(
            data_dir,
            data_source_file_name='test_data_source.txt',
            stat_file_name='test_Statistics.json',
            flow_file_name='test_flow.npy',
            forcing_file_name='test_forcing.npy',
            attr_file_name='test_attr.npy',
            f_dict_file_name='test_dictFactorize.json',
            var_dict_file_name='test_dictAttribute.json',
            t_s_dict_file_name='test_dictTimeSpace.json')

        sim_gages_model_train = GagesModel.update_data_model(
            self.sim_config_data, data_model_sim8595, data_attr_update=True)
        gages_model_train = GagesModel.update_data_model(self.config_data,
                                                         data_model_8595,
                                                         data_attr_update=True)
        sim_gages_model_test = GagesModel.update_data_model(
            self.sim_config_data,
            data_model_sim9505,
            data_attr_update=True,
            train_stat_dict=sim_gages_model_train.stat_dict)
        gages_model_test = GagesModel.update_data_model(
            self.config_data,
            data_model_9505,
            data_attr_update=True,
            train_stat_dict=gages_model_train.stat_dict)
        nid_dir = os.path.join(
            "/".join(self.config_data.data_path["DB"].split("/")[:-1]), "nid",
            "quickdata")
        nid_input = NidModel.load_nidmodel(
            nid_dir,
            nid_file=self.nid_file,
            nid_source_file_name='nid_source.txt',
            nid_data_file_name='nid_data.shp')
        gage_main_dam_purpose = unserialize_json(
            os.path.join(nid_dir, "dam_main_purpose_dict.json"))
        gage_main_dam_purpose_lst = list(gage_main_dam_purpose.values())
        gage_main_dam_purpose_unique = np.unique(gage_main_dam_purpose_lst)
        data_input = GagesDamDataModel(gages_model_test, nid_input, True,
                                       gage_main_dam_purpose)
        for i in range(0, gage_main_dam_purpose_unique.size):
            sim_gages_model_test.update_model_param('train', nEpoch=300)
            gages_input = choose_which_purpose(
                data_input, purpose=gage_main_dam_purpose_unique[i])
            new_temp_dir = os.path.join(
                gages_input.data_source.data_config.model_dict["dir"]["Temp"],
                gage_main_dam_purpose_unique[i])
            new_out_dir = os.path.join(
                gages_input.data_source.data_config.model_dict["dir"]["Out"],
                gage_main_dam_purpose_unique[i])
            gages_input.update_datamodel_dir(new_temp_dir, new_out_dir)
            model_input = GagesSimDataModel(sim_gages_model_test, gages_input)
            pred, obs = master_test_natural_flow(model_input,
                                                 epoch=self.test_epoch)
            basin_area = model_input.data_model2.data_source.read_attr(
                model_input.data_model2.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                is_return_dict=False)
            mean_prep = model_input.data_model2.data_source.read_attr(
                model_input.data_model2.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                is_return_dict=False)
            mean_prep = mean_prep / 365 * 10
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            save_result(
                model_input.data_model2.data_source.data_config.
                data_path['Temp'], str(self.test_epoch), pred, obs)
            plot_we_need(gages_input,
                         obs,
                         pred,
                         id_col="STAID",
                         lon_col="LNG_GAGE",
                         lat_col="LAT_GAGE")
Example #14
            pred, obs = master_test(gages_model_test, epoch=test_epoch)
            basin_area = gages_model_test.data_source.read_attr(
                gages_model_test.t_s_dict["sites_id"], ['DRAIN_SQKM'],
                is_return_dict=False)
            mean_prep = gages_model_test.data_source.read_attr(
                gages_model_test.t_s_dict["sites_id"], ['PPTAVG_BASIN'],
                is_return_dict=False)
            mean_prep = mean_prep / 365 * 10
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            save_result(
                gages_model_test.data_source.data_config.data_path['Temp'],
                test_epoch, pred, obs)
            plot_we_need(gages_model_test,
                         obs,
                         pred,
                         id_col="STAID",
                         lon_col="LNG_GAGE",
                         lat_col="LAT_GAGE")

# plot box - latency
if 'post' in doLst:
    config_data = load_dataconfig_case_exp(cfg, exp_lst[0])
    quick_data_dir = os.path.join(config_data.data_path["DB"], "quickdata")
    data_dir = os.path.join(quick_data_dir, "conus-all_90-10_nan-0.0_00-1.0")
    data_model_train = GagesModel.load_datamodel(
        data_dir,
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')