# Evaluate every saved epoch of the US-trained models on the GB validation
# basins for cross-validation folds 2-4.
from pathlib import Path

from camelsml import evaluate, load_config

cfg = load_config("run_config.txt")
cfg["val_basin_file"] = Path("gb_split/split_seed_19970204/basins_validation.txt")
for k in range(2, 5):
    runs = list(Path(f"/work/bernharl/train_us_val_gb/{k}").glob("*"))
    print(runs)
    if len(runs) != 1:
        raise RuntimeError(
            f"Number of runs per cross validation fold should be 1, not {len(runs)}"
        )
    cfg["run_dir"] = runs[0]
    cfg["train_basin_file"] = Path(f"cv/cross_validation_seed_19970204/{k}/basins_train.txt")
    cfg["eval_dir"] = Path(f"/work/bernharl/train_us_val_gb/val_gb/{k}")
    for i in range(1, cfg["epochs"] + 1):
        evaluate(cfg, split="val", epoch=i)
# Train one model per fold of the five-fold train_gb_val_us cross-validation split.
from pathlib import Path

from camelsml import load_config, train

cfg = load_config(cfg_file="run_config.txt", device="cuda:0", num_workers=60)
cfg["test_basin_file"] = Path(
    "/home/bernhard/git/Master-Thesis/runs/combined_dataset/train_gb_val_us/cv/cross_validation_seed_19970204/basins_test.txt"
)
for i in range(5):
    cfg["run_dir"] = Path(f"{i}/")
    cfg["train_basin_file"] = Path(
        f"/home/bernhard/git/Master-Thesis/runs/combined_dataset/train_gb_val_us/cv/cross_validation_seed_19970204/{i}/basins_train.txt"
    )
    cfg["val_basin_file"] = Path(
        f"/home/bernhard/git/Master-Thesis/runs/combined_dataset/train_gb_val_us/cv/cross_validation_seed_19970204/{i}/basins_val.txt"
    )
    train(cfg)
# Run the permutation test for feature importance on fold 0 of the
# correlation_reduction cross-validation and pickle the results.
from pathlib import Path
import pickle
import random

import numpy as np

from camelsml import permutation_test, load_config

permutation_folder = Path("permutation")
permutation_folder.mkdir(exist_ok=True)
cfg = load_config("run_config.txt", device="cuda:0", num_workers=24)
# Seed both RNGs so the permutations are reproducible.
np.random.seed(cfg["seed"])
random.seed(cfg["seed"])
for i in range(0, 1):
    save_path = permutation_folder / f"{i}"
    save_path.mkdir(exist_ok=True)
    cv_dir = list((Path().absolute() / f"{i}").glob("*"))
    if len(cv_dir) != 1:
        raise RuntimeError("cv_dir must contain exactly one run")
    cv_dir = cv_dir[0]
    cfg["run_dir"] = cv_dir
    cfg["train_basin_file"] = Path(
        f"/home/bernhard/git/Master-Thesis/runs/correlation_reduction/cross_validation/cross_validation_seed_19970204/{i}/basins_train.txt"
    )
    cfg["val_basin_file"] = Path(
        f"/home/bernhard/git/Master-Thesis/runs/correlation_reduction/cross_validation/cross_validation_seed_19970204/{i}/basins_val.txt"
    )
    with open(save_path / "i_list.pickle", "wb") as outfile:
        pickle.dump(permutation_test(cfg, k=2), outfile)
# Build the mixed GB+US dataset by combining the two existing
# cross-validation splits into a new five-fold split.
from pathlib import Path

from camelsml import load_config, combine_cv_datasets

cfg = load_config(cfg_file="../camels_root_info.txt")
cv_folder_us = Path(
    "/home/bernhard/git/Master-Thesis/runs/combined_dataset/train_us_val_gb/cv"
)
cv_folder_gb = Path(
    "/home/bernhard/git/Master-Thesis/runs/combined_dataset/train_gb_val_us/cv"
)
store_folder = Path("/home/bernhard/git/Master-Thesis/runs/combined_dataset/mixed/cv")
combine_cv_datasets(
    cv_folder_1=cv_folder_us,
    cv_folder_2=cv_folder_gb,
    k=5,
    seed=19970204,
    normalize=True,
    store_folder=store_folder,
    dataset=cfg["dataset"],
    timeseries=cfg["timeseries"],
)
# Evaluate the remaining epochs of fold 3 on the GB validation basins.
from pathlib import Path

from camelsml import evaluate, load_config

cfg = load_config("run_config.txt", device="cuda:0")
cfg["val_basin_file"] = Path("gb_split/split_seed_19970204/basins_validation.txt")
for k in range(3, 4):
    runs = list(Path(f"{k}").glob("*"))
    print(runs)
    if len(runs) != 1:
        raise RuntimeError(
            f"Number of runs per cross validation fold should be 1, not {len(runs)}"
        )
    cfg["run_dir"] = runs[0]
    cfg["train_basin_file"] = Path(f"cv/cross_validation_seed_19970204/{k}/basins_train.txt")
    cfg["eval_dir"] = Path(f"val_gb/{k}")
    # Resume fold 0 from epoch 25; all other folds start from epoch 1.
    start = 25 if k == 0 else 1
    for i in range(start, cfg["epochs"] + 1):
        evaluate(cfg, split="val", epoch=i)
# Test run: train a single model using the configuration under training_runs/test/.
from pathlib import Path

from camelsml import load_config, train, split_basins

cfg = load_config("training_runs/test/test.txt", device="cuda:0", num_workers=24)
# The basin split has already been generated; uncomment to recreate it.
# split_basins(
#     cfg["camels_root"],
#     "/home/bernhard/git/ealstm_regional_modeling_camels_gb/data/basin_list.txt",
#     split=[0.67, 0.33],
#     store_folder="training_runs",
#     seed=1010,
# )
train(cfg)