Ejemplo n.º 1
0
 def setUp(self):
     """Create ``self.dt``: a ModelTrainer wired for a one-epoch dry-spot
     classification run on flow-front images.

     Only paths, the data loader and the trainer are prepared here; the
     actual run is triggered by the individual test methods.
     """
     self.training_save_path = resources.test_training_out_dir
     self.training_data_paths = [
         resources.test_training_src_dir / 'dry_spot_from_ff'
     ]
     # A single epoch keeps the test run short.
     self.expected_num_epochs_during_training = 1
     dlds = DataloaderDryspots(image_size=(143, 111),
                               ignore_useless_states=False)
     self.dt = ModelTrainer(
         lambda: DrySpotModel(),
         data_source_paths=self.training_data_paths,
         save_path=self.training_save_path,
         batch_size=10,
         epochs=self.expected_num_epochs_during_training,
         num_validation_samples=5,
         num_test_samples=5,
         data_gather_function=dg.get_filelist_within_folder,
         data_processing_function=dlds.get_flowfront_bool_dryspot,
         loss_criterion=torch.nn.BCELoss(),
         classification_evaluator_function=lambda summary_writer:
         BinaryClassificationEvaluator(summary_writer=summary_writer,
                                       save_path=self.training_save_path,
                                       skip_images=True),
         data_root=resources.test_src_dir,
     )
Ejemplo n.º 2
0
    def test_training_ok_notok(self):
        """Train the pressure-sequence success/failure classifier once and
        check that the run's ``output.log`` contains at least one
        "Mean Loss on Eval" entry."""
        sensor_loader = DataLoaderSensor()

        trainer = ModelTrainer(
            lambda: ERFH5_PressureSequence_Model(),
            self.training_data_paths,
            self.training_save_path,
            epochs=self.expected_num_epochs_during_training,
            data_gather_function=dg.get_filelist_within_folder,
            data_processing_function=sensor_loader.sensorgrid_simulationsuccess,
            num_validation_samples=1,
            num_test_samples=1,
            loss_criterion=torch.nn.BCELoss(),
            classification_evaluator_function=lambda summary_writer:
            BinaryClassificationEvaluator(summary_writer=summary_writer),
            data_root=test_resources.test_src_dir,
        )

        trainer.start_training()

        # Each run creates one subdirectory under the save path.
        run_dirs = [d for d in self.training_save_path.iterdir() if d.is_dir()]
        with open(run_dirs[0] / "output.log") as log_file:
            log_text = log_file.read()
        eval_entries = re.findall("Mean Loss on Eval", log_text)
        self.assertTrue(len(eval_entries) > 0)
Ejemplo n.º 3
0
 def test_training_load_optimizer(self):
     """Resume training with a stored optimizer checkpoint and verify the
     optimizer state was actually restored.

     After one short run, ``st.optimizer.state`` must hold at least one
     entry — a freshly initialized optimizer that was never loaded/stepped
     from the checkpoint would not.
     """
     dl = DataloaderImages((149, 117), ignore_useless_states=False)
     st = ModelTrainer(
         lambda: DeconvModelEfficient(),
         self.test_src_dir,
         self.training_save_path,
         load_datasets_path=self.test_split_dir,
         cache_path=None,
         batch_size=16,
         train_print_frequency=10,
         epochs=self.expected_num_epochs_during_training,
         num_workers=4,
         num_validation_samples=2,
         num_test_samples=self.num_test_samples,
         data_processing_function=dl.get_sensordata_and_flowfront,
         data_gather_function=get_filelist_within_folder_blacklisted,
         loss_criterion=torch.nn.BCELoss(),
         # Checkpoint to restore optimizer state from — the point of this test.
         optimizer_path=self.checkpoint,
         classification_evaluator_function=lambda summary_writer:
         SensorToFlowfrontEvaluator(summary_writer=summary_writer),
         data_root=test_resources.test_src_dir,
     )
     st.start_training()
     after = len(st.optimizer.state.keys())
     """ Optimizer has now more than 0 states, therefore was loaded """
     self.assertGreater(after, 0)
Ejemplo n.º 4
0
 def test_eval_preparation(self):
     """Run the trainer with ``epochs=0`` and check the evaluation
     scaffolding it writes to the output directory.

     Expected artifacts in the run directory: a code copy under
     ``rtm-predictions`` and a slurm script ``run_model_eval.sh`` whose
     last line references the run directory (third token from the end).
     """
     dl = DataloaderImages((149, 117), ignore_useless_states=False)
     st = ModelTrainer(
         lambda: DeconvModelEfficient(),
         self.test_src_dir,
         self.eval_output_path,
         load_datasets_path=self.test_split_dir,
         cache_path=None,
         batch_size=2,
         train_print_frequency=10,
         epochs=0,  # zero epochs — presumably only setup runs; confirm in ModelTrainer
         num_workers=4,
         num_validation_samples=2,
         num_test_samples=self.num_test_samples,
         data_processing_function=dl.get_sensordata_and_flowfront,
         data_gather_function=get_filelist_within_folder_blacklisted,
         classification_evaluator_function=lambda summary_writer:
         SensorToFlowfrontEvaluator(summary_writer=summary_writer),
         data_root=test_resources.test_src_dir,
     )
     st.start_training()
     dirs = [e for e in self.eval_output_path.iterdir() if e.is_dir()]
     code_dir = dirs[0] / 'rtm-predictions'
     slurm_script = dirs[0] / 'run_model_eval.sh'
     self.assertTrue(os.path.isdir(code_dir))
     self.assertTrue(os.path.isfile(slurm_script))
     with open(slurm_script) as f:
         lines = f.read().splitlines()
         tokens = lines[-1].split()
         self.assertEqual(dirs[0], Path(tokens[-3]))
     # Release the summary writer so the output directory can be cleaned up.
     st.writer.flush()
     st.writer.close()
Ejemplo n.º 5
0
 def test_training(self):
     """Train for two epochs and validate the produced ``output.log``.

     Asserts exactly one "Mean Loss on Eval" entry per epoch and that no
     training step number is logged twice.
     """
     num_epochs = 2
     dl = DataloaderImages((149, 117), ignore_useless_states=False)
     st = ModelTrainer(
         lambda: DeconvModelEfficient(),
         self.test_src_dir,
         self.training_save_path,
         load_datasets_path=self.test_split_dir,
         cache_path=None,
         batch_size=16,
         train_print_frequency=10,
         epochs=num_epochs,
         num_workers=4,
         num_validation_samples=2,
         num_test_samples=self.num_test_samples,
         data_processing_function=dl.get_sensordata_and_flowfront,
         data_gather_function=get_filelist_within_folder_blacklisted,
         loss_criterion=torch.nn.BCELoss(),
         classification_evaluator_function=lambda summary_writer:
         SensorToFlowfrontEvaluator(summary_writer=summary_writer),
         data_root=test_resources.test_src_dir,
     )
     st.start_training()
     dirs = [e for e in self.training_save_path.iterdir() if e.is_dir()]
     with open(dirs[0] / 'output.log') as f:
         content = f.read()
         epochs = re.findall('Mean Loss on Eval', content)
         self.assertEqual(num_epochs, len(epochs))
         # Check if steps are growing / if there are doubled steps in the output
         steps = [
             int(re.findall(r'\d+', x)[0])
             for x in re.findall(r'Duration of step.+\d:', content)
         ]
         # A set collapses duplicates, so equal lengths mean all steps unique.
         self.assertEqual(len(set(steps)), len(steps))
Ejemplo n.º 6
0
 def create_trainer_and_start(self,
                              out_path,
                              epochs=1,
                              load_test_set=False):
     """Build and return a ModelTrainer for the S20 dry-spot model.

     NOTE(review): despite the name, this only constructs the trainer and
     returns it — the caller must invoke ``start_training`` itself.

     :param out_path: directory the trainer writes its output to.
     :param epochs: number of training epochs (default 1).
     :param load_test_set: forwarded as ``load_test_set_in_training_mode``.
     :return: the configured ModelTrainer instance.
     """
     # sensor_indizes=((1, 8), (1, 8)) — presumably every 8th sensor in both
     # grid directions; confirm against DataloaderFlowfrontSensor.
     dlds = DataloaderFlowfrontSensor(sensor_indizes=((1, 8), (1, 8)))
     m = ModelTrainer(
         lambda: S20DryspotModelFCWide(),
         data_source_paths=tr_resources.get_data_paths_debug(),
         save_path=out_path,
         load_datasets_path=self.torch_dataset_resources /
         "reference_datasets",
         cache_path=None,
         num_validation_samples=8,
         num_test_samples=8,
         num_workers=0,
         epochs=epochs,
         data_processing_function=dlds.get_flowfront_sensor_bool_dryspot,
         data_gather_function=dg.get_filelist_within_folder_blacklisted,
         loss_criterion=torch.nn.BCELoss(),
         optimizer_function=lambda params: torch.optim.AdamW(params,
                                                             lr=1e-4),
         classification_evaluator_function=lambda summary_writer:
         BinaryClassificationEvaluator(summary_writer=summary_writer),
         load_test_set_in_training_mode=load_test_set,
         data_root=test_resources.test_src_dir,
     )
     return m
    def init_trainer():
        """Construct a ModelTrainer for sensor-to-dry-spot classification.

        Closes over outer-scope configuration not visible here (``model``,
        ``filepaths``, ``save_path``, ``cache_path``, ``batch_size``,
        ``train_print_frequency``, ``epochs``, ``num_workers``,
        ``num_validation_samples``, ``num_test_samples``, ``data_root``).

        :return: the configured ModelTrainer instance (not started).
        """
        dlds = DataloaderDryspots()
        m = ModelTrainer(
            lambda: model,
            data_source_paths=filepaths,
            save_path=save_path,
            cache_path=cache_path,
            batch_size=batch_size,
            train_print_frequency=train_print_frequency,
            # NOTE(review): NoOpLoopingStrategy together with
            # dummy_epoch=False, caching_torch=False and
            # hold_samples_in_memory=False — presumably a pure streaming
            # configuration; confirm in ModelTrainer.
            looping_strategy=NoOpLoopingStrategy(),
            epochs=epochs,
            dummy_epoch=False,
            num_workers=num_workers,
            num_validation_samples=num_validation_samples,
            num_test_samples=num_test_samples,
            data_processing_function=dlds.
            get_sensor_bool_dryspot_resized_matrix,
            data_gather_function=get_filelist_within_folder_blacklisted,
            data_root=data_root,
            loss_criterion=torch.nn.BCELoss(),
            optimizer_function=lambda params: torch.optim.AdamW(params,
                                                                lr=1e-4),
            classification_evaluator_function=lambda:
            BinaryClassificationEvaluator(),
            lr_scheduler_function=lambda optim: ExponentialLR(optim, 0.5),
            caching_torch=False,
            demo_path=None,
            hold_samples_in_memory=False,
        )

        return m
Ejemplo n.º 8
0
    def test_eval(self):
        """Load a checkpoint, run inference on the test set and verify the
        resulting log and image output.

        Asserts that the eval loss logged to ``test_output.log`` matches
        ``self.expected_loss`` (rounded to 4 decimals) and that the number
        of ``.jpg`` images written equals ``self.expected_num_frames``.
        """
        dl = DataloaderImages((149, 117), ignore_useless_states=False)
        st = ModelTrainer(
            lambda: DeconvModelEfficient(),
            self.test_src_dir,
            self.eval_output_path,
            load_datasets_path=self.test_split_dir,
            cache_path=None,
            batch_size=2,
            train_print_frequency=10,
            epochs=self.expected_num_epochs_during_training,
            num_workers=10,
            num_validation_samples=2,
            num_test_samples=self.num_test_samples,
            data_processing_function=dl.get_sensordata_and_flowfront,
            data_gather_function=get_filelist_within_folder_blacklisted,
            loss_criterion=torch.nn.BCELoss(),
            classification_evaluator_function=lambda summary_writer:
            SensorToFlowfrontEvaluator(self.eval_output_path /
                                       "eval_on_test_set",
                                       skip_images=False,
                                       summary_writer=summary_writer),
            data_root=test_resources.test_src_dir,
        )

        st.inference_on_test_set(
            self.eval_output_path,
            self.checkpoint,
            classification_evaluator_function=lambda summary_writer:
            SensorToFlowfrontEvaluator(self.eval_output_path /
                                       "eval_on_test_set",
                                       skip_images=False,
                                       summary_writer=summary_writer))

        with open(self.eval_output_path / "eval_on_test_set" /
                  "test_output.log") as f:
            content = f.read()
            # BUGFIX: escape the decimal point. The previous pattern
            # r'\d+.\d+' let '.' match any character instead of a literal
            # dot when extracting the loss value.
            loss = float(
                re.findall(r'\d+\.\d+',
                           re.findall(r'Eval: \d+\.\d+', content)[0])[0])
            self.assertEqual(np.round(loss, 4), self.expected_loss)
        img_path = self.eval_output_path / 'eval_on_test_set' / 'images'
        list_all_imgs = list(img_path.glob('**/*.jpg'))
        self.assertEqual(len(list_all_imgs), self.expected_num_frames)
Ejemplo n.º 9
0
class TestTrainingDryspotFF(unittest.TestCase):
    def setUp(self):
        self.training_save_path = resources.test_training_out_dir
        self.training_data_paths = [
            resources.test_training_src_dir / 'dry_spot_from_ff'
        ]
        self.expected_num_epochs_during_training = 1
        dlds = DataloaderDryspots(image_size=(143, 111),
                                  ignore_useless_states=False)
        self.dt = ModelTrainer(
            lambda: DrySpotModel(),
            data_source_paths=self.training_data_paths,
            save_path=self.training_save_path,
            batch_size=10,
            epochs=self.expected_num_epochs_during_training,
            num_validation_samples=5,
            num_test_samples=5,
            data_gather_function=dg.get_filelist_within_folder,
            data_processing_function=dlds.get_flowfront_bool_dryspot,
            loss_criterion=torch.nn.BCELoss(),
            classification_evaluator_function=lambda summary_writer:
            BinaryClassificationEvaluator(summary_writer=summary_writer,
                                          save_path=self.training_save_path,
                                          skip_images=True),
            data_root=resources.test_src_dir,
        )

    def test_training(self):
        self.dt.start_training()
        dirs = [e for e in self.training_save_path.iterdir() if e.is_dir()]
        with open(dirs[0] / "output.log") as f:
            content = f.read()
            epochs = re.findall("Mean Loss on Eval", content)
            self.assertTrue(len(epochs) > 0)

    def tearDown(self) -> None:
        logging.shutdown()
        r = logging.getLogger("")
        [r.removeHandler(x) for x in r.handlers]
        shutil.rmtree(self.training_save_path)
Ejemplo n.º 10
0
    def test_save_load_training(self):
        """Train twice into the same save path to check that a second run
        works on top of the artifacts saved by the first (save/load path).

        The two runs were previously two verbatim copies of the same
        construction code; they are factored into one local helper so the
        configurations cannot drift apart.
        """
        num_epochs = 2

        def _run_training():
            # One complete training run with the shared configuration.
            dl = DataloaderImages((149, 117), ignore_useless_states=False)
            st = ModelTrainer(
                lambda: DeconvModelEfficient(),
                self.test_src_dir,
                self.training_save_path,
                load_datasets_path=self.test_split_dir,
                cache_path=None,
                batch_size=16,
                train_print_frequency=10,
                epochs=num_epochs,
                num_workers=4,
                num_validation_samples=2,
                num_test_samples=self.num_test_samples,
                data_processing_function=dl.get_sensordata_and_flowfront,
                data_gather_function=get_filelist_within_folder_blacklisted,
                loss_criterion=torch.nn.BCELoss(),
                classification_evaluator_function=lambda summary_writer:
                SensorToFlowfrontEvaluator(summary_writer=summary_writer),
                data_root=test_resources.test_src_dir,
            )
            st.start_training()

        _run_training()   # first run: saves checkpoints/datasets
        _run_training()   # second run: must cope with the saved state
Ejemplo n.º 11
0
    # Mesh-based sensor -> flow-front training. NOTE(review): this fragment
    # relies on outer-scope configuration not visible here (batch_size,
    # filepaths, save_path, cache_path, train_print_frequency, epochs,
    # num_workers, num_validation_samples, num_test_samples, data_root).
    dlm = DataLoaderMesh(sensor_indices=((1, 2), (1, 2)))
    mc = MeshCreator(batch_size)
    # Mesh is built once per batch size and handed to the model constructor.
    mesh = mc.batched_mesh_torch(batch_size)
    model = SensorMeshToFlowFrontModel(mesh, batch_size=batch_size)

    m = ModelTrainer(
        lambda: model,
        data_source_paths=filepaths,
        save_path=save_path,
        cache_path=cache_path,
        batch_size=batch_size,
        train_print_frequency=train_print_frequency,
        epochs=epochs,
        dummy_epoch=True,
        num_workers=num_workers,
        num_validation_samples=num_validation_samples,
        num_test_samples=num_test_samples,
        data_processing_function=dlm.get_sensor_flowfront_mesh,
        data_gather_function=get_filelist_within_folder_blacklisted,
        data_root=data_root,
        loss_criterion=torch.nn.BCELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=1e-3),
        classification_evaluator_function=lambda: MeshEvaluator(),
        lr_scheduler_function=None,
        caching_torch=False,
        demo_path=None,
        # Fixed-size mesh batches — a smaller final batch would not fit the
        # pre-built mesh, hence drop_last_batch=True.
        drop_last_batch=True)

    m.start_training()
Ejemplo n.º 12
0
    args = read_cmd_params()

    img_size = (112, 96)
    dl = DataloaderImages(image_size=img_size, sensor_indizes=((1, 4), (1, 4)))

    m = ModelTrainer(
        lambda: S80DeconvModelEfficient2(demo_mode=True
                                         if args.demo is not None else False),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path if args.demo is None else Path(args.demo),
        load_datasets_path=r.datasets_dryspots,
        cache_path=r.cache_path,
        batch_size=128,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_sensordata_and_flowfront,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda summary_writer:
        SensorToFlowfrontEvaluator(summary_writer=summary_writer),
        demo_path=args.demo,
        run_eval_step_before_training=True,
        resize_label_to=img_size if args.demo is not None else (0, 0))

    if not args.run_eval:
        m.start_training()
    else:
        m.inference_on_test_set(
from Trainer.evaluation import BinaryClassificationEvaluator
from Utils.eval_utils import run_eval_w_binary_classificator

if __name__ == "__main__":
    # Advanced evaluation of the pretrained S20 densenet baseline: rebuild
    # the trainer configuration and hand it, together with the checkpoint,
    # to the binary-classification evaluation helper.
    dl = DataloaderDryspots(sensor_indizes=((1, 8), (1, 8)),
                            aux_info=True)

    checkpoint_p = r.chkp_S20_densenet_baseline_full_trainingset
    # Results are written next to the checkpoint.
    adv_output_dir = checkpoint_p.parent / "advanced_eval"
    m = ModelTrainer(
        lambda: S20DryspotModelFCWide(),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        dataset_split_path=r.dataset_split,
        cache_path=r.cache_path,
        batch_size=32768,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_sensor_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.BCELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda: BinaryClassificationEvaluator(),
        caching_torch=False
    )

    run_eval_w_binary_classificator(adv_output_dir, m, checkpoint_p)
Ejemplo n.º 14
0
    else:
        checkpoint_path = "Use your own path to checkpoint."

    dlds = DataloaderDryspots()
    m = ModelTrainer(
        lambda: SensorDeconvToDryspotEfficient2(pretrained="deconv_weights",
                                                checkpoint_path=Path(
                                                    checkpoint_path),
                                                freeze_nlayers=8),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path if args.demo is None else Path(args.demo),
        load_datasets_path=r.datasets_dryspots,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dlds.get_sensor_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.BCELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=1e-4),
        classification_evaluator_function=lambda summary_writer:
        BinaryClassificationEvaluator(summary_writer=summary_writer),
        lr_scheduler_function=lambda optim: ExponentialLR(optim, 0.5),
        demo_path=args.demo)

    if not args.run_eval:
        m.start_training()
    else:
Ejemplo n.º 15
0
if __name__ == "__main__":
    args = read_cmd_params()

    dlds = DataloaderFlowfrontSensor(sensor_indizes=((0, 1), (0, 1)),
                                     frame_count=1,
                                     use_binary_sensor_only=True)
    m = ModelTrainer(lambda: S1140DryspotModelFCWide(),
                     data_source_paths=r.get_data_paths_debug(),
                     save_path=r.save_path,
                     dataset_split_path=r.dataset_split,
                     cache_path=r.cache_path,
                     batch_size=2048,
                     train_print_frequency=100,
                     epochs=100,
                     num_workers=75,
                     num_validation_samples=512,  # 131072,
                     num_test_samples=1024,  # 1048576,
                     data_processing_function=dlds.get_flowfront_sensor_bool_dryspot,
                     data_gather_function=get_filelist_within_folder_blacklisted,
                     loss_criterion=torch.nn.BCELoss(),
                     optimizer_function=lambda params: torch.optim.AdamW(params, lr=1e-4),
                     classification_evaluator_function=lambda: BinaryClassificationEvaluator(),
                     dont_care_num_samples=True
                     )

    if not args.run_eval:
        m.start_training()
    else:
        m.inference_on_test_set(
            output_path=Path(args.eval),
            checkpoint_path=Path(args.checkpoint_path),
        checkpoint_path = "Use your own path to checkpoint."

    dl = DataloaderDryspots(sensor_indizes=((1, 4), (1, 4)))

    m = ModelTrainer(
        lambda: S80Deconv2ToDrySpotEff(demo_mode=True
                                       if args.demo is not None else False,
                                       pretrained="deconv_weights",
                                       checkpoint_path=Path(checkpoint_path),
                                       freeze_nlayers=9),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path if args.demo is None else Path(args.demo),
        dataset_split_path=r.dataset_split,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_sensor_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda:
        BinaryClassificationEvaluator(),
        lr_scheduler_function=lambda optim: ExponentialLR(optim, 0.5),
        demo_path=args.demo)

    if not args.run_eval:
        m.start_training()
    else:
Ejemplo n.º 17
0
    """
    This is the starting point for training the feed foward network with 80 sensor data to binary classification.
    """
    args = read_cmd_params()

    dlds = DataloaderDryspots(sensor_indizes=((1, 4), (1, 4)))
    m = ModelTrainer(
        lambda: S80DryspotModelFCWide(demo_mode=True if args.demo is not None else False),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path if args.demo is None else Path(args.demo),
        load_datasets_path=r.datasets_dryspots,
        cache_path=r.cache_path,
        batch_size=32768,
        train_print_frequency=100,
        epochs=100,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dlds.get_sensor_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.BCELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=1e-4),
        classification_evaluator_function=lambda summary_writer:
        BinaryClassificationEvaluator(summary_writer=summary_writer),
        demo_path=args.demo
    )

    if not args.run_eval:
        m.start_training()
    else:
        m.inference_on_test_set(
            output_path=Path(args.eval),
Ejemplo n.º 18
0
import torch

import Resources.training as r
from Models.erfh5_pressuresequence_CRNN import ERFH5_PressureSequence_Model
from Pipeline import data_loader_sensor as dls, data_gather as dg
from Trainer.ModelTrainer import ModelTrainer
from Trainer.evaluation import BinaryClassificationEvaluator

if __name__ == "__main__":
    # Entry point: train the CRNN pressure-sequence model for two epochs on
    # a single 5000p simulation data set (binary success classification).
    data_source_paths = [r.data_root / "2019-07-24_16-32-40_5000p"]
    save_path = r.save_path
    cache_path = r.cache_path

    trainer = ModelTrainer(
        lambda: ERFH5_PressureSequence_Model(),
        data_source_paths,
        save_path,
        None,  # third positional slot deliberately unset for this run
        epochs=2,
        data_gather_function=dg.get_filelist_within_folder,
        data_processing_function=dls.sensorgrid_simulationsuccess,
        loss_criterion=torch.nn.BCELoss(),
        classification_evaluator_function=BinaryClassificationEvaluator(),
    )

    trainer.start_training()
    print("training finished")
    # def get_sampler(data_source):
    #     return RandomOverSampler(data_source, multiply_by=2)

    m = ModelTrainer(
        lambda: S80Deconv2ToDrySpotEff(pretrained="deconv_weights",
                                       checkpoint_path=r.chkp_S80_to_ff2,
                                       freeze_nlayers=9,
                                       round_at=0.8),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        load_datasets_path=r.datasets_dryspots,
        cache_path=r.cache_path,
        batch_size=8192,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_sensor_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda summary_writer:
        BinaryClassificationEvaluator(summary_writer=summary_writer),
        # lr_scheduler_function=lambda optim: ExponentialLR(optim, 0.8),
        # sampler=get_sampler
    )

    if not args.run_eval:
        m.start_training()
    else:
                          sensor_indizes=((1, 4), (1, 4)))

    checkpoint_p = r.chkp_S80_to_ff2
    adv_output_dir = checkpoint_p.parent / "advanced_eval"

    m = ModelTrainer(
        lambda: S80DeconvModelEfficient2(pretrained="all",
                                         freeze_nlayers=9,
                                         checkpoint_path=checkpoint_p,
                                         round_at=.8),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        dataset_split_path=r.dataset_split,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_sensordata_and_flowfront,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda: SensorToFlowfrontEvaluator(),
    )

    output_path = r.chkp_S80_to_ff2.parent
    m.inference_on_test_set(
        output_path / "eval_on_test_set_rounded.5",
        r.chkp_S80_to_ff2,
if __name__ == "__main__":
    args = read_cmd_params()

    dl = DataloaderFlowfrontSensor(image_size=(149, 117),
                                   frame_count=1,
                                   use_binary_sensor_only=True)
    m = ModelTrainer(
        lambda: DeconvModelEfficient(),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        dataset_split_path=r.dataset_split,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=100,
        epochs=100,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_flowfront_sensor_and_flowfront_label,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=1e-4),
        classification_evaluator_function=lambda: SensorToFlowfrontEvaluator(),
        dont_care_num_samples=True)

    if not args.run_eval:
        m.start_training()
    else:
        m.inference_on_test_set(
            output_path=Path(args.eval),
            checkpoint_path=Path(args.checkpoint_path),
Ejemplo n.º 22
0
if __name__ == "__main__":
    args = read_cmd_params()

    dl = DataloaderDryspots(image_size=(143, 111))

    m = ModelTrainer(
        lambda: DrySpotModel(),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        load_datasets_path=r.datasets_dryspots,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_flowfront_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda summary_writer:
        BinaryClassificationEvaluator(summary_writer=summary_writer),
    )

    if not args.run_eval:
        m.start_training()
    else:
        m.inference_on_test_set(
            Path(args.eval),
    mesh = dlm.get_batched_mesh_torch(batch_size, sample_file)
    model = SensorMeshToDryspotResnet(mesh, batch_size=batch_size, weights_path=weights_path)

    m = ModelTrainer(
        lambda: model,
        data_source_paths=filepaths,
        dataset_split_path=dataset_split_path,
        save_path=save_path,
        cache_path=cache_path,
        batch_size=batch_size,
        train_print_frequency=train_print_frequency,
        epochs=epochs,
        dummy_epoch=True,
        num_workers=num_workers,
        num_validation_samples=num_validation_samples,
        num_test_samples=num_test_samples,
        data_processing_function=dlm.get_sensor_dryspot_mesh,
        data_gather_function=get_filelist_within_folder_blacklisted,
        data_root=data_root,
        loss_criterion=torch.nn.BCELoss(),
        optimizer_function=lambda params: torch.optim.SGD(params, lr=0.001),
        classification_evaluator_function=lambda summary_writer:
        BinaryClassificationEvaluator(summary_writer=summary_writer),
        lr_scheduler_function=None,
        caching_torch=False,
        demo_path=None,
        drop_last_batch=True
    )

    m.inference_on_test_set(checkpoint_path=checkpoint_path,
                            classification_evaluator_function=lambda summary_writer:
if __name__ == "__main__":
    dl = DataloaderImages((149, 117))

    checkpoint_p = r.chkp_S1140_to_ff_0_basepr
    adv_output_dir = checkpoint_p.parent / "advanced_eval"

    m = ModelTrainer(
        lambda: DeconvModelEfficient(),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        load_datasets_path=r.datasets_dryspots,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=10,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_sensordata_and_flowfront,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda summary_writer:
        SensorToFlowfrontEvaluator(summary_writer=summary_writer),
    )

    adv_output_dir.mkdir(exist_ok=True)
    m.inference_on_test_set(
        output_path=adv_output_dir,
        checkpoint_path=checkpoint_p,
        classification_evaluator_function=lambda summary_writer:
Ejemplo n.º 25
0
    # model = SensorMeshToFlowFrontModelDGL(mesh, batch_size=batch_size)

    # Mesh flow-front training with streaming data. NOTE(review): depends on
    # outer-scope names not visible here (model, filepaths, save_path,
    # cache_path, batch_size, train_print_frequency, epochs, num_workers,
    # num_validation_samples, num_test_samples, dlm, looping_strategy,
    # data_root, sample_file).
    m = ModelTrainer(
        lambda: model,
        data_source_paths=filepaths,
        save_path=save_path,
        cache_path=cache_path,
        batch_size=batch_size,
        train_print_frequency=train_print_frequency,
        epochs=epochs,
        dummy_epoch=False,
        num_workers=num_workers,
        num_validation_samples=num_validation_samples,
        num_test_samples=num_test_samples,
        data_processing_function=dlm.get_sensor_flowfront_mesh,
        data_gather_function=get_filelist_within_folder_blacklisted,
        looping_strategy=looping_strategy,
        data_root=data_root,
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=1e-4),
        # Evaluator renders flow-front images for the given sample file.
        classification_evaluator_function=lambda:
        FlowFrontMeshEvaluator(sample_file=sample_file,
                               save_path=save_path / "FF_Images/FF_272_loopingtest"),
        lr_scheduler_function=None,
        caching_torch=False,
        demo_path=None,
        drop_last_batch=True,
        # Samples are not kept in memory between epochs (streaming run).
        hold_samples_in_memory=False
    )

    m.start_training()
    eval = True

    dl = DataloaderImageSequences()
    m = ModelTrainer(
        lambda: OptimusPrime_c2D(batch_size)
        if mode == "Transformer" else (FFTFF() if mode == "ConvLSTM" else
                                       (FF2Perm_Baseline()
                                        if mode == "Conv2D" else
                                        (FF2Perm_3DConv()
                                         if mode == "Conv3D" else FFTFF()))),
        dataset_paths,
        save_path,
        cache_path=None,
        batch_size=batch_size,
        epochs=150,
        num_workers=num_workers,
        num_validation_samples=num_val,
        num_test_samples=num_test,
        data_processing_function=dl.get_flowfront_to_perm_map,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),
        data_root=data_root,
        demo_path=data_folder,
        classification_evaluator_function=lambda: SensorToFlowfrontEvaluator(
            skip_images=False,
            ignore_inp=False,
            sensors_shape=(143, 111),
            save_path=save_path),
        dummy_epoch=False)

    if not eval:
        m.start_training()
# --- Ejemplo n.º 27 (scraped example separator; score: 0) ---
if __name__ == "__main__":
    # Binary dry-spot classification from 80 sensors, fine-tuning a pretrained
    # deconv backbone: all weights are loaded from the checkpoint, the first
    # 9 layers are frozen, and outputs are thresholded at 0.8.
    dl = DataloaderDryspots(sensor_indizes=((1, 4), (1, 4)),
                            aux_info=True)

    checkpoint_p = r.chkp_S80_to_ds_thres
    adv_output_dir = checkpoint_p.parent / "advanced_eval"
    m = ModelTrainer(
        lambda: S80Deconv2ToDrySpotEff(pretrained="all",
                                       checkpoint_path=checkpoint_p,
                                       freeze_nlayers=9,
                                       round_at=.8),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        dataset_split_path=r.dataset_split,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dl.get_sensor_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        # NOTE(review): other dry-spot setups in this file use BCELoss for the
        # boolean target — confirm MSELoss is intended here (may be deliberate
        # given the round_at=.8 thresholding in the model).
        loss_criterion=torch.nn.MSELoss(),
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=0.0001),
        classification_evaluator_function=lambda: BinaryClassificationEvaluator(),
        caching_torch=False  # evaluation-oriented run; no sample cache
    )

    # Runs the binary-classification evaluation on the test set using the
    # given checkpoint, writing results to adv_output_dir.
    run_eval_w_binary_classificator(adv_output_dir, m, checkpoint_p)
# --- Ejemplo n.º 28 (scraped example separator; score: 0) ---
        # Fallback configuration branch: regular-sampled dataset paths with a
        # smaller validation/test split.
        batch_size = 256
        dataset_paths = r.get_regular_sampled_data_paths()
        num_workers = 16
        num_val = 100
        num_test = 100
        data_root = r.data_root_every_step

    # Flowfront-to-flowfront sequence prediction with the Bumblebee2 model.
    dl = DataloaderImageSequences()
    m = ModelTrainer(
        lambda: Bumblebee2(),
        dataset_paths,
        r.save_path,
        cache_path=r.cache_path,
        batch_size=batch_size,
        epochs=150,
        num_workers=num_workers,
        num_validation_samples=num_val,
        num_test_samples=num_test,
        data_processing_function=dl.get_flowfront_to_flowfront,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.MSELoss(),  # image regression loss
        data_root=data_root,
        classification_evaluator_function=lambda: None,  # no classification metrics for this task
        dummy_epoch=False)

    if not args.eval:
        m.start_training()
    else:
        m.inference_on_test_set(
            Path(args.eval),
            Path(args.checkpoint_path),
            lambda summary_writer: SensorToFlowfrontEvaluator(
    # Sensor layout is selected from the configured stride table by sensor
    # count; aux_info is needed for run-level (per-run) labeling.
    dl = DataloaderDryspots(sensor_indizes=sensor_indices[str(num_sensors)],
                            aux_info=True,
                            image_size=(143, 111))
    # Run-level dry-spot classification: a ConvLSTM over the sensor sequence
    # emits one ok/not-ok decision per run.
    m = ModelTrainer(
        lambda: SensorToBinaryRunwiseModel(num_sensors, conv_lstm_sizes,
                                           fc_sizes),
        dataset_paths,
        r.save_path,
        dataset_split_path=None,  # r.dataset_split,
        data_root=data_root,
        cache_path=r.cache_path,
        batch_size=batch_size,
        epochs=num_epochs,
        num_workers=num_workers,
        num_validation_samples=num_val,
        num_test_samples=num_test,
        data_processing_function=dl.get_sensor_bool_dryspot_runlevel,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.BCELoss(),  # binary cross-entropy on the run-level label
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=lr),
        classification_evaluator_function=lambda:
        BinaryClassificationEvaluator(skip_images=not create_data_plots,
                                      max_epochs=num_epochs,
                                      data_loader=dl),
        # lr_scheduler_function=lambda optim: ExponentialLR(optim, 0.5),
        dummy_epoch=False,
        caching_torch=use_cache,
        run_name=run_name,
        save_in_mlflow_directly=True)  # log metrics/artifacts straight to MLflow

    if not args.run_eval:
        m.start_training()
if __name__ == "__main__":
    args = read_cmd_params()

    # Dry-spot detection from 20 sensors (stride (1, 8) in both directions)
    # with a wide fully-connected model. The processed dataset is additionally
    # persisted as a torch dataset named after this script file, and reloaded
    # from there on subsequent runs.
    dlds = DataloaderFlowfrontSensor(sensor_indizes=((1, 8), (1, 8)))
    m = ModelTrainer(
        lambda: S20DryspotModelFCWide(),
        data_source_paths=r.get_data_paths_base_0(),
        save_path=r.save_path,
        load_datasets_path=r.datasets_dryspots,
        cache_path=r.cache_path,
        batch_size=2048,
        train_print_frequency=100,
        epochs=1000,
        num_workers=75,
        num_validation_samples=131072,
        num_test_samples=1048576,
        data_processing_function=dlds.get_flowfront_sensor_bool_dryspot,
        data_gather_function=get_filelist_within_folder_blacklisted,
        loss_criterion=torch.nn.BCELoss(),  # binary cross-entropy on the bool dry-spot label
        optimizer_function=lambda params: torch.optim.AdamW(params, lr=1e-4),
        classification_evaluator_function=lambda summary_writer:
        BinaryClassificationEvaluator(summary_writer=summary_writer),
        save_torch_dataset_path=r.datasets_dryspots_torch /
        Path(__file__).stem,
        load_torch_dataset_path=r.datasets_dryspots_torch /
        Path(__file__).stem,
        # lr_scheduler_function=lambda optim: ExponentialLR(optim, 0.1),
    )

    if not args.run_eval:
        m.start_training()
    else: