Example #1
0
    def test_TrainableFit(self):
        """Fit the classifier via StandardClassification and verify:
        train/eval mode toggling via callbacks, the validation-interval
        progress printout, and the number of training-log entries."""
        trainable = StandardClassification(self.classifier)
        loader = DataLoader(self.dataset,
                            batch_size=self._BATCH_SIZE,
                            shuffle=True,
                            num_workers=self._NUM_WORKERS,
                            drop_last=True)

        def check_train_mode(metrics):
            # Called every training step: model must be in training mode.
            with self.subTest("train-mode"):
                self.assertTrue(self.classifier.training)

        def check_eval_mode(metrics):
            # Called every epoch (after validation): model must be in eval mode.
            with self.subTest("eval-mode"):
                self.assertFalse(self.classifier.training)

        captured = io.StringIO()
        sys.stdout = captured
        try:
            train_log, eval_log = trainable.fit(loader,
                                                epochs=self._NUM_EPOCHS,
                                                step_callback=check_train_mode,
                                                validation_dataset=loader,
                                                epoch_callback=check_eval_mode,
                                                validation_interval=60,
                                                num_workers=4)
        finally:
            # BUGFIX: restore stdout even if fit() raises; otherwise the
            # test runner and all subsequent tests keep writing into
            # `captured`, hiding their output.
            sys.stdout = sys.__stdout__

        with self.subTest("validation-interval"):
            self.assertIn("Iteration 60 | ", captured.getvalue())

        # One train-log entry per batch; drop_last=True makes each epoch
        # exactly len(dataset) // batch_size batches long.
        self.assertEqual(
            len(train_log),
            self._NUM_EPOCHS * len(self.dataset) // self._BATCH_SIZE)
def make_model_and_process(args, dataset: Dataset, ds_config: DatasetConfig) -> StandardClassification:
    """
    Return the classifier process from the arguments given.
    """
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    norm_method = args.input_norm_method.lower()

    if norm_method == "dain":
        # Adaptive input normalization (DAIN) variants.
        dain_kwargs = dict(feat_dim=len(dataset.channels),
                           start_gate_iter=args.start_gate_iter)
        if args.model_type == "tid":
            model = AdaptiveInputNormTIDNet.from_dataset(dataset, **dain_kwargs)
        else:
            print("Creating Adaptive Input Norm EEG Model")
            model = AdaptiveInputNormEEGNet.from_dataset(dataset, **dain_kwargs)
        return StandardClassification(model, cuda=use_cuda, learning_rate=ds_config.lr)

    if norm_method == "dain_author":
        # Author's reference DAIN setup: separate optimizer parameter groups.
        model = AdaptiveInputNormEEGNet.from_dataset(dataset)
        return MultipleParamGroupClassification(model, cuda=use_cuda,
                                                learning_rate=ds_config.lr)

    if args.surface_laplacian:
        # Learned surface-Laplacian front-end, optionally convolutional.
        if args.sl_mode == "normal":
            model = LearnedSLEEGNet.from_dataset(dataset, cuda=use_cuda)
        else:
            model = LearnedSLEEGNet.from_dataset(dataset, cuda=use_cuda, convolve=True,
                                                 kernel_size=args.kernel_size,
                                                 temperature=args.temperature)
        return StandardClassification(model, cuda=use_cuda, learning_rate=ds_config.lr)

    # Default: whatever model class the CLI selected.
    model = args.model.from_dataset(dataset)
    return StandardClassification(model, cuda=use_cuda, learning_rate=ds_config.lr)
Example #3
0
    def test_StridedFit(self):
        """Fit an EEGNetStrided model with a 'constant' LR scheduler and
        verify train mode, the constant learning rate, and log length."""
        stride_classifier = EEGNetStrided.from_dataset(self.dataset)
        process = StandardClassification(stride_classifier)
        process.set_scheduler('constant')
        loader = DataLoader(self.dataset,
                            batch_size=self._BATCH_SIZE,
                            shuffle=True,
                            num_workers=self._NUM_WORKERS,
                            drop_last=True)

        def checks(metrics):
            with self.subTest("train-mode"):
                # BUGFIX: assert on the model actually being fit
                # (stride_classifier); the original checked the unrelated
                # self.classifier fixture.
                self.assertTrue(stride_classifier.training)
            with self.subTest("constant-learning-rate"):
                # assertEqual reports both values on failure, unlike
                # assertTrue(a == b).
                self.assertEqual(metrics['lr'], process.lr)

        train_log, eval_log = process.fit(loader,
                                          epochs=self._NUM_EPOCHS,
                                          step_callback=checks)

        # drop_last=True => len(dataset) // batch_size batches per epoch.
        self.assertEqual(
            len(train_log),
            self._NUM_EPOCHS * len(self.dataset) // self._BATCH_SIZE)
Example #4
0
                tqdm.tqdm(utils.get_lmoso_iterator(ds_name, ds))):
            # NOTE(review): truncated fragment — the enclosing `for` header
            # (unpacking the LMOSO iterator into training/validation/test
            # splits, presumably) was cut off above this line; confirm
            # against the full source.

            # Print a GPU memory report once per fold.
            tqdm.tqdm.write(torch.cuda.memory_summary())

            # Select the model architecture from the CLI choice.
            if args.model == utils.MODEL_CHOICES[0]:
                model = BENDRClassification.from_dataset(
                    training, multi_gpu=args.multi_gpu)
            else:
                model = LinearHeadBENDR.from_dataset(training)

            # Unless a random initialization was requested, load pretrained
            # encoder/context weights (optionally freezing the encoder).
            if not args.random_init:
                model.load_pretrained_modules(
                    experiment.encoder_weights,
                    experiment.context_weights,
                    freeze_encoder=args.freeze_encoder)
            process = StandardClassification(model, metrics=added_metrics)
            process.set_optimizer(
                torch.optim.Adam(process.parameters(),
                                 ds.lr,
                                 weight_decay=0.01))

            # Fit everything
            process.fit(training_dataset=training,
                        validation_dataset=validation,
                        warmup_frac=0.1,
                        retain_best=retain_best,
                        pin_memory=False,
                        **ds.train_params)

            # NOTE(review): truncated below — the body of this results-saving
            # branch is missing from this view.
            if args.results_filename:
                if isinstance(test, Thinker):
    # NOTE(review): truncated fragment — the enclosing function's `def`
    # line (defining args, state, t_vectors, counts, etc.) was cut off
    # above this line.
    start_epoch = None
    if state is not None:
        # Resume from a checkpoint: reload t-vector weights and the epoch
        # to restart from.
        t_vectors.load(os.path.join(args.checkpoint_dir, 't-vectors.pt'))
        start_epoch = state['epoch']
    # Register the model with the remote logger at the configured interval.
    logging_remote.watch(t_vectors, log_freq=args.log_interval)

    # Decide whether to use LDAM loss to compensate for training label imbalance, or simple resampling.
    # NOTE(review): despite the comment above, only resampling is visible
    # here — the LDAM-loss path must live elsewhere; confirm.
    if args.median_sampling:
        # Weighted resampling so each person contributes roughly the median
        # number of points.
        sampler = WeightedRandomSampler(sample_weights,
                                        len(counts) * int(np.median(counts)),
                                        replacement=True)
        print("Sampling {} points from each person.".format(np.median(counts)))
    else:
        sampler = None

    process = StandardClassification(t_vectors)
    process.add_batch_transform(
        RandomTemporalCrop(max_crop_frac=args.batch_crop_max))
    # NOTE(review): the scheduler is constructed against process.optimizer
    # BEFORE set_optimizer replaces it below — the MultiStepLR may end up
    # stepping the old, discarded optimizer. Verify the intended order.
    process.set_scheduler(
        MultiStepLR(process.optimizer, args.lr_drop_milestones))
    process.set_optimizer(
        torch.optim.Adam(process.parameters(),
                         lr=experiment.training_params.lr,
                         weight_decay=experiment.training_params.l2))

    def knn_validation(metrics):
        # Epoch callback: forward metrics to the remote logger, then (when
        # enabled) prepare the validation set for kNN evaluation.
        # NOTE(review): truncated below — the rest of this callback is
        # missing from this view.
        logging_remote.training_callback(metrics)
        if args.knn_val > 0:
            validating.clear_transforms()
            validating.add_transform(Deep1010ToEEG())
            x, extras = create_numpy_formatted_tvectors(validating, t_vectors)
Example #6
0
 def _contrastive_accuracy(inputs, outputs):
     """Accuracy for contrastive logits where the positive sits at index 0.

     Every row's correct class is position 0 of the logits, so the target
     vector is all zeros.
     """
     scores = outputs[0]
     targets = torch.zeros(scores.shape[0], dtype=torch.long, device=scores.device)
     return StandardClassification._simple_accuracy([targets], scores)
Example #7
0
    # NOTE(review): truncated fragment — the enclosing function's `def`
    # (defining dataset and experiment) was cut off above this line.
    results = list()
    # Leave-multiple-subjects-out cross-validation over the configured folds.
    for fold, (training, validation, test) in enumerate(tqdm.tqdm(dataset.lmso(experiment.training_params.folds),
                                                        total=experiment.training_params.folds,
                                                        desc="LMSO", unit='fold')):

        # training.add_transform(CropAndResample(dataset.sequence_length - 16, 4))
        # Augment training data: per sample, pick crop-and-resample (25%)
        # or a plain random crop (75%).
        training.add_transform(UniformTransformSelection([
            CropAndResample(dataset.sequence_length - 128, stdev=8),
            RandomCrop(dataset.sequence_length - 128)
        ], weights=[0.25, 0.75]))
        # NOTE(review): add_transform() is called with no argument — this
        # looks like a bug (missing transform or leftover call); verify
        # against the dataset API before relying on this snippet.
        validation.add_transform()

        tidnet = FilteredTIDNet.from_dataset(dataset)
        # tidnet = FilteredEEGNet.from_dataset(dataset)

        process = StandardClassification(tidnet, learning_rate=experiment.training_params.lr)
        process.fit(training_dataset=training, validation_dataset=validation, epochs=experiment.training_params.epochs,
                    batch_size=experiment.training_params.batch_size, num_workers=0)

        # Evaluate each held-out subject individually and record accuracy.
        for _, _, test_thinker in test.loso():
            summary = {'Person': test_thinker.person_id,
                       'Fold': fold+1,
                       "Accuracy": process.evaluate(test_thinker)['Accuracy']}
            results.append(summary)
        # Print per-fold summary statistics without breaking the tqdm bar.
        _res = DataFrame(results)
        tqdm.tqdm.write(str(_res[_res['Fold'] == fold+1].describe()))

    df = DataFrame(results)
    print(df.describe())
    # df.to_csv('baseline_mmi.csv')
Example #8
0
 def test_MakeSimpleClassifierTrainableCUDA(self):
     """Constructing a trainable from the classifier succeeds.

     NOTE(review): despite the CUDA name, nothing here requests CUDA —
     confirm whether cuda=True was intended at construction.
     """
     trainable = StandardClassification(self.classifier)
     # BUGFIX: assertTrue(True) was vacuous — assert something meaningful
     # about the object the test actually constructed.
     self.assertIsInstance(trainable, StandardClassification)
Example #9
0
 def test_EvaluationMetrics(self):
     """A custom metric passed at construction appears in evaluate() output
     alongside the always-reported loss."""
     extra_metrics = {'BAC': balanced_accuracy}
     trainable = StandardClassification(self.classifier, metrics=extra_metrics)
     results = trainable.evaluate(self.dataset)
     for expected_key in ('BAC', 'loss'):
         self.assertIn(expected_key, results)