def __init__(self,
                 data_dir,
                 batch_size,
                 epoch_size,
                 validation_split,
                 num_workers,
                 img_size,
                 mixup=0.4,
                 alpha=None,
                 verbose=0):
        self.logger = setup_logger(self, verbose)

        if validation_split > 0:
            # Validation pipeline: evaluation-time transforms over the training data.
            valid_tsfm = MediumNpyTransforms(train=False, img_size=img_size)
            dataset_valid = NpyDataset(data_dir, valid_tsfm, None, train=True)

            # Hold out a random fraction of the indices for validation.
            all_idx = np.arange(len(dataset_valid))
            len_valid = int(len(all_idx) * validation_split)
            valid_idx = np.random.choice(all_idx,
                                         size=len_valid,
                                         replace=False)
            valid_sampler = BatchSampler(SubsetRandomSampler(valid_idx),
                                         batch_size, False)
            self.logger.info(
                f'Selected {len(valid_idx)}/{len(all_idx)} indices for validation'
            )
            # Log how the diagnosis classes are represented in the held-out set.
            valid_targets = dataset_valid.df.iloc[valid_idx].groupby(
                'diagnosis').count()
            self.logger.info(f'Validation class distribution:\n{valid_targets}')

            self._loader_valid = DataLoader(dataset_valid,
                                            batch_sampler=valid_sampler,
                                            num_workers=num_workers)
        else:
            valid_idx = []

        # Training pipeline: mixup transforms over a mixup-aware dataset.
        train_tsfm = MixupNpyTransforms(train=True, img_size=img_size)
        dataset_train = MixupNpyDataset(data_dir,
                                        train_tsfm,
                                        valid_idx,
                                        alpha=mixup,
                                        train=True)
        all_idx = np.arange(len(dataset_train))
        # Train on every index that was not held out for validation.
        valid_set = set(valid_idx)
        train_idx = [i for i in all_idx if i not in valid_set]

        if alpha is None:
            # No sample weighting: iterate the training subset sequentially.
            self.logger.info('No sample weighting selected.')
            dataset = Subset(dataset_train, train_idx)
            sampler = BatchSampler(SequentialSampler(dataset), batch_size,
                                   False)
            self.n_samples = len(train_idx)
        else:
            # Class-balanced batches drawn by the weighted sampler factory.
            dataset = dataset_train
            factory = SamplerFactory(verbose)
            sampler = factory.get(dataset_train.df, train_idx, batch_size,
                                  epoch_size, alpha)
            self.n_samples = len(sampler) * batch_size

        super().__init__(dataset,
                         batch_sampler=sampler,
                         num_workers=num_workers)
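The validation split above boils down to sampling a fraction of the dataset indices without replacement and training on the remainder. A minimal, standalone sketch of that index logic (plain numpy, independent of the classes above):

import numpy as np

# Illustration of the hold-out split used in the __init__ above.
n_samples = 10
validation_split = 0.2

all_idx = np.arange(n_samples)
valid_idx = np.random.choice(all_idx,
                             size=int(n_samples * validation_split),
                             replace=False)
train_idx = np.setdiff1d(all_idx, valid_idx)  # everything not held out
print(valid_idx, train_idx)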
    def __init__(self,
                 dataset,
                 batch_size,
                 epoch_size,
                 validation_split,
                 num_workers,
                 train=True,
                 alpha=None,
                 verbose=0):
        self.verbose = verbose
        self.logger = setup_logger(self, self.verbose)
        self.ids = dataset.df['id_code'].values

        self.sampler, self.valid_sampler = self._setup_samplers(
            dataset, batch_size, epoch_size, validation_split, alpha)

        super().__init__(dataset,
                         batch_sampler=self.sampler,
                         num_workers=num_workers)
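The `_setup_samplers` helper is not shown here, but the previous example suggests it composes a SubsetRandomSampler with a BatchSampler. A minimal, runnable sketch of that composition (index values chosen purely for illustration):

from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler

# Draw indices from a fixed subset in random order, then group them into batches.
valid_idx = [3, 7, 11, 19]
valid_sampler = BatchSampler(SubsetRandomSampler(valid_idx),
                             batch_size=2, drop_last=False)
print(list(valid_sampler))  # e.g. [[11, 3], [19, 7]]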
Example #3
    def train(self, config, resume):
        setup_logging(config)
        self.logger = setup_logger(self, config['training']['verbose'])
        self._seed_everything(config['seed'])

        self.logger.debug('Getting data_loader instance')
        data_loader = get_instance(module_data, 'data_loader', config)
        valid_data_loader = data_loader.split_validation()

        self.logger.debug('Building model architecture')
        model = get_instance(module_arch, 'arch', config)
        model, device = self._prepare_device(model, config['n_gpu'])

        self.logger.debug('Getting loss and metric function handles')
        loss = getattr(module_loss, config['loss'])
        metrics = [getattr(module_metric, met) for met in config['metrics']]

        self.logger.debug('Building optimizer and lr scheduler')
        trainable_params = filter(lambda p: p.requires_grad,
                                  model.parameters())
        optimizer = get_instance(module_optim, 'optimizer', config,
                                 trainable_params)
        lr_scheduler = get_instance(module_sched, 'lr_scheduler', config,
                                    optimizer)

        self.logger.debug('Initialising trainer')
        trainer = Trainer(model,
                          loss,
                          metrics,
                          optimizer,
                          resume=resume,
                          config=config,
                          device=device,
                          data_loader=data_loader,
                          valid_data_loader=valid_data_loader,
                          lr_scheduler=lr_scheduler)

        trainer.train()
        self.logger.debug('Finished!')
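The `get_instance` helper used throughout `train` is not shown in this listing. In pytorch-template-style projects it usually resolves a class named in the config and constructs it with the configured arguments; a minimal sketch, assuming each config entry has a `{'type': ..., 'args': {...}}` layout:

def get_instance(module, name, config, *args):
    # Look up config[name]['type'] in `module` and build it with any extra
    # positional args plus the keyword args from config[name]['args'].
    return getattr(module, config[name]['type'])(*args, **config[name]['args'])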
    def __init__(self, model, loss, metrics, optimizer, resume, config, device):
        self.logger = setup_logger(self, verbose=config['training']['verbose'])
        self.model = model
        self.device = device
        self.loss = loss
        self.metrics = metrics
        self.optimizer = optimizer
        self.config = config

        cfg_trainer = config['training']
        self.epochs = cfg_trainer['epochs']
        self.save_period = cfg_trainer['save_period']
        self.monitor = cfg_trainer.get('monitor', 'off')

        # configuration to monitor model performance and save best
        if self.monitor == 'off':
            self.mnt_mode = 'off'
            self.mnt_best = 0
        else:
            self.mnt_mode, self.mnt_metric = self.monitor.split()
            assert self.mnt_mode in ['min', 'max']

            self.mnt_best = math.inf if self.mnt_mode == 'min' else -math.inf
            self.early_stop = cfg_trainer.get('early_stop', math.inf)

        self.start_epoch = 1

        # setup directory for checkpoint saving
        self.checkpoint_dir, writer_dir = trainer_paths(config)
        # setup visualization writer instance
        self.writer = TensorboardWriter(writer_dir, self.logger,
                                        cfg_trainer['tensorboard'])

        # Save configuration file into checkpoint directory:
        config_save_path = os.path.join(self.checkpoint_dir, 'config.yaml')
        with open(config_save_path, 'w') as handle:
            yaml.dump(config, handle, default_flow_style=False)

        if resume:
            self._resume_checkpoint(resume)
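Collecting the keys this constructor reads, the `training` section of the config presumably looks like the sketch below (expressed as a Python dict; the metric name and the numbers are illustrative assumptions, not taken from the original project):

config = {
    'training': {
        'verbose': 2,
        'epochs': 50,
        'save_period': 1,
        'monitor': 'max val_accuracy',  # '<mode> <metric>', mode in {'min', 'max'}
        'early_stop': 10,
        'tensorboard': True,
    }
}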
Example #5
    def __init__(self, verbose=0):
        self.logger = setup_logger(self, verbose)
Example #6
    def test(self, config, model_checkpoint):
        setup_logging(config)
        self.logger = setup_logger(self, config['testing']['verbose'])
        self._seed_everything(config['seed'])

        self.logger.info(f'Using config:\n{config}')

        self.logger.debug('Getting data_loader instance')
        data_loader = PngDataLoader(
            config['testing']['data_dir'],
            batch_size=config['testing']['batch_size'],
            validation_split=0.0,
            train=True,  # keep labels so metrics can be computed below
            alpha=None,
            img_size=config['testing']['img_size'],
            num_workers=config['testing']['num_workers'],
            verbose=config['testing']['verbose'])

        self.logger.debug('Building model architecture')
        model = get_instance(module_arch, 'arch', config)
        model, device = self._prepare_device(model, config['n_gpu'])

        self.logger.debug(f'Loading checkpoint {model_checkpoint}')
        checkpoint = torch.load(model_checkpoint)
        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)

        model.eval()

        ensemble_size = config['testing']['ensemble_size']
        pred_df = pd.DataFrame({'id_code': data_loader.ids})

        self.logger.debug(
            f'Generating {ensemble_size} predictions for {pred_df.shape[0]} samples'
        )
        with torch.no_grad():
            # Perform `ensemble_size` passes over the data and average the results.
            for e in range(ensemble_size):
                preds = torch.zeros(len(data_loader.dataset))
                offset = 0
                for data, _ in tqdm(data_loader):
                    data = data.to(device)
                    output = model(data).cpu()
                    # Clamp regression outputs so rounding yields grades 0-4.
                    batch_preds = output.squeeze(1).clamp(min=-0.4, max=4.4)
                    # Track a running offset so a smaller final batch is written
                    # to the correct slice of `preds`.
                    preds[offset:offset + batch_preds.size(0)] = batch_preds
                    offset += batch_preds.size(0)

                # add a column for this iteration of predictions
                pred_df[str(e)] = preds.numpy()

        # Average the ensemble predictions per image, round to the nearest grade,
        # and attach the ground-truth labels.
        pred_df.set_index('id_code', inplace=True)
        pred_df['diagnosis'] = pred_df.apply(
            lambda row: int(np.round(row.mean())), axis=1)
        pred_df['target'] = data_loader.dataset.df.set_index(
            'id_code')['diagnosis']
        self.logger.info(pred_df.head(5))

        preds = pred_df['diagnosis'].values
        targets = pred_df['target'].values

        metrics = [getattr(module_metric, met) for met in config['metrics']]

        for metric in metrics:
            result = metric(preds, targets)
            self.logger.info(f'{metric.__name__}: {result}')

        pred_df.to_csv('test_predictions.csv')
        self.logger.info('Finished saving test predictions!')
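The clamp to [-0.4, 4.4] followed by rounding is what maps the continuous regression outputs onto the integer diagnosis grades 0 through 4; a small standalone illustration:

import numpy as np

# Raw regression outputs can fall outside the label range...
outputs = np.array([-1.2, 0.3, 2.7, 5.1])
# ...but after clamping to [-0.4, 4.4] and rounding, every prediction
# lands on one of the grades 0..4.
grades = np.round(np.clip(outputs, -0.4, 4.4)).astype(int)
print(grades)  # [0 0 3 4]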
Example #7
    def __init__(self, verbose=0):
        super().__init__()
        self.logger = setup_logger(self, verbose=verbose)