Example #1
    def train_dataloader(self):
        # Build the training DataLoader for the current fold. train_image_folder,
        # train_mask_folder and train_rle_path are defined elsewhere in the original
        # module; the mean/std tuples are the standard ImageNet normalization stats.
        train_loader = provider(fold=self.hparams.fold,
                                total_folds=5,
                                image_folder=train_image_folder,
                                mask_folder=train_mask_folder,
                                df_path=train_rle_path,
                                phase='train',
                                size=self.hparams.img_size,
                                mean=(0.485, 0.456, 0.406),
                                std=(0.229, 0.224, 0.225),
                                batch_size=self.hparams.train_batch_size,
                                num_workers=self.hparams.num_workers,
                                shuffle=True)
        return train_loader
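None of these examples include the definition of provider itself. The sketch below is a minimal, hypothetical version consistent with the keyword arguments used above: it builds a fold-aware Dataset and wraps it in a torch DataLoader. The _FoldDataset class and its fold-assignment rule are placeholders, not the original project's code.

import pandas as pd
from torch.utils.data import Dataset, DataLoader


class _FoldDataset(Dataset):
    # Hypothetical placeholder; the real project builds an image/mask Dataset here.
    def __init__(self, df_path, fold, total_folds, phase):
        df = pd.read_csv(df_path)
        in_fold = df.index % total_folds == fold   # assumed fold-assignment rule
        self.df = df[~in_fold] if phase == 'train' else df[in_fold]

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        return self.df.iloc[idx].to_dict()


def provider(fold, total_folds, df_path, phase, batch_size=8,
             num_workers=4, shuffle=False, **kwargs):
    # image_folder, mask_folder, size, mean and std are accepted via **kwargs so the
    # sketch stays short; a real implementation would use them for loading/transforms.
    dataset = _FoldDataset(df_path, fold, total_folds, phase)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      num_workers=num_workers,
                      shuffle=shuffle)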
Example #2
    def pred(self):
        """Run inference on the n test images.

        Stores one 48-dimensional prediction vector per image (converted to
        probabilities) and fills self.metrics with classification scores.
        """
        self.model.eval()

        data_loader = provider(self.root,
                               self.path,
                               batch_size=self.batch_size)
        image_preds_all = []
        image_targets_all = []

        for batch in data_loader:
            img, target = batch
            image_preds = self.model(img.to(device))
            self.predictions.extend(image_preds.detach().cpu().numpy())
            self.labels.extend(target.detach().cpu().numpy())
            image_preds_all += [
                torch.argmax(image_preds, 1).detach().cpu().numpy()
            ]
            image_targets_all += [target.detach().cpu().numpy()]

        # Convert the stored logits to probabilities with a numerically stable softmax.
        for i in range(len(self.predictions)):
            x = self.predictions[i] - np.max(self.predictions[i])
            self.predictions[i] = np.exp(x) / np.sum(np.exp(x))

        image_preds_all = np.concatenate(image_preds_all)
        image_targets_all = np.concatenate(image_targets_all)

        weighted_f1_score = f1_score(image_targets_all,
                                     image_preds_all,
                                     average="weighted")
        macro_f1_score = f1_score(image_targets_all,
                                  image_preds_all,
                                  average="macro")
        balanced_accuracy_score1 = balanced_accuracy_score(
            image_targets_all, image_preds_all)
        accuracy_score1 = accuracy_score(image_targets_all, image_preds_all)
        precision_score1 = precision_score(image_targets_all,
                                           image_preds_all,
                                           average="macro")
        recall_score1 = recall_score(image_targets_all,
                                     image_preds_all,
                                     average="macro")

        self.metrics["weighted_f1_score"] = round(weighted_f1_score, 4)
        self.metrics["macro_f1_score"] = round(macro_f1_score, 4)
        self.metrics["accuracy_score"] = round(accuracy_score1, 4)
        self.metrics["precision_score"] = round(precision_score1, 4)
        self.metrics["recall_score"] = round(recall_score1, 4)
Example #3
    def __init__(self, model, writer, config):
        self.writer = writer
        self.model_name = config.get('MODEL', 'NAME')
        self.data_folder = config.get('FILES', 'DATA_FOLDER')
        self.train_df_path = config.get('FILES', 'TRAIN_DF_PATH')
        self.save_model_dir = config.get('FILES', 'TRAINED_MODELS_DIR')
        self.num_workers = config.getint('COMMON', 'NO_WORKERS')
        self.batch_train = config.getint('TRAINING', 'BATCH_TRAIN')
        self.batch_val = config.getint('TRAINING', 'BATCH_VAL')
        self.batch_size = {"train": self.batch_train, "val": self.batch_val}
        self.accumulation_steps = 32 // self.batch_size['train']
        self.lr = config.getfloat('TRAINING', 'LR')
        self.num_epochs = config.getint('TRAINING', 'EPOCH')
        # Config values come back as strings; cast the comma-separated stats to floats.
        self.mean = [float(x) for x in config.get('PROCESSING', 'MEAN').split(',')]
        self.std = [float(x) for x in config.get('PROCESSING', 'STD').split(',')]
        self.seed = config.getint('COMMON', 'SEED')

        self.best_loss = float("inf")
        self.phases = ["train", "val"]
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        # Only default to CUDA tensors when a GPU is actually available.
        if self.device.type == "cuda":
            torch.set_default_tensor_type("torch.cuda.FloatTensor")
        self.net = model
        self.net = self.net.to(self.device)
        self.criterion = torch.nn.BCEWithLogitsLoss()
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
        self.scheduler = ReduceLROnPlateau(self.optimizer,
                                           mode="min",
                                           patience=3,
                                           verbose=True)
        self.net = nn.DataParallel(self.net)

        cudnn.benchmark = True
        self.dataloaders = {
            phase: provider(data_folder=self.data_folder,
                            df_path=self.train_df_path,
                            phase=phase,
                            mean=self.mean,
                            std=self.std,
                            batch_size=self.batch_size[phase],
                            num_workers=self.num_workers,
                            seed=self.seed)
            for phase in self.phases
        }
        self.losses = {phase: [] for phase in self.phases}
        self.iou_scores = {phase: [] for phase in self.phases}
        self.dice_scores = {phase: [] for phase in self.phases}
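The config object here is read with configparser-style get/getint/getfloat calls. The snippet below is a hypothetical configuration with placeholder values, covering exactly the sections and keys this constructor accesses (the MEAN/STD values mirror the ImageNet statistics used in the other examples).

import configparser

config = configparser.ConfigParser()
config.read_string("""
[MODEL]
NAME = unet_resnet34

[FILES]
DATA_FOLDER = ./data/images
TRAIN_DF_PATH = ./data/train.csv
TRAINED_MODELS_DIR = ./models

[COMMON]
NO_WORKERS = 4
SEED = 42

[TRAINING]
BATCH_TRAIN = 8
BATCH_VAL = 4
LR = 0.0005
EPOCH = 30

[PROCESSING]
MEAN = 0.485,0.456,0.406
STD = 0.229,0.224,0.225
""")
# config can then be passed to the constructor above together with a model and a writer.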
Example #4
    def __init__(self, model, epochs, lr, acc_steps, optimizer, scheduler,
                 criterion, fold, size, batch_sz):
        self.num_workers = 1
        self.fold = fold
        self.size = size
        self.accumulation_steps = acc_steps
        self.lr = lr
        self.num_epochs = epochs
        self.batch_size = {"train": batch_sz, "val": 4}
        self.best_loss = float("inf")
        self.phases = ["train", "val"]
        self.device = torch.device("cuda:0")
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
        self.net = model
        self.criterion = criterion
        # Note: the optimizer argument is never used; a fresh Adam is always created.
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
        self.scheduler = scheduler
        self.net = self.net.to(self.device)
        cudnn.benchmark = True
        # data_folder and train_rle_path are defined elsewhere in the original script.
        self.dataloaders = {
            phase: provider(
                fold=self.fold,
                total_folds=5,
                data_folder=data_folder,
                df_path=train_rle_path,
                phase=phase,
                size=self.size,
                mean=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                batch_size=self.batch_size[phase],
                num_workers=self.num_workers,
            )
            for phase in self.phases
        }
        self.losses = {phase: [] for phase in self.phases}
        self.iou_scores = {phase: [] for phase in self.phases}
        self.dice_scores = {phase: [] for phase in self.phases}
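As in the earlier examples, this constructor assumes a few names exist at module level. The sketch below lists them with illustrative placeholder values; the paths are not the original project's.

import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn

data_folder = "./data/train_images"        # placeholder path
train_rle_path = "./data/train-rle.csv"    # placeholder path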