def __init__(self, datafolder, model_save_path):
    super(predict, self).__init__()
    self.batch_size = 1
    self.dataloader = data.provider(data_folder=datafolder,
                                    phase='val',
                                    input_shape=(256, 256),
                                    batch_size=1)
    # Pick the device first so the checkpoint can be mapped onto it;
    # loading a CUDA-saved checkpoint on a CPU-only machine fails otherwise.
    self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    self.model = model3.HoverNet()
    self.model.load_state_dict(torch.load(model_save_path, map_location=self.device))
    self.model.eval()
    self.hoverloss = HoverLoss()
    self.model = self.model.to(self.device)
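
# A minimal usage sketch for the predictor above, under assumptions not
# confirmed by this file: the class is named `predict`, the loader yields
# (image, target) pairs, and both paths below are placeholders.
if __name__ == '__main__':
    predictor = predict(datafolder='data/val',                    # hypothetical path
                        model_save_path='weights/hovernet.pth')   # hypothetical path
    with torch.no_grad():  # inference only, no gradient buffers
        for images, _targets in predictor.dataloader:
            images = images.to(predictor.device)
            outputs = predictor.model(images)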
def __init__(self,
             model,
             batch_size: int,
             max_epoch: int,
             lr: float,
             val_freq,
             model_save_path,
             data_dir: str,
             phase: str,
             input_shape: Tuple[int, int] = (256, 256),
             checkpoint_path: str = None):
    self.phase = phase
    self.batch_size = batch_size
    self.lr = lr
    self.max_epoch = max_epoch
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.net = model.to(self.device)
    self.checkpoint_path = checkpoint_path
    self.model_save_path: str = model_save_path
    self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr)
    # self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode="min", patience=3,
    #                                                             verbose=True, min_lr=3e-6)
    # NOTE: gamma=1 means the learning rate never actually decays; this StepLR
    # is effectively a constant-LR placeholder.
    self.scheduler = torch.optim.lr_scheduler.StepLR(optimizer=self.optimizer, step_size=10, gamma=1)
    self.hoverloss = loss.HoverLoss()
    self.best_loss: float = float("inf")  # sentinel; any real validation loss beats this
    self.val_freq = val_freq
    self.dataloaders = {
        'train': data.provider(data_folder=data_dir,
                               phase='train',
                               batch_size=self.batch_size,
                               input_shape=input_shape),
        'val': data.provider(data_folder=data_dir,
                             phase='val',
                             batch_size=2,
                             input_shape=input_shape),
    }
    # `store` keeps per-epoch history; `epoch_loss` accumulates within an epoch.
    self.store = {'train': {'loss': [], 'loss_np': [], 'loss_nc': [], 'loss_hv': []},
                  'val': {'loss': [], 'loss_np': [], 'loss_nc': [], 'loss_hv': []}}
    self.epoch_loss = {'loss': [], 'loss_np': [], 'loss_nc': [], 'loss_hv': []}
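
# A hedged sketch of the epoch loop these attributes imply. Assumptions (not
# confirmed by this file): batches arrive as (images, targets) pairs and
# self.hoverloss(preds, targets) returns the scalar total loss. `fit` is a
# hypothetical free function, not a method defined in this repo.
def fit(trainer):
    for epoch in range(trainer.max_epoch):
        trainer.net.train()
        for images, targets in trainer.dataloaders['train']:
            images, targets = images.to(trainer.device), targets.to(trainer.device)
            trainer.optimizer.zero_grad()
            loss_value = trainer.hoverloss(trainer.net(images), targets)
            loss_value.backward()
            trainer.optimizer.step()
        trainer.scheduler.step()
        # Validate (and checkpoint on improvement) every `val_freq` epochs.
        if (epoch + 1) % trainer.val_freq == 0:
            trainer.net.eval()
            with torch.no_grad():
                val_loss = sum(trainer.hoverloss(trainer.net(x.to(trainer.device)),
                                                 y.to(trainer.device)).item()
                               for x, y in trainer.dataloaders['val'])
            if val_loss < trainer.best_loss:
                trainer.best_loss = val_loss
                torch.save(trainer.net.state_dict(), trainer.model_save_path)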
def __init__(self, seg_model, cls_model):
    self.num_workers = 6
    self.batch_size = {"val": 1}
    '''##########hyper-parameters setting#############'''
    self.num_epochs = 1
    self.title = 'local_dice'
    self.sel_GPU = '0'  # set None to select both GPUs
    self.fold = FOLD  # FOLD is expected as a module-level constant
    self.cls_threshold = [0.5, 0.5, 0.5, 0.5]
    self.min_size = [600, 600, 1000, 2000]
    self.seg_threshold = [0.5, 0.5, 0.5, 0.5]
    '''###############################################'''
    self.criterion = torch.nn.BCEWithLogitsLoss()
    self.accumulation_steps = 32
    # config path for saving .pth checkpoints
    self.path = os.path.join('weights', self.title, 'f' + str(self.fold))
    if not os.path.isdir(self.path):
        os.makedirs(self.path)  # makedirs: os.mkdir cannot create intermediate directories
        print('make directory done!!')
    self.file_name = os.path.join(self.path, 'logfile.txt')
    self.phases = ["val"]
    # self.device = torch.device("cuda:0")
    # torch.set_default_tensor_type("torch.cuda.FloatTensor")
    self.seg_net = seg_model
    self.cls_net = cls_model
    ## multi GPU
    if self.sel_GPU is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = self.sel_GPU
    print('Now using: ' + self.title)
    cudnn.benchmark = True
    self.dataloaders = {
        phase: provider(
            data_folder="input/severstal-steel-defect-detection/",
            df_path='input/severstal-steel-defect-detection/train.csv',
            phase=phase,
            fold=self.fold,
            mean=(0.485, 0.456, 0.406),  # ImageNet mean; (0.39, 0.39, 0.39) was an alternative
            std=(0.229, 0.224, 0.225),   # ImageNet std; (0.17, 0.17, 0.17) was an alternative
            batch_size=self.batch_size[phase],
            num_workers=self.num_workers,
        )
        for phase in self.phases
    }
    self.losses = {phase: [] for phase in self.phases}
    self.iou_scores = {phase: [] for phase in self.phases}
    self.dice_scores = {phase: [] for phase in self.phases}
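
# A hedged sketch of the post-processing that the per-class `seg_threshold`
# and `min_size` lists above imply: binarize each class's probability map at
# its threshold, then drop connected components smaller than min_size. The
# helper name `post_process` and per-component filtering are assumptions.
import numpy as np
import cv2

def post_process(probability, threshold, min_size):
    # probability: HxW float map for one class, already sigmoid-activated
    mask = (probability > threshold).astype(np.uint8)
    num_components, components = cv2.connectedComponents(mask)
    predictions = np.zeros(probability.shape, np.float32)
    for c in range(1, num_components):  # label 0 is background
        component = components == c
        if component.sum() >= min_size:
            predictions[component] = 1
    return predictions

# Per-class usage: pred_i = post_process(prob[i], self.seg_threshold[i], self.min_size[i])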
def __init__(self, model): self.num_workers = 6 self.batch_size = {"train": 16, "val": 2} self.accumulation_steps = 32 // self.batch_size['train'] self.base_threshold = 0.5 # <<<<<<<<<<< here's the threshold '''##########hyper-paramaters setting#############''' self.lr = 5e-4 #default:5e-4 self.num_epochs = 60 self.optim = 'RAdam' #'adam' self.learn_plan = 'step' #step5 self.loss_function = 'pytorchBCE' #'pytorchBCE' 'Lovasz' self.title = 'unetpsp' self.sel_GPU = None #set None to select both GPU self.fold = 0 '''###############################################''' #config path of saving pth self.path = os.path.join('weights', self.title, 'f' + str(self.fold)) if not os.path.isdir(self.path): os.mkdir(self.path) print('make directory done!!') self.file_name = os.path.join(self.path, 'logfile.txt') self.best_loss = float("inf") self.phases = ["train", "val"] #self.device = torch.device("cuda:0") #torch.set_default_tensor_type("torch.cuda.FloatTensor") self.net = model ###resume resume = False #<<<<<<<<<<<<<<<<< TODO: whether resume if resume: weights = torch.load( 'weights/3rdclsALL/3rdclsALLRAdamsteppytorchBCE10113.pth', map_location=lambda storage, loc: storage) self.net.load_state_dict(weights["state_dict"], strict=True) print('resuming model done!!!') if self.optim == 'adam': self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr) elif self.optim == 'SGD': self.optimizer = torch.optim.SGD(self.net.parameters(), lr=self.lr, momentum=0.9) elif self.optim == 'RAdam': self.optimizer = RAdam(self.net.parameters(), lr=self.lr) if self.learn_plan == 'step': self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=3, verbose=True) if self.loss_function == 'pytorchBCE': self.criterion = torch.nn.BCEWithLogitsLoss( ) #BCELoss()#BCEWithLogitsLoss() elif self.loss_function == 'dice_loss': self.criterion = dice_loss elif self.loss_function == 'weighted_BCE_loss': self.criterion = weighted_BCE_loss elif self.loss_function == 'binary_focal_loss': self.criterion = binary_focal_loss elif self.loss_function == 'generalized_dice_loss': self.criterion = generalized_dice_loss elif self.loss_function == 'mix': self.criterion = dice_BCE elif self.loss_function == 'change': pass ##multi GPU if self.sel_GPU is not None: os.environ['CUDA_VISIBLE_DEVICES'] = self.sel_GPU else: #self.net, self.optimizer = amp.initialize(self.net.cuda(), self.optimizer, opt_level="O1") #<<<<<<<<< os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' self.net = torch.nn.DataParallel(self.net) self.net = self.net.cuda() print('Now using: ' + self.title + ' ' + self.optim + ' ' + self.learn_plan + ' ' + self.loss_function) self.best_val_loss = float('inf') self.best_val_dice = -1 cudnn.benchmark = True self.dataloaders = { phase: provider( data_folder="severstal-256-crop/", df_path="severstal-256-crop/crop_256.csv", phase=phase, fold=self.fold, mean=(0.485, 0.456, 0.406), # (0.39, 0.39, 0.39), std=(0.229, 0.224, 0.225), # (0.17, 0.17, 0.17), batch_size=self.batch_size[phase], num_workers=self.num_workers, ) for phase in self.phases } self.losses = {phase: [] for phase in self.phases} self.iou_scores = {phase: [] for phase in self.phases} self.dice_scores = {phase: [] for phase in self.phases}
def __init__(self, model): self.num_workers = 6 self.batch_size = {"train": 8, "val": 2} self.accumulation_steps = 32 // self.batch_size['train'] self.base_threshold = 0.5 # <<<<<<<<<<< here's the threshold '''##########hyper-paramaters setting#############''' self.lr = 5e-4 #default:5e-4 self.num_epochs = 80 self.optim = 'RAdam' #'adam' self.learn_plan = 'step' #step5 self.loss_function = 'pytorchBCE' #'pytorchBCE' 'Lovasz' self.title = 'seresnet50' self.crop256 = True '''###############################################''' #config path of saving pth path = os.path.join('weights', self.title) if not os.path.isdir(path): os.mkdir(path) print('make directory done!!') self.file_name = os.path.join(path, 'logfile.txt') #config whether to crop if self.crop256: self.data_folder = "severstal-256-crop/" self.df_path = 'severstal-256-crop/crop_256.csv' else: self.data_folder = "input/severstal-steel-defect-detection/" self.df_path = 'input/severstal-steel-defect-detection/train.csv' self.best_loss = float("inf") self.phases = ["train", "val"] #self.device = torch.device("cuda:0") #torch.set_default_tensor_type("torch.cuda.FloatTensor") self.net = model ###resume resume = False #<<<<<<<<<<<<<<<<< TODO: whether resume if resume: weights = torch.load('weights/model_RAdamsteppytorchBCE9150.pth', map_location=lambda storage, loc: storage) self.net.load_state_dict(weights["state_dict"], strict=True) print('resuming model done!!!') if self.optim == 'adam': self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr) elif self.optim == 'SGD': self.optimizer = torch.optim.SGD(self.net.parameters(), lr=self.lr, momentum=0.9) elif self.optim == 'RAdam': self.optimizer = RAdam(self.net.parameters(), lr=self.lr) if self.learn_plan == 'step': self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=3, verbose=True) elif self.learn_plan == 'SGDR': self.scheduler = CosineAnnealingWithRestartsLR( self.optimizer, T_max=5) #set lr=1e-3 #self.lr = 1e-3 if self.loss_function == 'pytorchBCE': self.criterion = torch.nn.BCEWithLogitsLoss( ) #BCELoss()#BCEWithLogitsLoss() elif self.loss_function == 'dice_loss': self.criterion = dice_loss elif self.loss_function == 'weighted_BCE_loss': self.criterion = weighted_BCE_loss elif self.loss_function == 'binary_focal_loss': self.criterion = binary_focal_loss elif self.loss_function == 'generalized_dice_loss': self.criterion = generalized_dice_loss elif self.loss_function == 'Lovasz': from lovasz_loss import LovaszSoftmax self.criterion = LovaszSoftmax() elif self.loss_function == 'mix': self.criterion = dice_BCE elif self.loss_function == 'change': pass ##multi GPU self.mGPU_apex = True #<<<<<<<<<<<<<<<<<<<TODO: set mGPU here if self.mGPU_apex: #self.net, self.optimizer = amp.initialize(self.net.cuda(), self.optimizer, opt_level="O1") #<<<<<<<<< self.net = torch.nn.DataParallel(self.net) self.net = self.net.cuda() print('Now using: ' + self.optim + ' ' + self.learn_plan + ' ' + self.loss_function) self.best_val_loss = float('inf') self.best_val_dice = -1 cudnn.benchmark = True self.dataloaders = { phase: provider( data_folder=self.data_folder, df_path=self.df_path, phase=phase, mean=(0.485, 0.456, 0.406), # (0.39, 0.39, 0.39), std=(0.229, 0.224, 0.225), # (0.17, 0.17, 0.17), batch_size=self.batch_size[phase], num_workers=self.num_workers, ) for phase in self.phases } self.losses = {phase: [] for phase in self.phases} self.iou_scores = {phase: [] for phase in self.phases} self.dice_scores = {phase: [] for phase in self.phases}
def __init__(self, model): self.num_workers = 6 self.batch_size = {"train": 8, "val": 1} self.accumulation_steps = 32 // self.batch_size['train'] self.base_threshold = 0.5 # <<<<<<<<<<< here's the threshold self.net = model '''##########hyper-paramaters setting#############''' self.lr = 5e-4 #default:5e-4 self.num_epochs = 80 self.optim = 'RAdam' #'adam' self.learn_plan = 'step' #step5 self.loss_function = 'pytorchBCE' #'pytorchBCE' 'Lovasz' self.title = 'res34cv' self.sel_GPU = '1' #set None to select both GPU self.fold = 4 '''###############################################''' # config path of saving pth self.path = os.path.join('weights', self.title, 'f' + str(self.fold)) if not os.path.isdir(self.path): os.mkdir(self.path) print('make directory done!!') self.file_name = os.path.join(self.path, 'logfile.txt') self.data_folder = "input/severstal-steel-defect-detection/" self.df_path = 'input/severstal-steel-defect-detection/train.csv' self.phases = ["train", "val"] self.best_val_loss = float('inf') self.best_val_macc = -1 self.losses = {phase: [] for phase in self.phases} self.acc_total = {phase: [] for phase in self.phases} ###resume resume = False #<<<<<<<<<<<<<<<<< TODO: whether resume if resume: weights = torch.load('weights/00007500_model.pth', map_location=lambda storage, loc: storage) self.net.load_state_dict(weights, strict=True) #["state_dict"] print('resuming model done!!!') if self.optim == 'adam': self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr) elif self.optim == 'SGD': self.optimizer = torch.optim.SGD(self.net.parameters(), lr=self.lr, momentum=0.9) elif self.optim == 'RAdam': self.optimizer = RAdam(self.net.parameters(), lr=self.lr) if self.learn_plan == 'step': self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=3, verbose=True) if self.loss_function == 'pytorchBCE': self.criterion = torch.nn.BCEWithLogitsLoss( ) #BCELoss()#BCEWithLogitsLoss() elif self.loss_function == 'weighted_BCE': self.criterion = weighted_BCE print('Now using: ' + self.optim + ' ' + self.learn_plan + ' ' + self.loss_function) ##multi GPU if self.sel_GPU is not None: os.environ['CUDA_VISIBLE_DEVICES'] = self.sel_GPU else: #self.net, self.optimizer = amp.initialize(self.net.cuda(), self.optimizer, opt_level="O1") #<<<<<<<<< os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' self.net = torch.nn.DataParallel(self.net) self.net = self.net.cuda() cudnn.benchmark = True self.dataloaders = { phase: provider( data_folder=self.data_folder, df_path=self.df_path, phase=phase, fold=self.fold, mean=(0.485, 0.456, 0.406), # (0.39, 0.39, 0.39), std=(0.229, 0.224, 0.225), # (0.17, 0.17, 0.17), batch_size=self.batch_size[phase], num_workers=self.num_workers, ) for phase in self.phases }