def check_schedule(self, path, gt_scheduler, gt_lr):
    """Verify the manager's LR schedule against a golden sequence.

    Builds a TrainingManager from *path*, wraps a dummy single-parameter
    Adam optimizer in the manager's LR scheduler, then steps once per
    golden entry, asserting both the active scheduler class and the
    reported learning rate.

    Args:
        path: directory handed to TrainingManager (its config defines the schedule).
        gt_scheduler: expected scheduler class for each epoch.
        gt_lr: expected (scalar) learning rate for each epoch.
    """
    logger = TrainingManager(path)
    # Single dummy weight — the optimizer only exists to drive the scheduler.
    w = Variable(torch.zeros((1, )))
    optimizer = Adam([w])
    lr_scheduler = logger.lr_scheduler(optimizer)
    # The original enumerate() index was never used; zip alone is enough.
    for gold_class, gold_lr in zip(gt_scheduler, gt_lr):
        self.assertEqual(lr_scheduler.current_scheduler.__class__, gold_class)
        # Single param group, so get_lr() is a one-element list.
        self.assertEqual(lr_scheduler.get_lr(), [gold_lr])
        lr_scheduler.step()
def check_schedule_groups(self, path, gt_scheduler, gt_lr):
    """Verify the manager's LR schedule with multiple parameter groups.

    Same idea as check_schedule, but the Adam optimizer carries two
    parameter groups, so each golden LR entry is the full per-group list
    returned by get_lr().

    Args:
        path: directory handed to TrainingManager.
        gt_scheduler: expected scheduler class for each epoch.
        gt_lr: expected per-group learning-rate list for each epoch.
    """
    logger = TrainingManager(path)
    # Two dummy weights to form two distinct parameter groups.
    w1 = Variable(torch.zeros((1, )))
    w2 = Variable(torch.zeros((1, )))
    optimizer = Adam([{
        "params": [w1],
        "lr": 0.0
    }, {
        "params": [w2],
        "lr": 0.0
    }])
    lr_scheduler = logger.lr_scheduler(optimizer)
    # Dropped the leftover debug print(epoch); the index was only used for it.
    for gold_class, gold_lr in zip(gt_scheduler, gt_lr):
        self.assertEqual(lr_scheduler.current_scheduler.__class__, gold_class)
        # gold_lr is the expected list across both groups.
        self.assertEqual(lr_scheduler.get_lr(), gold_lr)
        lr_scheduler.step()
def check_optimizer(self, path):
    """Build the full model/optimizer stack from a TrainingManager and return the optimizer.

    Args:
        path: directory handed to TrainingManager.

    Returns:
        The optimizer constructed by the manager over the model's parameters.
    """
    logger = TrainingManager(path)
    model = logger.model()
    params = logger.parameters(model)
    optimizer = logger.optimizer(params)
    # Construct the scheduler for any side effects on the manager/optimizer
    # (presumably config validation — TODO confirm); the binding itself was
    # never used, so the dead local is dropped.
    logger.lr_scheduler(optimizer)
    return optimizer
from manager import TrainingManager from torch.utils.tensorboard import SummaryWriter from albumentations import * import cv2 import json from pycocotools.cocoeval import COCOeval try: from apex import amp APEX = True except ModuleNotFoundError: APEX = False if __name__ == '__main__': opt = opts().parse() logger = TrainingManager(opt.save_dir) history = History(opt.save_dir, opt.resume) writer = SummaryWriter() torch.backends.cudnn.benchmark = True print(opt) transforms = { "train": Compose( [ ShiftScaleRotate(rotate_limit=90, scale_limit=(-0.35, 0.3), border_mode=cv2.BORDER_CONSTANT), PadIfNeeded(min_height=512, min_width=512, border_mode=cv2.BORDER_CONSTANT, always_apply=True),
import os import torch.utils.data from albumentations import * from torch import device from history import History from models import load_model, save_model from opts import opts from trainers.segmentation_trainer import SegmentationTrainer as Trainer from manager import TrainingManager if __name__ == '__main__': opt = opts().parse() logger = TrainingManager(opt.save_dir) history = History(opt.save_dir, opt.resume) # writer = SummaryWriter() torch.backends.cudnn.benchmark = True print(opt) transforms = { "train": Compose([ ToGray(), HorizontalFlip(), OneOf([ Compose([ OneOf([ ElasticTransform(alpha=200, sigma=35, p=0.5, border_mode=cv2.BORDER_WRAP), OpticalDistortion(border_mode=cv2.BORDER_WRAP) ], p=0.5), RandomSunFlare(flare_roi=(0, 0, 1, 0.25), num_flare_circles_lower=1, num_flare_circles_upper=2, src_radius=150, p=0.5),
from manager import TrainingManager if __name__ == '__main__': opt = opts().parse() video = cv2.VideoCapture(opt.video) sc = 0.6 writer = cv2.VideoWriter(opt.video.replace(".avi", "result.avi"), cv2.VideoWriter_fourcc(*'MJPG'), 25, (int(int(video.get(3) * sc) // 32 * 32), int(int(video.get(4) * sc) // 32 * 32))) writer_map = cv2.VideoWriter(opt.video.replace(".avi", "result_map.avi"), cv2.VideoWriter_fourcc("M", "J", "P", "G"), 25, (int(int(video.get(3) * sc) // 32 * 32), int(int(video.get(4) * sc) // 32 * 32 * 2))) logger = TrainingManager(opt.save_dir) history = History(opt.save_dir, opt.resume) torch.backends.cudnn.benchmark = True print(opt) os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str opt.device = device('cuda' if opt.gpus[0] >= 0 else 'cpu') losses, loss_weights = logger.loss model = logger.model params = logger.parameters(model) optimizer = logger.optimizer(params) lr_schedule = logger.lr_scheduler(optimizer) model, optimizer, start_epoch, best = load_model(model, opt.load_model, optimizer, opt.resume) metrics = logger.metric trainer = Trainer(model, losses,
import numpy as np import torch.utils.data from albumentations import * from sklearn.metrics import roc_auc_score from torch import device from history import History from models.__init__ import load_model, save_model from opts import opts from trainers.classify_trainer import ClassifyTrainer as Trainer from manager import TrainingManager from transforms.random_lines import random_microscope, AdvancedHairAugmentation if __name__ == '__main__': opt = opts().parse() logger = TrainingManager(opt.save_dir) history = History(opt.save_dir, opt.resume) torch.backends.cudnn.benchmark = True print(opt) transforms = { "train": Compose([ IAAAffine(shear=12, p=0.7), ShiftScaleRotate(rotate_limit=45, scale_limit=(-0.5, 0.5)), Flip(), Transpose(), ElasticTransform(alpha=100, sigma=25, p=0.5), AdvancedHairAugmentation(hairs=10, hairs_folder="hairs"), random_microscope(), CoarseDropout(min_holes=8, max_width=16, max_height=16, p=0.75), OneOf([