def start_train(cfg):
    """Build the SSD model and optimizer from ``cfg``, restore any checkpoint, and train.

    Args:
        cfg: experiment config node (reads SOLVER.OPTIM, SOLVER.LR,
            SOLVER.MOMENTUM, SOLVER.WEIGHT_DECAY, SOLVER.MAX_ITER, OUTPUT_DIR).

    Returns:
        The trained model as returned by ``do_train``.

    Raises:
        ValueError: if ``cfg.SOLVER.OPTIM`` is neither "SGD" nor "Adam".
            (The original code used two independent ``if`` blocks, so an
            unrecognized value left ``optimizer`` unbound and crashed later
            with an UnboundLocalError.)
    """
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)
    if cfg.SOLVER.OPTIM == "SGD":
        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=cfg.SOLVER.LR,
            momentum=cfg.SOLVER.MOMENTUM,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        )
    elif cfg.SOLVER.OPTIM == "Adam":
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=cfg.SOLVER.LR,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        )
    else:
        # Fail fast instead of leaving `optimizer` unbound.
        raise ValueError(f"Unsupported cfg.SOLVER.OPTIM: {cfg.SOLVER.OPTIM!r}")

    arguments = {"iteration": 0}
    save_to_disk = True
    checkpointer = CheckPointer(
        model, optimizer, cfg.OUTPUT_DIR, save_to_disk, logger,
    )
    # Resume: checkpoint data (e.g. last iteration) overrides the defaults above.
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER
    train_loader = make_data_loader(
        cfg, is_train=True, max_iter=max_iter,
        start_iter=arguments['iteration'])

    model = do_train(
        cfg, model, train_loader, optimizer, checkpointer, arguments)
    return model
def start_train(cfg):
    """Build the SSD model, optimizer and warm-up scheduler from ``cfg`` and train.

    Args:
        cfg: experiment config node (reads SOLVER.TYPE ("sgd"/"adam"),
            SOLVER.LR, SOLVER.MOMENTUM, SOLVER.WEIGHT_DECAY,
            SOLVER.MAX_ITER, OUTPUT_DIR).

    Returns:
        The trained model as returned by ``do_train``.
    """
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)

    if cfg.SOLVER.TYPE == "sgd":
        optimizer = torch.optim.SGD(
            model.parameters(),
            lr=cfg.SOLVER.LR,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
            momentum=cfg.SOLVER.MOMENTUM)
    else:
        if cfg.SOLVER.TYPE != "adam":
            # Was a bare print(); route the warning through the trainer logger
            # so it shows up in the run logs alongside everything else.
            logger.warning(
                "Incorrect solver type %r, defaulting to Adam", cfg.SOLVER.TYPE)
        # Single Adam construction covers both "adam" and the fallback
        # (the original duplicated this call in two branches).
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=cfg.SOLVER.LR,
            weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        )

    scheduler = LinearMultiStepWarmUp(cfg, optimizer)

    arguments = {"iteration": 0}
    save_to_disk = True
    checkpointer = CheckPointer(
        model, optimizer, cfg.OUTPUT_DIR, save_to_disk, logger,
    )
    # Resume: checkpoint data (e.g. last iteration) overrides the defaults above.
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER
    train_loader = make_data_loader(
        cfg, is_train=True, max_iter=max_iter,
        start_iter=arguments['iteration'])

    model = do_train(
        cfg, model, train_loader, optimizer, checkpointer, arguments, scheduler)
    return model
def start_train(cfg):
    """Train an SSD detector with Nesterov SGD and cosine-annealed LR.

    Builds the model from ``cfg``, restores any existing checkpoint from
    ``cfg.OUTPUT_DIR``, and hands everything to ``do_train``.

    Args:
        cfg: experiment config node (reads SOLVER.LR, SOLVER.MOMENTUM,
            SOLVER.WEIGHT_DECAY, SOLVER.MAX_ITER, OUTPUT_DIR).

    Returns:
        The trained model as returned by ``do_train``.
    """
    logger = logging.getLogger('SSD.trainer')
    model = torch_utils.to_cuda(SSDDetector(cfg))

    # Only optimize parameters that actually require gradients.
    trainable_params = (p for p in model.parameters() if p.requires_grad)
    optimizer = torch.optim.SGD(
        trainable_params,
        lr=cfg.SOLVER.LR,
        momentum=cfg.SOLVER.MOMENTUM,
        weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        nesterov=True,
    )
    # Cosine annealing with a period of MAX_ITER/1000 scheduler steps.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=int(cfg.SOLVER.MAX_ITER / 1000),
        eta_min=0)

    arguments = {"iteration": 0}
    checkpointer = CheckPointer(
        model, optimizer, cfg.OUTPUT_DIR, True, logger,
    )
    # Resume from checkpoint if one exists; this updates the start iteration.
    arguments.update(checkpointer.load())

    train_loader = make_data_loader(
        cfg,
        is_train=True,
        max_iter=cfg.SOLVER.MAX_ITER,
        start_iter=arguments['iteration'])

    return do_train(
        cfg, model, train_loader, optimizer, checkpointer, arguments, scheduler)
def start_train(cfg, visualize_example=False):
    """Build the SSD model and SGD optimizer from ``cfg`` and run training.

    Args:
        cfg: experiment config node (reads SOLVER.LR, SOLVER.MOMENTUM,
            SOLVER.WEIGHT_DECAY, SOLVER.MAX_ITER, OUTPUT_DIR).
        visualize_example: forwarded to ``do_train``; presumably toggles
            visualization of training examples — confirm against do_train.

    Returns:
        The trained model as returned by ``do_train``.
    """
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    # Was `print(model)`; use the trainer logger so the architecture dump
    # lands in the run logs. (Also removed the commented-out Adam/CyclicLR
    # experiments that were dead code here.)
    logger.info(model)
    model = torch_utils.to_cuda(model)

    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=cfg.SOLVER.LR,
        momentum=cfg.SOLVER.MOMENTUM,
        weight_decay=cfg.SOLVER.WEIGHT_DECAY)

    arguments = {"iteration": 0}
    save_to_disk = True
    checkpointer = CheckPointer(
        model, optimizer, cfg.OUTPUT_DIR, save_to_disk, logger,
    )
    # Resume: checkpoint data (e.g. last iteration) overrides the defaults above.
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER
    train_loader = make_data_loader(
        cfg, is_train=True, max_iter=max_iter,
        start_iter=arguments['iteration'])

    # No LR scheduler is used in this variant (lr_scheduler=None).
    model = do_train(
        cfg, model, train_loader, optimizer, checkpointer, arguments,
        visualize_example, lr_scheduler=None)
    return model
def start_train(cfg):
    """Build the SSD model, Adam optimizer and MultiStep LR schedule, then train.

    Args:
        cfg: experiment config node (reads SOLVER.LR, SOLVER.WEIGHT_DECAY,
            SOLVER.GAMMA, SOLVER.MAX_ITER, OUTPUT_DIR).

    Returns:
        The trained model as returned by ``do_train``.
    """
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)

    # Adam optimizer (the commented-out SGD experiment was dead code; removed).
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=cfg.SOLVER.LR,
        weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    # NOTE(review): milestones are hard-coded rather than read from cfg
    # (e.g. cfg.SOLVER.LR_STEPS) — intentional? Verify against the config.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer=optimizer,
        milestones=[6000, 10000],
        gamma=cfg.SOLVER.GAMMA)

    arguments = {"iteration": 0}
    save_to_disk = True
    checkpointer = CheckPointer(
        model, optimizer, cfg.OUTPUT_DIR, save_to_disk, logger,
    )
    # Resume: checkpoint data (e.g. last iteration) overrides the defaults above.
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER
    train_loader = make_data_loader(
        cfg, is_train=True, max_iter=max_iter,
        start_iter=arguments['iteration'])

    model = do_train(
        cfg, model, train_loader, optimizer, checkpointer, arguments, scheduler)
    return model
def start_train(cfg):
    """Build the SSD model and Adam optimizer from ``cfg`` and run training.

    Args:
        cfg: experiment config node (reads SOLVER.LR, SOLVER.WEIGHT_DECAY,
            SOLVER.MAX_ITER, OUTPUT_DIR).

    Returns:
        The trained model as returned by ``do_train``.
    """
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)

    # Adam optimizer (task 4c). The commented-out SGD experiment was dead
    # code and has been removed; betas/eps/amsgrad were spelled out at their
    # PyTorch defaults ((0.9, 0.999), 1e-08, False), so they are omitted —
    # behavior is unchanged.
    optimizer = torch.optim.Adam(
        params=model.parameters(),
        lr=cfg.SOLVER.LR,
        weight_decay=cfg.SOLVER.WEIGHT_DECAY)

    arguments = {"iteration": 0}
    save_to_disk = True
    checkpointer = CheckPointer(
        model, optimizer, cfg.OUTPUT_DIR, save_to_disk, logger,
    )
    # Resume: checkpoint data (e.g. last iteration) overrides the defaults above.
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER
    train_loader = make_data_loader(
        cfg, is_train=True, max_iter=max_iter,
        start_iter=arguments['iteration'])

    model = do_train(
        cfg, model, train_loader, optimizer, checkpointer, arguments)
    return model
def start_train(cfg):
    """Build the SSD model, Adam optimizer and LR scheduler from ``cfg`` and train.

    Args:
        cfg: experiment config node (reads SOLVER.LR, SOLVER.LR_STEPS,
            SOLVER.MAX_ITER, OUTPUT_DIR).

    Returns:
        The trained model as returned by ``do_train``.
    """
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)

    lr = cfg.SOLVER.LR
    # NOTE(review): weight_decay is hard-coded to 0.005 here instead of
    # reading cfg.SOLVER.WEIGHT_DECAY — confirm this is intentional.
    optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=0.005)
    # Plain copy; the original `[step for step in ...]` comprehension was
    # an unnecessary element-by-element rebuild (ruff PERF402).
    milestones = list(cfg.SOLVER.LR_STEPS)
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = True
    # This variant's CheckPointer also tracks the scheduler state.
    checkpointer = CheckPointer(
        model, optimizer, scheduler, cfg.OUTPUT_DIR, save_to_disk, logger)
    # Resume: checkpoint data (e.g. last iteration) overrides the defaults above.
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER
    train_loader = make_data_loader(
        cfg, is_train=True, max_iter=max_iter,
        start_iter=arguments['iteration'])

    model = do_train(
        cfg, model, train_loader, optimizer, scheduler, checkpointer, arguments)
    return model