Example #1
    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1

    if args.ckpt is not None:
        pure_model = model.module if isinstance(
            model, torch.nn.DataParallel) else model
        it, start_epoch = train_utils.load_checkpoint(pure_model,
                                                      optimizer,
                                                      filename=args.ckpt,
                                                      logger=logger)
        last_epoch = start_epoch + 1

    lr_scheduler, bnm_scheduler = create_scheduler(
        optimizer,
        total_steps=len(train_loader) * args.epochs,
        last_epoch=last_epoch)

    if args.rpn_ckpt is not None:
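        # partial load: only the RPN weights present in the checkpoint are copied in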
        pure_model = model.module if isinstance(
            model, torch.nn.DataParallel) else model
        total_keys = len(pure_model.state_dict())
        train_utils.load_part_ckpt(pure_model,
                                   filename=args.rpn_ckpt,
                                   logger=logger,
                                   total_keys=total_keys)

    if cfg.TRAIN.LR_WARMUP and cfg.TRAIN.OPTIMIZER != 'adam_onecycle':
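        # adam_onecycle builds warmup into its own schedule, so the extra warmup is skipped there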
        lr_warmup_scheduler = train_utils.CosineWarmupLR(
            optimizer,
            T_max=cfg.TRAIN.WARMUP_EPOCH * len(train_loader),
            eta_min=cfg.TRAIN.WARMUP_MIN)
    else:
        lr_warmup_scheduler = None

    # start training
    logger.info('**********************Start training**********************')
    ckpt_dir = os.path.join(root_result_dir, 'ckpt')
    os.makedirs(ckpt_dir, exist_ok=True)
    trainer = train_utils.Trainer(
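
Note: train_utils.CosineWarmupLR is not shown on this page. As a rough sketch, a scheduler matching the call above (ramp from eta_min up to each base LR over T_max steps) could look like the following; the class body is an assumption, only the constructor arguments come from the example.

import math
from torch.optim.lr_scheduler import _LRScheduler

class CosineWarmupLR(_LRScheduler):
    # Half-cosine ramp: eta_min at step 0, the base LR at step T_max.
    def __init__(self, optimizer, T_max, eta_min=0.0, last_epoch=-1):
        self.T_max = T_max
        self.eta_min = eta_min
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        return [self.eta_min + (base_lr - self.eta_min) *
                (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2
                for base_lr in self.base_lrs]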
Example #2
                      mode='TRAIN')
    optimizer = create_optimizer(model)

    if args.mgpus:
        model = nn.DataParallel(model)
    model.cuda()

    # load checkpoint if it is possible
    start_iter = it = 0
    last_iter = -1

    if args.pretrain_ckpt is not None:
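        # initialize weights from a pretrained checkpoint (partial load; optimizer state untouched)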
        pure_model = model.module if isinstance(
            model, torch.nn.DataParallel) else model
        train_utils.load_part_ckpt(pure_model,
                                   filename=args.pretrain_ckpt,
                                   logger=logger)

    if args.ckpt is not None:
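        # full resume: restore weights, optimizer state, and the last iteration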
        pure_model = model.module if isinstance(
            model, torch.nn.DataParallel) else model
        it, _ = train_utils.load_checkpoint(pure_model,
                                            optimizer,
                                            filename=args.ckpt,
                                            logger=logger)
        last_iter = it + 1

    lr_scheduler, bnm_scheduler = create_scheduler(
        optimizer, total_steps=args.total_iters, last_iter=last_iter)
    lr_warmup_scheduler = None
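
Note: train_utils.load_part_ckpt is likewise not shown here. Its name and arguments suggest the usual partial-loading pattern: copy only the checkpoint entries whose key and shape match the current model, and report the coverage. A plausible stand-in (the body is an assumption, not the library's code):

import torch

def load_part_ckpt(model, filename, logger, total_keys=-1):
    # Copy only matching-name, matching-shape tensors; leave the rest untouched.
    ckpt = torch.load(filename, map_location='cpu')
    state = ckpt.get('model_state', ckpt)
    own = model.state_dict()
    matched = {k: v for k, v in state.items()
               if k in own and v.shape == own[k].shape}
    own.update(matched)
    model.load_state_dict(own)
    logger.info('==> Loaded %d/%d parameters from %s'
                % (len(matched), total_keys, filename))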