Example #1
def build_opt_lr(model, cfg, args, epoch):
    """
    获取优化方法和学习率
    :param model:
    :param cfg:
    :param args:
    :param epoch:
    :return:
    """
    # Get the backbone feature parameter groups to train
    backbone_feature = model.features.param_groups(
        cfg['lr']['start_lr'], cfg['lr']['feature_lr_mult'])
    if len(backbone_feature) == 0:
        # Backbone frozen: get the trainable parameters of the RPN network only
        trainable_params = model.rpn_model.param_groups(
            cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult'], 'mask')
    else:
        # Get the training parameters of the backbone, RPN and mask networks
        trainable_params = backbone_feature + \
                           model.rpn_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult']) + \
                           model.mask_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['mask_lr_mult'])
    # Optimize with stochastic gradient descent (SGD)
    optimizer = torch.optim.SGD(trainable_params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Build the learning-rate scheduler
    lr_scheduler = build_lr_scheduler(optimizer, cfg['lr'], epochs=args.epochs)
    # Step the scheduler to the current epoch's learning rate
    lr_scheduler.step(epoch)
    # Return the optimizer and the learning-rate scheduler
    return optimizer, lr_scheduler
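
A minimal usage sketch (not part of the example above), assuming an argparse-style args with lr, momentum, weight_decay and epochs, the same cfg dict, a train_loader, and a model whose forward pass returns a scalar loss; all of these names are assumptions for illustration:

optimizer, lr_scheduler = build_opt_lr(model, cfg, args, epoch=0)
for epoch in range(args.epochs):
    for template, search, label in train_loader:   # hypothetical data loader
        optimizer.zero_grad()
        loss = model(template, search, label)      # assumes the forward pass returns a scalar loss
        loss.backward()
        optimizer.step()
    lr_scheduler.step(epoch + 1)                   # advance the schedule to the next epoch
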
Example #2
def build_opt_lr(model, cfg, args, epoch):
    # Train the backbone features together with the RPN head (no mask branch here)
    trainable_params = model.features.param_groups(cfg['lr']['start_lr'], cfg['lr']['feature_lr_mult']) + \
            model.rpn_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult'])

    optimizer = torch.optim.SGD(trainable_params, args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    lr_scheduler = build_lr_scheduler(optimizer, cfg['lr'], epochs=args.epochs)

    lr_scheduler.step(epoch)

    return optimizer, lr_scheduler
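
Every example relies on a param_groups(start_lr, mult, ...) method defined on the model's sub-modules, which is not shown on this page. A minimal sketch of what such a helper would have to return, inferred only from how its result is passed to torch.optim.SGD (an assumption, not the project's actual code):

def param_groups(self, start_lr, lr_mult=1):
    # Collect only the trainable parameters and attach a scaled learning rate.
    params = [p for p in self.parameters() if p.requires_grad]
    if not params:
        return []          # e.g. a fully frozen backbone
    return [{'params': params, 'lr': start_lr * lr_mult}]

An empty list for a frozen backbone is exactly what lets Example #1 fall back to training only the RPN branch.
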
Example #3
def build_opt_lr(model, epoch):
    trainable_params = []
    # Freeze the entire backbone first ...
    for param in model.backbone.parameters():
        param.requires_grad = False
    # ... then unfreeze the backbone layers whose scheduled "unfix" epoch has been reached
    for i in range(len(cfg.backbone.unfix_steps)):
        if epoch >= cfg.backbone.unfix_steps[i]:
            layer = getattr(model.backbone, cfg.backbone.unfix_layers[i])
            for param in layer.parameters():
                param.requires_grad = True
            trainable_params += [{
                'params': layer.parameters(),
                'lr': cfg.backbone.unfix_lr[i]
            }]

    # The neck (when cfg.adjust.adjust is set) and the siamese head are always added to the optimizer
    if cfg.adjust.adjust:
        trainable_params += [{'params': model.neck.parameters(), 'lr': 1}]
    trainable_params += [{'params': model.siamese.parameters(), 'lr': 1}]
    # Corner-detection (and optional attention) branches only train after the pretraining epochs
    if cfg.cornerdet.cornerdet:
        if epoch >= cfg.train.pretrain_epoch:
            if cfg.attention.attention:
                for param in model.attention.parameters():
                    param.requires_grad = True
                trainable_params += [{
                    'params': model.attention.parameters(),
                    'lr': 1
                }]
            for param in model.cornerdet.parameters():
                param.requires_grad = True
            trainable_params += [{
                'params': model.cornerdet.parameters(),
                'lr': 1
            }]
        else:
            if cfg.attention.attention:
                for param in model.attention.parameters():
                    param.requires_grad = False
            for param in model.cornerdet.parameters():
                param.requires_grad = False
    # No optimizer-level lr: every parameter group above carries its own 'lr'
    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.train.momentum,
                                weight_decay=cfg.train.weight_decay)
    lr_scheduler = build_lr_scheduler(optimizer, cfg.train.lr, cfg.train.epoch)
    logger.info('change training parameters.')
    logger.info("model\n{}".format(describe(model)))
    return optimizer, lr_scheduler
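
Example #3 works directly with PyTorch parameter-group dicts and freezes layers by toggling requires_grad. The self-contained sketch below (toy modules and illustrative values only) shows those two mechanisms in isolation:

import torch
import torch.nn as nn

# Toy stand-in for a backbone plus a head; the real model is much larger.
model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))

# Freeze the "backbone": frozen parameters are simply not handed to the optimizer.
for param in model[0].parameters():
    param.requires_grad = False

# One dict per parameter group; a group-level 'lr' overrides the optimizer default.
trainable_params = [{'params': model[1].parameters(), 'lr': 0.01}]
optimizer = torch.optim.SGD(trainable_params, lr=0.001,
                            momentum=0.9, weight_decay=1e-4)

Example #3 can omit the optimizer-level lr entirely because every group it builds carries its own 'lr' entry.
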
Example #4
def build_opt_lr(model, cfg, args, epoch):
    backbone_feature = model.features.param_groups(
        cfg['lr']['start_lr'], cfg['lr']['feature_lr_mult'])
    if len(backbone_feature) == 0:
        trainable_params = model.rpn_model.param_groups(
            cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult'], 'mask')
    else:
        # Same pattern as Example #1, with keypoint (kp_model) and pose-correlation heads instead of a mask branch
        trainable_params = backbone_feature + \
                           model.rpn_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult']) + \
                           model.kp_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['mask_lr_mult']) + \
                           model.pose_corr.param_groups(cfg['lr']['start_lr'], cfg['lr']['mask_lr_mult'])

    optimizer = torch.optim.SGD(trainable_params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    lr_scheduler = build_lr_scheduler(optimizer, cfg['lr'], epochs=args.epochs)

    lr_scheduler.step(epoch)

    return optimizer, lr_scheduler
Example #5
def build_opt_lr(model, cfg, args, epoch):
    '''
    Build the optimizer and learning-rate scheduler for the mask and refine branches.
    :param model:
    :param cfg:
    :param args:
    :param epoch:
    :return:
    '''
    trainable_params = model.mask_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['mask_lr_mult']) + \
                       model.refine_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['mask_lr_mult'])
    # Optimize with stochastic gradient descent (SGD)
    optimizer = torch.optim.SGD(trainable_params, args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Build the learning-rate scheduler
    lr_scheduler = build_lr_scheduler(optimizer, cfg['lr'], epochs=args.epochs)
    # Step the scheduler to the current epoch's learning rate
    lr_scheduler.step(epoch)

    return optimizer, lr_scheduler
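
All five examples delegate to a project-specific build_lr_scheduler(optimizer, cfg['lr'], epochs=...) that is not shown on this page. As a rough stand-in only (the real helper is presumably more configurable), a log-space decay from start_lr to an assumed end_lr can be written with a standard LambdaLR:

import torch

def build_lr_scheduler(optimizer, lr_cfg, epochs=50):
    # Hedged stand-in: exponential decay from start_lr down to end_lr over the given epochs.
    # Assumes lr_cfg provides 'start_lr' and optionally 'end_lr'; the real config may differ.
    start_lr = lr_cfg['start_lr']
    end_lr = lr_cfg.get('end_lr', start_lr * 0.01)
    gamma = (end_lr / start_lr) ** (1.0 / max(epochs - 1, 1))
    # LambdaLR scales each group's initial lr, so per-group multipliers are preserved.
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: gamma ** epoch)
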