def build_opt_lr_latent_last(model, current_epoch=0):
    """Build an SGD optimizer and LR scheduler for the heads only; the
    backbone parameter group is left commented out below."""
    trainable_params = []
    # trainable_params += [{'params': filter(lambda x: x.requires_grad,
    #                                        model.backbone.parameters()),
    #                       'lr': cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR}]

    trainable_params += [{'params': filter(lambda x: x.requires_grad,
                                           model.rpn_head.parameters()),
                          'lr': cfg.TRAIN.BASE_LR}]

    if cfg.MASK.MASK:
        trainable_params += [{'params': model.mask_head.parameters(),
                              'lr': cfg.TRAIN.BASE_LR}]

    if cfg.REFINE.REFINE:
        trainable_params += [{'params': model.refine_head.parameters(),
                              'lr': cfg.TRAIN.BASE_LR}]

    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.TRAIN.MOMENTUM,
                                weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
    lr_scheduler.step(cfg.TRAIN.START_EPOCH)
    return optimizer, lr_scheduler
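
All of the examples in this listing assume the same module-level names: torch, nn, a global cfg experiment config, and a build_lr_scheduler helper. A minimal sketch of the imports they rely on, using pysot-style module paths (the exact paths are an assumption and differ between repositories):

import torch
import torch.nn as nn

# yacs-style global experiment config (BACKBONE, TRAIN, MASK, ... nodes) -- assumed path
from pysot.core.config import cfg
# builds the warmup + decay schedule configured under cfg.TRAIN -- assumed path
from pysot.utils.lr_scheduler import build_lr_scheduler
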
Example #2
def build_opt_lr(model, current_epoch=0):
    '''
    Build the SGD optimizer and LR scheduler, unfreezing the selected
    backbone layers once current_epoch reaches cfg.BACKBONE.TRAIN_EPOCH.

    :param model: tracker model with backbone / neck / head sub-modules
    :param current_epoch: epoch used to decide whether to fine-tune the backbone
    :return: (optimizer, lr_scheduler)
    '''
    if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
        for layer in cfg.BACKBONE.TRAIN_LAYERS:
            for param in getattr(model.backbone, layer).parameters():
                param.requires_grad = True
            for m in getattr(model.backbone, layer).modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.train()
    else:
        for param in model.backbone.parameters():
            param.requires_grad = False
        for m in model.backbone.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
    # The last backbone layers use a learning rate 10x smaller than the RPN layers,
    # i.e. the base rate BASE_LR scaled by LAYERS_LR
    trainable_params = []
    trainable_params += [{
        'params':
        filter(lambda x: x.requires_grad, model.backbone.parameters()),
        'lr':
        cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR
    }]
    # All remaining RPN-related parameter groups use the base learning rate as-is
    if cfg.ADJUST.ADJUST:
        trainable_params += [{
            'params': model.neck.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    trainable_params += [{
        'params': model.rpn_head.parameters(),
        'lr': cfg.TRAIN.BASE_LR
    }]

    if cfg.MASK.MASK:
        trainable_params += [{
            'params': model.mask_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    if cfg.REFINE.REFINE:
        trainable_params += [{
            'params': model.refine_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]
    # The optimizer is SGD with momentum
    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.TRAIN.MOMENTUM,
                                weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
    lr_scheduler.step(cfg.TRAIN.START_EPOCH)
    return optimizer, lr_scheduler
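
A usage sketch (illustrative only; the model and per-epoch training step are assumed to exist): because the backbone is only unfrozen once current_epoch reaches cfg.BACKBONE.TRAIN_EPOCH, training scripts that follow this pattern rebuild the optimizer and scheduler at that epoch so the newly trainable backbone parameters are actually picked up.

# Illustrative driver loop, not the repository's train.py
optimizer, lr_scheduler = build_opt_lr(model, cfg.TRAIN.START_EPOCH)
for epoch in range(cfg.TRAIN.START_EPOCH, cfg.TRAIN.EPOCH):
    if epoch == cfg.BACKBONE.TRAIN_EPOCH:
        # rebuild so the unfrozen backbone parameter group joins the optimizer
        optimizer, lr_scheduler = build_opt_lr(model, epoch)
    lr_scheduler.step(epoch)
    # ... run one training epoch with `optimizer` ...
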
Example #3
def build_opt_lr(model, current_epoch=0):
    for param in model.backbone.parameters():
        param.requires_grad = False
    for m in model.backbone.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.eval()
    if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
        for layer in cfg.BACKBONE.TRAIN_LAYERS:
            for param in getattr(model.backbone, layer).parameters():
                param.requires_grad = True
            for m in getattr(model.backbone, layer).modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.train()

    trainable_params = []
    trainable_params += [{
        'params':
        filter(lambda x: x.requires_grad, model.backbone.parameters()),
        'lr':
        cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR
    }]

    if cfg.ADJUST.ADJUST:
        trainable_params += [{
            'params': model.neck.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]
    if cfg.TRANSFORMER.TRANSFORMER:
        trainable_params += [{
            'params': model.tr_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]
    else:
        trainable_params += [{
            'params': model.rpn_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    if cfg.MASK.MASK:
        trainable_params += [{
            'params': model.mask_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    if cfg.REFINE.REFINE:
        trainable_params += [{
            'params': model.refine_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.TRAIN.MOMENTUM,
                                weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
    lr_scheduler.step(cfg.TRAIN.START_EPOCH)
    return optimizer, lr_scheduler
Example #4
def build_opt_lr(model, current_epoch=0):
    if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
        if cfg.BACKBONE.TYPE == 'HourglassNet' or cfg.BACKBONE.TYPE == 'xception':
            for param in model.backbone.parameters():
                param.requires_grad = True
            for m in model.backbone.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.train()
        else:
            for layer in cfg.BACKBONE.TRAIN_LAYERS:
                for param in getattr(model.backbone, layer).parameters():
                    param.requires_grad = True
                for m in getattr(model.backbone, layer).modules():
                    if isinstance(m, nn.BatchNorm2d):
                        m.train()
    else:
        for param in model.backbone.parameters():
            param.requires_grad = False
        for m in model.backbone.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    trainable_params = []
    trainable_params += [{
        'params':
        filter(lambda x: x.requires_grad, model.backbone.parameters()),
        'lr':
        cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR
    }]

    if cfg.ADJUST.ADJUST:
        trainable_params += [{
            'params': model.neck.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    trainable_params += [{
        'params': model.kpn_head.parameters(),
        'lr': cfg.TRAIN.BASE_LR
    }]

    if cfg.TRAIN.OPTI == 'adam':
        optimizer = torch.optim.Adam(trainable_params,
                                     weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.SGD(trainable_params,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
    return optimizer, lr_scheduler
Example #5
def build_opt_lr(model, current_epoch=0):
    # In the last 10 epochs, fine-tune the selected backbone layers
    if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
        for layer in cfg.BACKBONE.TRAIN_LAYERS:
            for param in getattr(model.backbone, layer).parameters():
                param.requires_grad = True
            for m in getattr(model.backbone, layer).modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.train()
    else:
        for param in model.backbone.parameters():
            param.requires_grad = False
        for m in model.backbone.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

        # SET CHANNEL_REDUCE PARAMETERS
        for layer in cfg.BACKBONE.CHANNEL_REDUCE_LAYERS:
            for param in getattr(model.backbone, layer).parameters():
                param.requires_grad = True

    trainable_params = []
    trainable_params += [{
        'params':
        filter(lambda x: x.requires_grad, model.backbone.parameters()),
        'lr':
        cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR
    }]

    trainable_params += [{
        'params': model.car_head.parameters(),
        'lr': cfg.TRAIN.BASE_LR
    }]

    if cfg.TRAIN.ATTENTION:
        trainable_params += [{
            'params': model.attention.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.TRAIN.MOMENTUM,
                                weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
    lr_scheduler.step(cfg.TRAIN.START_EPOCH)
    return optimizer, lr_scheduler
Example #6
def build_opt_lr(model, current_epoch=0):
    if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
        for layer in cfg.BACKBONE.TRAIN_LAYERS:
            for param in getattr(model.backbone, layer).parameters():
                param.requires_grad = True
            for m in getattr(model.backbone, layer).modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.train()
    else:
        # For the first 10 epochs, freeze the backbone's trainable parameters
        for param in model.backbone.parameters():
            param.requires_grad = False
        for m in model.backbone.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()  # keep BatchNorm running statistics fixed while the backbone is frozen

    trainable_params = []
    # Keep only the parameters in model.backbone.parameters() with requires_grad=True
    trainable_params += [{'params': filter(lambda x: x.requires_grad,
                                           model.backbone.parameters()),
                          'lr': cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR}]  # lr:0.1 x 0.005

    if cfg.ADJUST.ADJUST:
        trainable_params += [{'params': model.neck.parameters(),
                              'lr': cfg.TRAIN.BASE_LR}]

    trainable_params += [{'params': model.rpn_head.parameters(),
                          'lr': cfg.TRAIN.BASE_LR}]

    if cfg.MASK.MASK:
        trainable_params += [{'params': model.mask_head.parameters(),
                              'lr': cfg.TRAIN.BASE_LR}]

    if cfg.REFINE.REFINE:
        trainable_params += [{'params': model.refine_head.parameters(),
                              'lr': cfg.TRAIN.BASE_LR}]

    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.TRAIN.MOMENTUM,
                                weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)  # build the LR scheduler, e.g. 20 epochs in total: 5 warmup, then 15 regular training epochs
    lr_scheduler.step(cfg.TRAIN.START_EPOCH)  # advance the schedule to the starting epoch
    return optimizer, lr_scheduler
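
To check that the schedule behaves as the comments above describe (warmup first, then regular decay, with the backbone group at LAYERS_LR * BASE_LR and the head groups at BASE_LR), the per-group learning rates can be printed after each scheduler step using standard torch.optim attributes:

# Print the current learning rate of every parameter group
for i, group in enumerate(optimizer.param_groups):
    print('param group %d: lr = %.6f' % (i, group['lr']))
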
Example #7
def build_opt_lr2(netD, current_epoch=0):
    paramsD = []
    for key, value in dict(netD.discriminator.named_parameters()).items():
        if 'conv' in key:
            if 'bias' in key:
                paramsD += [{'params': [value], 'weight_decay': 0, 'lr': 0.01}]
            else:  # weight
                paramsD += [{
                    'params': [value],
                    'weight_decay': cfg.TRAIN.WEIGHT_DECAY,
                    'lr': 0.01
                }]
        if 'bn' in key:
            paramsD += [{'params': [value], 'weight_decay': 0, 'lr': 0.01}]
    optimizer2 = torch.optim.SGD(paramsD,
                                 momentum=cfg.TRAIN.MOMENTUM,
                                 weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler2 = build_lr_scheduler(optimizer2, epochs=cfg.TRAIN.EPOCH)
    lr_scheduler2.step(cfg.TRAIN.START_EPOCH)
    return optimizer2, lr_scheduler2, paramsD
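
Example #7 keys its weight-decay split on parameter-name substrings ('conv', 'bias', 'bn'), so any parameter whose name contains neither substring is silently left out of paramsD. A more defensive variant (a sketch under the same assumptions, not the repository's code) groups by module type instead; the returned groups can be passed to torch.optim.SGD in place of the hand-built list:

def split_weight_decay(module, base_lr, weight_decay):
    # Exempt biases and BatchNorm parameters from weight decay by module type
    decay, no_decay = [], []
    for m in module.modules():
        # recurse=False so each parameter is visited exactly once
        for name, p in m.named_parameters(recurse=False):
            if not p.requires_grad:
                continue
            if isinstance(m, nn.BatchNorm2d) or name.endswith('bias'):
                no_decay.append(p)
            else:
                decay.append(p)
    return [{'params': decay, 'lr': base_lr, 'weight_decay': weight_decay},
            {'params': no_decay, 'lr': base_lr, 'weight_decay': 0.0}]
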
Example #8
def build_opt_lr(model, current_epoch=0):

    # # Fine-tune the last three stages of a ResNet backbone
    # if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
    #     for layer in cfg.BACKBONE.TRAIN_LAYERS:
    #         for param in getattr(model.backbone, layer).parameters():
    #             param.requires_grad = True
    #         for m in getattr(model.backbone, layer).modules():
    #             if isinstance(m, nn.BatchNorm2d):
    #                 m.train()
    # else:
    #     for param in model.backbone.parameters():
    #         param.requires_grad = False
    #     for m in model.backbone.modules():
    #         if isinstance(m, nn.BatchNorm2d):
    #             m.eval()

    if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
        for layer in cfg.BACKBONE.TRAIN_LAYERS:
            # Fine-tune the last stages of the EfficientNet backbone (blocks from index 20 onward)
            if layer == '_blocks':
                for idx, block in enumerate(getattr(model.backbone, layer)):
                    if idx >= 20:
                        for param in block.parameters():
                            param.requires_grad = True
                        for m in block.modules():
                            if isinstance(m, nn.BatchNorm2d):
                                m.train()
            else:
                for param in getattr(model.backbone, layer).parameters():
                    param.requires_grad = True
                for m in getattr(model.backbone, layer).modules():
                    if isinstance(m, nn.BatchNorm2d):
                        m.train()
    else:
        for param in model.backbone.parameters():
            param.requires_grad = False
        for m in model.backbone.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    # if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
    #     for param in model.backbone.parameters():
    #         param.requires_grad = True
    #     model.backbone.train()
    #     for m in model.backbone.modules():
    #         m.train()
    # else:
    #     for param in model.backbone.parameters():
    #         param.requires_grad = False
    #     for m in model.backbone.modules():
    #         m.eval()

    trainable_params = []
    trainable_params += [{
        'params':
        filter(lambda x: x.requires_grad, model.backbone.parameters()),
        'lr':
        cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR
    }]

    if cfg.ADJUST.ADJUST:
        trainable_params += [{
            'params': model.neck.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    trainable_params += [{
        'params': model.rpn_head.parameters(),
        'lr': cfg.TRAIN.BASE_LR
    }]

    # if cfg.MASK.MASK:
    #     trainable_params += [{'params': model.mask_head.parameters(),
    #                           'lr': cfg.TRAIN.BASE_LR}]
    #
    # if cfg.REFINE.REFINE:
    #     trainable_params += [{'params': model.refine_head.parameters(),
    #                           'lr': cfg.TRAIN.LR.BASE_LR}]

    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.TRAIN.MOMENTUM,
                                weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
    lr_scheduler.step(cfg.TRAIN.START_EPOCH)
    return optimizer, lr_scheduler
Example #9
def build_opt_lr2(netD, current_epoch=0):
    trainable_params = []
    #    for param in netD.module.discriminator.parameters():
    #        param.requires_grad = True
    #        print("params:",netD.module.discriminator.parameters())

    #    for m in netD.module.discriminator.modules():
    #        if isinstance(m, nn.BatchNorm2d):
    #            m.train()
    #            print("modules:",netD.module.discriminator.modules())

    #    trainable_params += [{'params': filter(lambda x: x.requires_grad,
    #                                           netD.module.discriminator.parameters()),
    #                          'lr': cfg.TRAIN.BASE_LR}]
    model = netD.module
    if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
        for layer in cfg.BACKBONE.TRAIN_LAYERS:
            for param in getattr(model.netG.backbone, layer).parameters():
                param.requires_grad = True
            for m in getattr(model.netG.backbone, layer).modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.train()
    else:
        for param in model.netG.backbone.parameters():
            param.requires_grad = False
        for m in model.netG.backbone.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
    trainable_params += [{
        'params':
        filter(lambda x: x.requires_grad, model.netG.backbone.parameters()),
        'lr':
        cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR
    }]

    if cfg.ADJUST.ADJUST:
        trainable_params += [{
            'params': model.netG.neck.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    trainable_params += [{
        'params': model.netG.rpn_head.parameters(),
        'lr': cfg.TRAIN.BASE_LR
    }]

    if cfg.MASK.MASK:
        trainable_params += [{
            'params': model.netG.mask_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    if cfg.REFINE.REFINE:
        trainable_params += [{
            'params': model.netG.refine_head.parameters(),
            'lr': cfg.TRAIN.BASE_LR
        }]

    trainable_params += [{
        'params': [
            model.netG.PENALTY_K, model.netG.WINDOW_INFLUENCE,
            model.netG.CONTEXT_AMOUNT, model.netG.LR
        ],
        'lr':
        cfg.TRAIN.BASE_LR
    }]
    optimizer = torch.optim.SGD(trainable_params,
                                momentum=cfg.TRAIN.MOMENTUM,
                                weight_decay=cfg.TRAIN.WEIGHT_DECAY)

    #    print(trainable_params)
    lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
    lr_scheduler.step(cfg.TRAIN.START_EPOCH)

    return optimizer, lr_scheduler