Example no. 1
def init_model(cfg, device, id_gpu=None):
    model = models.resnet18(pretrained=True)
    dims_out = (128, 256, 512)
    model = ModelOuts4Resnet(model, dims_out)
    cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_res18'

    if cfg.MODE_TRAIN == 1 or cfg.MODE_TRAIN == 2 or cfg.MODE_TRAIN == 5:
        '''
        1. 4 output levels, shared heads
        2. 4 output levels with the new FPN, shared heads
        '''
        cfg.STRIDES = [8, 16, 32, 64]  # downsampling strides (model-specific parameter)
        # Ensures each feature map predicts boxes of the matching size; a GT box is matched on only one level.
        cfg.SCALE_THRESHOLDS = [0, 49, 98, 196, 1e10]
    elif cfg.MODE_TRAIN == 3 or cfg.MODE_TRAIN == 4:
        ''' 4 output levels with the new FPN, shared heads '''
        # model = models.resnet50(pretrained=True)
        # dims_out = (512, 1024, 2048)
        # model = ModelOuts4Resnet(model, dims_out)
        # cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_res50'
        cfg.STRIDES = [8, 16, 32, 64, 128]  # image size must be a multiple of 128 (e.g. 512)
        cfg.SCALE_THRESHOLDS = [0, 64, 128, 256, 512, 10000000000.0]
    else:
        raise Exception('invalid cfg.MODE_TRAIN: cfg.MODE_TRAIN=%s' % cfg.MODE_TRAIN)

    model = Fcos(model, cfg, device)

    if cfg.IS_LOCK_BACKBONE_WEIGHT:
        for name, param in model.backbone.named_parameters():
            param.requires_grad_(False)

    model, is_mgpu = model_device_init(model, device, id_gpu, cfg)

    # ------------------------ custom backbone setup complete -------------------------------
    pg = model.parameters()
    optimizer = optim.Adam(pg, lr=cfg.LR0)
    # If the metric does not improve twice in a row, halve the LR (ReduceLROnPlateau alternative below).
    # lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, factor=0.5, verbose=True)
    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [50, 80, 100],
                                                  0.1)
    start_epoch = load_weight(cfg.FILE_FIT_WEIGHT,
                              model,
                              optimizer,
                              lr_scheduler,
                              device,
                              is_mgpu=is_mgpu)

    model.cfg = cfg
    return model, optimizer, lr_scheduler, start_epoch
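
The STRIDES/SCALE_THRESHOLDS pair above decides which FPN level a ground-truth box is regressed on. Below is a minimal sketch of that assignment rule, assuming the usual FCOS-style criterion on the box's longest side; the helper name assign_level is hypothetical and not part of the Fcos class used here.

def assign_level(box_wh, scale_thresholds):
    """Return the index of the single level whose size range contains the box.

    box_wh           -- (width, height) of a GT box in pixels
    scale_thresholds -- e.g. [0, 49, 98, 196, 1e10]; level i covers
                        (scale_thresholds[i], scale_thresholds[i + 1]]
    """
    size = max(box_wh)  # assumed criterion: longest side of the box
    for i in range(len(scale_thresholds) - 1):
        if scale_thresholds[i] < size <= scale_thresholds[i + 1]:
            return i
    return len(scale_thresholds) - 2  # fall back to the coarsest level

# A 120 px box lands on level 2 (range 98..196) of STRIDES = [8, 16, 32, 64].
print(assign_level((120, 80), [0, 49, 98, 196, 1e10]))  # -> 2
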
Example no. 2
def init_model(cfg, device, id_gpu=None):
    model = models.resnet18(pretrained=True)
    dims_out = (128, 256, 512)
    model = ModelOuts4Resnet(model, dims_out)
    cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_res18'

    # model = models.resnet50(pretrained=True)
    # dims_out = (512, 1024, 2048)
    # model = ModelOuts4Resnet(model, dims_out)
    # cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_res50'

    cfg.STRIDES = [8, 16, 32, 64]  # downsampling strides (model-specific parameter)
    assert cfg.IMAGE_SIZE[0] % cfg.STRIDES[-1] == 0, \
        'cfg.IMAGE_SIZE=%s is not divisible by cfg.STRIDES=%s' % (cfg.IMAGE_SIZE, cfg.STRIDES)
    # Ensures each feature map predicts boxes of the matching size; a GT box is matched on only one level.
    cfg.SCALE_THRESHOLDS = [0, 49, 98, 196, 1e10]

    model = KFcos(model, cfg)

    if cfg.IS_LOCK_BACKBONE_WEIGHT:
        for name, param in model.backbone.named_parameters():
            param.requires_grad_(False)

    model, is_mgpu = model_device_init(model, device, id_gpu, cfg)

    # ------------------------ custom backbone setup complete -------------------------------
    pg = model.parameters()
    # optimizer = optim.Adam(pg, lr=cfg.LR0)
    optimizer = optim.SGD(pg, lr=cfg.LR0, momentum=0.9, weight_decay=5e-6)
    # Reduce the LR when the monitored metric stops improving.
    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                        patience=10,
                                                        factor=0.1,
                                                        verbose=True)
    # lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [50, 80, 100], 0.1)
    start_epoch = load_weight(cfg.FILE_FIT_WEIGHT,
                              model,
                              optimizer,
                              lr_scheduler,
                              device,
                              is_mgpu=is_mgpu)

    model.cfg = cfg
    return model, optimizer, lr_scheduler, start_epoch
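
Unlike the MultiStepLR used in example 1, the ReduceLROnPlateau scheduler chosen here has to be stepped with the metric it monitors. The small self-contained sketch below only illustrates that scheduler call pattern (dummy model and fake losses); it is not the training loop of this repository.

import torch.nn as nn
import torch.optim as optim

net = nn.Linear(4, 2)
opt = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
sched = optim.lr_scheduler.ReduceLROnPlateau(opt, patience=2, factor=0.1)

for epoch, val_loss in enumerate([1.0, 0.9, 0.9, 0.9, 0.9, 0.9]):
    sched.step(val_loss)          # ReduceLROnPlateau: pass the monitored value
    # sched.step()                # MultiStepLR (example 1) is stepped without arguments
    print(epoch, opt.param_groups[0]['lr'])
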
Example no. 3
def init_model(cfg, device, id_gpu=None):
    # model = darknet53(pretrained=True)
    # model = ModelOuts4DarkNet53(model)
    # cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_dark53'

    # model = darknet19(pretrained=True)
    # model = ModelOuts4DarkNet19(model)
    # cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_dark19'

    model = models.resnet18(pretrained=True)
    dims_out = (128, 256, 512)
    model = ModelOuts4Resnet(model, dims_out)
    cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_res18'

    # model = CSPDarknet_slim()
    # model.dims_out = (128, 256, 512)
    # cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_csspds'

    model = Yolo_v3(backbone=model, cfg=cfg)
    # f_look_model(model, input=(1, 3, *cfg.IMAGE_SIZE))

    if cfg.IS_LOCK_BACKBONE_WEIGHT:
        for name, param in model.net.backbone.named_parameters():
            param.requires_grad_(False)

    model, is_mgpu = model_device_init(model, device, id_gpu, cfg)
    # ------------------------ model setup complete -------------------------------

    pg = model.parameters()
    optimizer = optim.Adam(pg, cfg.LR0)
    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [50, 80, 100],
                                                  0.1)
    start_epoch = load_weight(cfg.FILE_FIT_WEIGHT,
                              model,
                              optimizer,
                              lr_scheduler,
                              device,
                              is_mgpu=is_mgpu)

    model.cfg = cfg
    return model, optimizer, lr_scheduler, start_epoch
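
When IS_LOCK_BACKBONE_WEIGHT freezes the backbone as above, the frozen tensors can also be left out of the optimizer instead of passing model.parameters() unchanged. A minimal sketch of that filtering on a dummy two-block module (the module itself is purely illustrative):

import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 4))
for param in model[0].parameters():   # freeze the first block, like the backbone lock above
    param.requires_grad_(False)

pg = [p for p in model.parameters() if p.requires_grad]  # keep only trainable tensors
optimizer = optim.Adam(pg, lr=1e-3)
print(len(pg), 'trainable tensors')   # -> 2 (weight and bias of the second Linear)
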
Example no. 4

if __name__ == '__main__':
    from f_pytorch.tools_model.model_look import f_look_tw
    ''' model test '''
    cfg = CFG()
    cfg.NUMS_ANC = [3, 3, 3]
    cfg.NUM_CLASSES = 3

    cfg.IMAGE_SIZE = (320, 320)
    cfg.STRIDES = [8, 16, 32, 64]
    cfg.MODE_TRAIN = 3

    model = models.resnet18(pretrained=True)
    dims_out = (128, 256, 512)
    model = ModelOuts4Resnet(model, dims_out)

    # model = FcosNet_v1(model, cfg)
    # model = FcosNet_v2(model, cfg, o_ceng=4, num_conv=3)
    # model = FcosNet_v2(model, cfg, o_ceng=5, num_conv=3)
    model = FcosNet_v3(model, cfg, o_ceng=5, num_conv=3)
    model.train()

    # cfg.STRIDES = [8, 16, 32, 64, 128]
    # model = FcosNet(model, cfg, o_ceng=5)

    print(model(torch.rand(2, 3, *cfg.IMAGE_SIZE)).shape)
    # model.eval()
    f_look_tw(model, input=(1, 3, 320, 320), name='fcos')

    # from f_pytorch.tools_model.model_look import f_look_summary
Example no. 5
def init_model(cfg, device, id_gpu=None):
    # model = models.densenet121(pretrained=True)
    # ret_name_dict = {'denseblock2': 1, 'denseblock3': 2, 'denseblock4': 3}
    # dims_out = [512, 1024, 1024]
    # model = ModelOuts4Densenet121(model, 'features', ret_name_dict, dims_out)
    # cfg.FEATURE_MAP_STEPS = [8, 16, 32]  # feature-map strides (downsampling factors)
    # cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + 'densenet121'

    # model = models.resnet50(pretrained=True)
    # dims_out = (512, 1024, 2048)
    # model = ModelOuts4Resnet(model, dims_out)
    # cfg.SAVE_FILE_NAME = cfg.SAVE_FILE_NAME + '_res50'
    # cfg.FEATURE_MAP_STEPS = [8, 16, 32]  # feature-map strides (downsampling factors)

    model = models.resnet18(pretrained=True)
    dims_out = (128, 256, 512)
    model = ModelOuts4Resnet(model, dims_out)

    cfg.FEATURE_MAP_STEPS = [8, 16, 32, 64, 128]

    # One anchor scale per feature level (5 levels).
    cfg.NUMS_ANC = [1, 1, 1, 1, 1]
    cfg.ANCS_SCALE = [[0.078, 0.07775],
                      [0.174, 0.164],
                      [0.324, 0.336],
                      [0.578, 0.466],
                      [0.698, 0.674]]

    # cfg.NUMS_ANC = [3, 3, 3, 3, 3]
    # cfg.ANCS_SCALE = [[0.052, 0.046],
    #                   [0.088, 0.092],
    #                   [0.152, 0.106],
    #                   [0.184, 0.186],
    #                   [0.224, 0.32],
    #                   [0.38, 0.242],
    #                   [0.338, 0.424],
    #                   [0.572, 0.32],
    #                   [0.34, 0.624],
    #                   [0.484, 0.527],
    #                   [0.7, 0.44],
    #                   [0.642, 0.656],
    #                   [0.836, 0.573],
    #                   [0.61, 0.882],
    #                   [0.92, 0.74231]]
    # cfg.LOSS_WEIGHT = [1, 1, 1, 1, 1]  # conf_pos conf_neg cls loss_txty  loss_twth
    model = Retina2(model, cfg, device)
    # model = Retina3(model, cfg, device)

    # cfg.NUMS_ANC = [3, 3, 3]
    # cfg.FEATURE_MAP_STEPS = [8, 16, 32]
    # model = Retina(model, cfg, device)

    if cfg.IS_LOCK_BACKBONE_WEIGHT:
        for name, param in model.backbone.named_parameters():
            param.requires_grad_(False)

    model, is_mgpu = model_device_init(model, device, id_gpu, cfg)

    # ------------------------ custom backbone setup complete -------------------------------
    pg = model.parameters()
    optimizer = optim.Adam(pg, lr=cfg.LR0)
    # If the metric does not improve twice in a row, halve the LR (ReduceLROnPlateau alternative below).
    # lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, factor=0.5, verbose=True)
    lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [50, 70, 100], 0.1)
    start_epoch = load_weight(cfg.FILE_FIT_WEIGHT, model, optimizer, lr_scheduler, device, is_mgpu=is_mgpu)

    model.cfg = cfg
    return model, optimizer, lr_scheduler, start_epoch
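
The ANCS_SCALE values above are fractions of the input size, one (w, h) pair per feature level in this configuration. A short sketch of how they map to pixel anchors for a given IMAGE_SIZE (the helper name and the 320x320 size are assumptions for illustration only):

def ancs_scale_to_pixels(ancs_scale, image_size):
    """Convert fractional anchor scales [(w, h), ...] into pixel (w, h) pairs."""
    img_w, img_h = image_size
    return [(round(w * img_w, 1), round(h * img_h, 1)) for w, h in ancs_scale]

ancs_scale = [[0.078, 0.07775], [0.174, 0.164], [0.324, 0.336],
              [0.578, 0.466], [0.698, 0.674]]
# With a 320x320 input the smallest anchor is about 25x25 px, the largest about 223x216 px.
print(ancs_scale_to_pixels(ancs_scale, (320, 320)))
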