Example #1
def _CUB200():
    """
    CUB200 dataset
    :return:
    """
    # Define the model, loss criterion, optimizer, etc.
    lr = 1e-4
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    model = Net(resnet.resnet50(pretrained=True), 200)
    model.to(device)
    criterion = torch.nn.CrossEntropyLoss()
    # optimzer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, weight_decay=0.0001)
    # Base lr for the pretrained backbone, 10x lr for the newly initialized fc head
    optimzer = torch.optim.SGD(
        [{'params': model.resnet.parameters()},
         {'params': model.fc.parameters(), 'lr': lr * 10}],
        lr=lr,
        momentum=0.9,
        weight_decay=0.0001)
    # Keep a reference: the scheduler only has an effect if it is stepped once per epoch (e.g. inside train())
    scheduler = torch.optim.lr_scheduler.StepLR(optimzer, step_size=50, gamma=0.1, last_epoch=-1)
    epochs = 150
    batchSize = 24
    worker = 2

    modelConfig = {
        'model': model,
        'criterion': criterion,
        'optimzer': optimzer,
        'epochs': epochs,
        'device': device
    }

    trainLoader, testLoader, validLoader, trainLength, testLength, validLength = chooseData(
        'CUB200', batchSize, worker)

    # There is no validation set, so the test set is used for validation
    dataConfig = {
        'trainLoader': trainLoader,
        'validLoader': testLoader,
        'trainLength': trainLength,
        'validLength': testLength
    }

    modelPath = os.path.join(os.getcwd(), 'checkpoints', '_CUB200.pth')
    lastModelPath = os.path.join(os.getcwd(), 'checkpoints',
                                 '_CUB200_last.pth')
    historyPath = os.path.join(os.getcwd(), 'historys', '_CUB200.npy')
    logPath = os.path.join(os.getcwd(), 'logs', '_CUB200.txt')

    logConfig = {
        'modelPath': modelPath,
        'historyPath': historyPath,
        'logPath': logPath,
        'lastModelPath': lastModelPath
    }

    train(modelConfig, dataConfig, logConfig)
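
The train() helper is not shown on this page. A minimal sketch of a loop that consumes the modelConfig, dataConfig and logConfig dictionaries built above might look like the following; the dictionary keys follow the example, but the loop body, the accuracy metric and the checkpointing policy are assumptions.

import numpy as np
import torch


def train(modelConfig, dataConfig, logConfig):
    """Hypothetical training loop; only the config keys follow the examples on this page."""
    model = modelConfig['model']
    criterion = modelConfig['criterion']
    optimizer = modelConfig['optimzer']  # key spelling as used in the examples
    device = modelConfig['device']
    history, bestAcc = [], 0.0
    for epoch in range(modelConfig['epochs']):
        model.train()
        for inputs, targets in dataConfig['trainLoader']:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()
        # validation pass (the test set, as noted in the comments above)
        model.eval()
        correct = 0
        with torch.no_grad():
            for inputs, targets in dataConfig['validLoader']:
                inputs, targets = inputs.to(device), targets.to(device)
                correct += (model(inputs).argmax(dim=1) == targets).sum().item()
        acc = correct / dataConfig['validLength']
        history.append(acc)
        torch.save(model.state_dict(), logConfig['lastModelPath'])
        if acc > bestAcc:
            bestAcc = acc
            torch.save(model.state_dict(), logConfig['modelPath'])
    np.save(logConfig['historyPath'], np.array(history))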
Example #2
def _stanfordDogs():
    """
     StanfordDogs数据集
     :return:
     """

    # Define the model, loss criterion, optimizer, etc.
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    model = Net(models.resnet50(pretrained=True), 120)
    model.to(device)
    criterion = torch.nn.CrossEntropyLoss()
    optimzer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
    # Keep a reference: the scheduler only has an effect if it is stepped once per epoch (e.g. inside train())
    scheduler = torch.optim.lr_scheduler.StepLR(optimzer, step_size=50, gamma=0.1, last_epoch=-1)
    epochs = 150
    batchSize = 80
    worker = 2

    modelConfig = {
        'model': model,
        'criterion': criterion,
        'optimzer': optimzer,
        'epochs': epochs,
        'device': device
    }


    trainLoader, testLoader, validLoader, trainLength, testLength, validLength = chooseData(
        'STANFORDDOGS', batchSize, worker)
    # There is no validation set, so the test set is used for validation
    dataConfig = {
        'trainLoader': trainLoader,
        'validLoader': testLoader,
        'trainLength': trainLength,
        'validLength': testLength
    }

    modelPath = os.path.join(os.getcwd(), 'checkpoints', '_stanforddogs.pth')
    lastModelPath = os.path.join(os.getcwd(), 'checkpoints', '_stanforddogs_last.pth')
    historyPath = os.path.join(os.getcwd(), 'historys', '_stanforddogs.npy')
    logPath = os.path.join(os.getcwd(), 'logs', '_stanforddogs.txt')

    logConfig = {
        'modelPath': modelPath,
        'historyPath': historyPath,
        'logPath': logPath,
        'lastModelPath': lastModelPath
    }

    train(modelConfig, dataConfig, logConfig)
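
Unlike Example #1, this variant trains every parameter with the same learning rate. When mixing the two styles it can help to inspect optimzer.param_groups to confirm which learning rate each group actually receives; the snippet below assumes the optimzer object defined above.

# Print the learning rate and size of every parameter group of the SGD optimizer above
for i, group in enumerate(optimzer.param_groups):
    print(f"group {i}: lr={group['lr']}, tensors={len(group['params'])}")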
Example #3
def _CUB200():
    """
    CUB200 dataset
    :return:
    """
    # Define the model, loss criterion, optimizer, etc.
    lr = 1e-4
    class_num = 200
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    print("cuda:0,1,2,3")
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    model = Net(resnet_for_pmg.resnet50(pretrained=True), class_num)
    device_ids = [0, 1, 2, 3]
    model = nn.DataParallel(model, device_ids=device_ids).cuda(0)
    model.to(f'cuda:{model.device_ids[0]}')
    criterion = torch.nn.CrossEntropyLoss()

    # Base lr for the pretrained PMG feature extractor, 10x lr for the newly added blocks and classifiers
    optimzer = torch.optim.SGD(
        [
            {'params': model.module.pmg.features.parameters(), 'lr': lr * 1},
            {'params': model.module.pmg.classifier_concat.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.classifier1.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.classifier2.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.classifier3.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.conv_block1.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.conv_block2.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.conv_block3.parameters(), 'lr': lr * 10},
            # {'params': model.module.pmg.map1.parameters(), 'lr': lr * 10},
            # {'params': model.module.pmg.map2.parameters(), 'lr': lr * 10},
            # {'params': model.module.pmg.fc.parameters(), 'lr': lr * 10},
        ],
        lr=lr,
        momentum=0.9,
        weight_decay=5e-4)

    # torch.optim.lr_scheduler.StepLR(optimzer, 10, gamma=0.94, last_epoch=-1)
    # Keep a reference: the scheduler only has an effect if it is stepped once per epoch (e.g. inside train())
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimzer, T_max=10)
    epochs = 200
    batchSize = 32
    worker = 4
    modelConfig = {
        'model': model,
        'criterion': criterion,
        'optimzer': optimzer,
        'epochs': epochs,
        'device': device
    }

    from torchvision import transforms as T
    # Custom data augmentation
    trainTransforms = T.Compose([
        T.Resize(550),
        T.RandomCrop(448, padding=8),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        # T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    testTransforms = T.Compose([
        T.Resize(550),
        T.CenterCrop(448),
        T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        # T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    trainLoader, testLoader, validLoader, trainLength, testLength, validLength = chooseData(
        'CUB200', batchSize, worker, trainTransforms, testTransforms)

    # There is no validation set, so the test set is used for validation
    dataConfig = {
        'trainLoader': trainLoader,
        'validLoader': testLoader,
        'trainLength': trainLength,
        'validLength': testLength
    }

    modelPath = os.path.join(os.getcwd(), 'checkpoints', '_CUB200.pth')
    lastModelPath = os.path.join(os.getcwd(), 'checkpoints',
                                 '_CUB200_last.pth')
    historyPath = os.path.join(os.getcwd(), 'historys', '_CUB200.npy')
    logPath = os.path.join(os.getcwd(), 'logs', '_CUB200.txt')

    logConfig = {
        'modelPath': modelPath,
        'historyPath': historyPath,
        'logPath': logPath,
        'lastModelPath': lastModelPath
    }

    train(modelConfig, dataConfig, logConfig)
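
Because this example wraps the model in nn.DataParallel, every key in model.state_dict() gets a 'module.' prefix. How train() saves its checkpoints is not shown, so the following is only a sketch of the usual workaround: save the underlying module, or strip the prefix when loading into an unwrapped model.

import torch

# Saving the wrapped module directly avoids the 'module.' prefix in the checkpoint keys
torch.save(model.module.state_dict(), modelPath)

# If a checkpoint was saved from the DataParallel wrapper instead, strip the prefix
# before loading it into a plain (unwrapped) model
state = torch.load(modelPath, map_location='cpu')
state = {k.replace('module.', '', 1): v for k, v in state.items()}
plain_model = Net(resnet_for_pmg.resnet50(pretrained=True), class_num)  # same constructor as above
plain_model.load_state_dict(state)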
Example #4
def _stanfordCars():
    """
    StanfordCars dataset
    :return:
    """
    # Define the model, loss criterion, optimizer, etc.
    n_output = 196
    lr = 1e-4
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    model = Net(mobilenet.mobilenet_v2(pretrained=True), CLASS=n_output)
    # model = BuildAlexNet('pre', n_output)
    model.to(device)
    criterion = torch.nn.CrossEntropyLoss()
    # optimzer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.90, weight_decay=0.0001)
    optimzer = torch.optim.SGD([
        {'params': model.resnet.parameters()},
        {'params': model.fc.parameters(), 'lr': lr * 100}],
        lr=lr, momentum=0.9, weight_decay=0.0001)
    # Keep a reference: the scheduler only has an effect if it is stepped once per epoch (e.g. inside train())
    scheduler = torch.optim.lr_scheduler.StepLR(optimzer, step_size=50, gamma=0.1, last_epoch=-1)
    epochs = 150
    batchSize = 24
    worker = 2

    modelConfig = {
        'model': model,
        'criterion': criterion,
        'optimzer': optimzer,
        'epochs': epochs,
        'device': device
    }

    from torchvision import transforms as T
    # Custom data augmentation
    # Normalizing (here with the ImageNet mean/std) speeds up convergence
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    trainTransforms = T.Compose([
        T.Resize(512),
        T.RandomRotation(15),
        # T.RandomResizedCrop(224,scale=(0.85,1.15)),
        T.RandomCrop(448),
        T.ToTensor(),
        normalize
    ])

    testTransforms = T.Compose([
        T.Resize(512),
        # T.RandomCrop(224),
        T.CenterCrop(448),
        T.ToTensor(),
        normalize
    ])

    trainLoader, testLoader, validLoader, trainLength, testLength, validLength = chooseData(
        'STANFORDCARS', batchSize, worker, trainTransforms, testTransforms)

    # There is no validation set, so the test set is used for validation
    dataConfig = {
        'trainLoader': trainLoader,
        'validLoader': testLoader,
        'trainLength': trainLength,
        'validLength': testLength
    }

    modelPath = os.path.join(os.getcwd(), 'checkpoints', '_stanfordcars.pth')
    lastModelPath = os.path.join(os.getcwd(), 'checkpoints', '_stanfordcars_last.pth')
    historyPath = os.path.join(os.getcwd(), 'historys', '_stanfordcars.npy')
    logPath = os.path.join(os.getcwd(), 'logs', '_stanfordcars.txt')

    logConfig = {
        'modelPath': modelPath,
        'historyPath': historyPath,
        'logPath': logPath,
        'lastModelPath': lastModelPath
    }

    train(modelConfig, dataConfig, logConfig)
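
chooseData is not shown on this page, so a quick, standalone sanity check of the augmentation pipeline above is to run it on a dummy PIL image and confirm the output tensor shape matches the 448x448 input the network expects; this snippet only assumes the trainTransforms object defined above.

from PIL import Image

# Dummy RGB image standing in for a Stanford Cars photo
img = Image.new('RGB', (800, 600))
x = trainTransforms(img)
print(x.shape)  # torch.Size([3, 448, 448]): Resize(512) scales the short side, RandomCrop(448) crops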
Example #5
def _stanfordDogs():
    """
     StanfordDogs数据集
     :return:
     """

    # Define the model, loss criterion, optimizer, etc.
    lr = 1e-4
    print("cuda:2")
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    model = Net(SE_resnet.resnet50(pretrained=True), 120)  # note: SE-ResNet backbone here
    model.to(device)
    criterion = torch.nn.CrossEntropyLoss()
    # optimzer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9, weight_decay=0.0001)
    # backbone_params = model.children()[:-3].parameters()
    # attention_classfication_params = model.children()[-3:].parameters()

    # backbone_params = list(map(id, model.resnet.parameters()))
    # attention_classfication_params = filter(lambda p: id(p) not in backbone_params, model.parameters())

    # Base lr for the pretrained backbone, 10x lr for the new classifier head
    optimzer = torch.optim.SGD([
        {'params': model.resnet.parameters()},
        {'params': model.fc1.parameters(), 'lr': lr * 10}],
        lr=lr, momentum=0.9, weight_decay=0.0001)

    # Keep a reference: the scheduler only has an effect if it is stepped once per epoch (e.g. inside train())
    scheduler = torch.optim.lr_scheduler.StepLR(optimzer, step_size=50, gamma=0.1, last_epoch=-1)
    # torch.optim.lr_scheduler.CosineAnnealingLR
    epochs = 150
    batchSize = 64
    worker = 2
    modelConfig = {
        'model': model,
        'criterion': criterion,
        'optimzer': optimzer,
        'epochs': epochs,
        'device': device
    }


    trainLoader, testLoader, validLoader, trainLength, testLength, validLength = chooseData(
        'STANFORDDOGS', batchSize, worker)
    # There is no validation set, so the test set is used for validation
    dataConfig = {
        'trainLoader': trainLoader,
        'validLoader': testLoader,
        'trainLength': trainLength,
        'validLength': testLength
    }

    modelPath = os.path.join(os.getcwd(), 'checkpoints', '_stanforddogs.pth')
    lastModelPath = os.path.join(os.getcwd(), 'checkpoints', '_stanforddogs_last.pth')
    historyPath = os.path.join(os.getcwd(), 'historys', '_stanforddogs.npy')
    logPath = os.path.join(os.getcwd(), 'logs', '_stanforddogs.txt')

    logConfig = {
        'modelPath': modelPath,
        'historyPath': historyPath,
        'logPath': logPath,
        'lastModelPath': lastModelPath
    }

    train(modelConfig, dataConfig, logConfig)
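
The commented-out lines above hint at a way to split parameters without naming every submodule: collect the ids of the backbone parameters and treat everything else as newly added layers. A working sketch of that idea, assuming the same Net/SE_resnet model defined above:

# Collect the ids of the pretrained backbone parameters ...
backbone_ids = set(map(id, model.resnet.parameters()))
# ... and treat every remaining parameter (attention and classifier layers) as newly initialized
new_params = [p for p in model.parameters() if id(p) not in backbone_ids]

optimzer = torch.optim.SGD(
    [{'params': model.resnet.parameters()},   # base lr for the backbone
     {'params': new_params, 'lr': lr * 10}],  # 10x lr for everything else
    lr=lr, momentum=0.9, weight_decay=0.0001)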
Example #6
def _stanfordDogs():
    """
     StanfordDogs数据集
     :return:
     """

    # Define the model, loss criterion, optimizer, etc.
    lr = 1e-4
    class_num = 120
    print("cuda:2")
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    model = Net(resnet_for_pmg.resnet50(pretrained=True), class_num)
    device_ids = [0]
    model = nn.DataParallel(model, device_ids=device_ids).cuda(0)
    model.to(f'cuda:{model.device_ids[0]}')
    criterion = torch.nn.CrossEntropyLoss()

    # Base lr for the pretrained PMG feature extractor, 10x lr for the newly added blocks
    optimzer = torch.optim.SGD(
        [
            {'params': model.module.pmg.features.parameters(), 'lr': lr * 1},
            {'params': model.module.pmg.classifier1.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.conv_block1.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.conv_block2.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.conv_block3.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.map1.parameters(), 'lr': lr * 10},
            {'params': model.module.pmg.map2.parameters(), 'lr': lr * 10},
        ],
        lr=lr,
        momentum=0.9,
        weight_decay=5e-4)

    # torch.optim.lr_scheduler.StepLR(optimzer, 10, gamma=0.94, last_epoch=-1)
    # Keep a reference: the scheduler only has an effect if it is stepped once per epoch (e.g. inside train())
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimzer, T_max=10)
    epochs = 200
    batchSize = 15
    worker = 2
    modelConfig = {
        'model': model,
        'criterion': criterion,
        'optimzer': optimzer,
        'epochs': epochs,
        'device': device
    }

    from torchvision import transforms as T
    # Custom data augmentation
    # Normalization speeds up convergence
    # normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    trainTransforms = T.Compose([
        T.Resize(256),
        T.RandomRotation(15),
        # T.RandomResizedCrop(224,scale=(0.85,1.15)),
        T.RandomCrop(224),
        T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    testTransforms = T.Compose([
        T.Resize(256),
        T.CenterCrop(224),
        T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    trainLoader, testLoader, validLoader, trainLength, testLength, validLength = chooseData(
        'STANFORDDOGS', batchSize, worker, trainTransforms, testTransforms)
    # There is no validation set, so the test set is used for validation
    dataConfig = {
        'trainLoader': trainLoader,
        'validLoader': testLoader,
        'trainLength': trainLength,
        'validLength': testLength
    }

    modelPath = os.path.join(os.getcwd(), 'checkpoints', '_stanforddogs.pth')
    lastModelPath = os.path.join(os.getcwd(), 'checkpoints',
                                 '_stanforddogs_last.pth')
    historyPath = os.path.join(os.getcwd(), 'historys', '_stanforddogs.npy')
    logPath = os.path.join(os.getcwd(), 'logs', '_stanforddogs.txt')

    logConfig = {
        'modelPath': modelPath,
        'historyPath': historyPath,
        'logPath': logPath,
        'lastModelPath': lastModelPath
    }

    train(modelConfig, dataConfig, logConfig)
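
In Examples #3 and #6 the cosine-annealing scheduler is constructed but only has an effect if it is stepped; whether train() does this internally is not shown. A minimal sketch of driving it from an epoch loop, assuming the scheduler reference kept above:

scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimzer, T_max=10)

for epoch in range(epochs):
    # ... run one training epoch over trainLoader and one validation pass over validLoader ...
    scheduler.step()  # anneal the learning rate of every parameter group once per epoch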