Example #1
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Download the MNIST training set, split it into training and validation subsets (batched), and save the data under the MNIST folder
    train_loader, valid_loader = get_mnist_train_validate_loader(dir_name='../data/MNIST', batch_size=MNIST_Training_Parameters['batch_size'], valid_size=0.1, shuffle=True)
    # Download the MNIST test set (batched) and save it under the MNIST folder
    test_loader = get_mnist_test_loader(dir_name='../data/MNIST', batch_size=MNIST_Training_Parameters['batch_size'])
    # Set up the model
    # ************** imported model name **************
    mnist_model = MNIST_CNN().to(device)
    # Set up the optimizer
    optimizer = optim.SGD(mnist_model.parameters(), lr=MNIST_Training_Parameters['learning_rate'],
                          momentum=MNIST_Training_Parameters['momentum'], weight_decay=MNIST_Training_Parameters['decay'], nesterov=True)
    # Training
    # Best validation accuracy seen so far
    best_val_acc = None
    # Save path for the trained model parameters: ../data/MNIST/model/MART_MNIST_raw.pt
    # ************** change the name for a different model **************
    model_saver = '../data/MNIST/model/MART_MNIST_' + 'raw' + '.pt'
    # Loop over the training epochs
    for epoch in range(MNIST_Training_Parameters['num_epochs']):
        # Run one training epoch
        train_one_epoch(model=mnist_model, train_loader=train_loader, optimizer=optimizer, epoch=epoch, device=device)
        # Accuracy on the validation set
        val_acc = validation_evaluation(model=mnist_model, validation_loader=valid_loader, device=device)
        # Adjust the learning rate according to the MNIST schedule
        adjust_MNIST_learning_rate(optimizer=optimizer, epoch=epoch)
        # After each epoch, if the validation accuracy is at least the best so far (or best_val_acc is still None), remove the old checkpoint, update the best accuracy, and write the new model parameters to the save path
        if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(model_saver)
            best_val_acc = val_acc
            mnist_model.save(name=model_saver)
        # Otherwise report that the accuracy did not improve
        else:
            print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(epoch, best_val_acc))
    # Testing
    # Copy mnist_model
    final_model = copy.deepcopy(mnist_model)
    # Load the best saved parameters into final_model
    final_model.load(path=model_saver, device=device)
    # Evaluate the saved model's accuracy on the test set
    accuracy = testing_evaluation(model=final_model, test_loader=test_loader, device=device)
    # Print the test-set accuracy
    print('Finally, the ACCURACY of saved model [{}] on testing dataset is {:.2f}%\n'.format(final_model.model_name, accuracy * 100.0))
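The loop above relies on project-local helpers (train_one_epoch, validation_evaluation, adjust_MNIST_learning_rate, testing_evaluation) whose definitions are not shown. A minimal sketch of the two loop helpers, assuming an ordinary classification setup with cross-entropy loss; the signatures mirror the calls above, but the bodies are illustrative rather than the project's actual implementation:

import torch
import torch.nn.functional as F


def train_one_epoch(model, train_loader, optimizer, epoch, device):
    # One full pass over the training set with cross-entropy loss.
    # `epoch` is kept in the signature to match the call above (typically used for logging).
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = F.cross_entropy(model(images), labels)
        loss.backward()
        optimizer.step()


def validation_evaluation(model, validation_loader, device):
    # Fraction of correctly classified validation samples, in [0, 1].
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in validation_loader:
            images, labels = images.to(device), labels.to(device)
            predictions = model(images).argmax(dim=1)
            correct += (predictions == labels).sum().item()
            total += labels.size(0)
    return correct / total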
Example #2
    def train_external_model_group(self, train_loader=None, validation_loader=None):
        # train_loader: training set
        # validation_loader: validation set
        # model_group: the four MNIST or CIFAR10 networks
        if self.Dataset == 'MNIST':
            model_group = [MNIST_A(), MNIST_B(), MNIST_C(), MNIST_D()]
        else:
            model_group = [CIFAR10_A(), CIFAR10_B(), CIFAR10_C(), CIFAR10_D()]
        model_group = [model.to(self.device) for model in model_group]

        # Train the MNIST/CIFAR10 networks one by one
        for i in range(len(model_group)):
            # Prepare an optimizer for each network: every MNIST network uses SGD; for CIFAR10, the last network (i == 3) uses SGD and the others use Adam
            if self.Dataset == "MNIST":
                optimizer_external = optim.SGD(model_group[i].parameters(), lr=self.training_parameters['learning_rate'],
                                               momentum=self.training_parameters['momentum'], weight_decay=self.training_parameters['decay'],
                                               nesterov=True)
            else:
                # This choice can be tuned
                if i == 3:
                    optimizer_external = optim.SGD(model_group[i].parameters(), lr=0.001, momentum=0.9, weight_decay=1e-6)
                else:
                    optimizer_external = optim.Adam(model_group[i].parameters(), lr=self.training_parameters['lr'])
            print('\nwe are training the {}-th static external model ......'.format(i))
            # best_val_acc: best classification accuracy on the validation set
            best_val_acc = None
            # Train the network for num_epochs epochs
            for index_epoch in range(self.num_epochs):
                # One full training epoch
                train_one_epoch(model=model_group[i], train_loader=train_loader, optimizer=optimizer_external, epoch=index_epoch,
                                device=self.device)
                # val_acc: classification accuracy on the validation set after this epoch
                val_acc = validation_evaluation(model=model_group[i], validation_loader=validation_loader, device=self.device)
                # Adjust the learning rate when the dataset is CIFAR10
                if self.Dataset == 'CIFAR10':
                    adjust_learning_rate(epoch=index_epoch, optimizer=optimizer_external)
                assert os.path.exists('../DefenseEnhancedModels/{}'.format(self.defense_name))
                # defense_external_saver: path where the best model parameters are saved, e.g. DefenseEnhancedModels/NEAT/MNIST_NEAT_0.pt (or its CIFAR10 counterpart)
                defense_external_saver = '../DefenseEnhancedModels/{}/{}_NEAT_{}.pt'.format(self.defense_name, self.Dataset, str(i))
                # Keep only the model parameters that achieve the best validation accuracy so far
                if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
                    if best_val_acc is not None:
                        os.remove(defense_external_saver)
                    best_val_acc = val_acc
                    model_group[i].save(name=defense_external_saver)
                else:
                    print('Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'.format(index_epoch, best_val_acc))
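All of these examples call model.save(name=...) and model.load(path=..., device=...), which are not standard torch.nn.Module methods, and Examples #1 and #3 also read a model_name attribute, so the project's model classes presumably share a common base class providing them. A minimal sketch of such a base class, assuming the checkpoints are plain state_dict files; the class name BasicModule is an illustrative assumption:

import os

import torch
import torch.nn as nn


class BasicModule(nn.Module):
    # Thin nn.Module wrapper adding the save/load helpers used by the training loops above.
    def __init__(self):
        super().__init__()
        self.model_name = self.__class__.__name__

    def save(self, name):
        # Persist only the parameters (state_dict), creating the target directory if needed.
        os.makedirs(os.path.dirname(name), exist_ok=True)
        torch.save(self.state_dict(), name)

    def load(self, path, device):
        # Restore parameters and move the model onto the requested device.
        self.load_state_dict(torch.load(path, map_location=device))
        self.to(device)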
Example #3
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed manually for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Download the CIFAR10 training set, split it into training and validation subsets (batched), and save the data under the CIFAR10 folder
    train_loader, valid_loader = get_cifar10_train_validate_loader(
        dir_name='../data/CIFAR10/',
        batch_size=CIFAR10_Training_Parameters['batch_size'],
        valid_size=0.1,
        shuffle=True)
    # Download the CIFAR10 test set (batched) and save it under the CIFAR10 folder
    test_loader = get_cifar10_test_loader(
        dir_name='../data/CIFAR10/',
        batch_size=CIFAR10_Training_Parameters['batch_size'])
    # Set up the model
    # ************** imported model name **************
    resnet_model = ResNet18().to(device)
    # Set up the optimizer
    optimizer = optim.Adam(resnet_model.parameters(),
                           lr=CIFAR10_Training_Parameters['lr'])
    # Training
    # Best validation accuracy seen so far
    best_val_acc = None
    # Save path for the trained model parameters: ./CIFAR10/model/ResNet18_raw.pt
    # ************** change the name for a different model **************
    model_saver = './CIFAR10/model/ResNet18_' + 'raw' + '.pt'
    # Loop over the training epochs
    for epoch in range(CIFAR10_Training_Parameters['num_epochs']):
        # Run one training epoch
        train_one_epoch(model=resnet_model,
                        train_loader=train_loader,
                        optimizer=optimizer,
                        epoch=epoch,
                        device=device)
        # Accuracy on the validation set
        val_acc = validation_evaluation(model=resnet_model,
                                        validation_loader=valid_loader,
                                        device=device)
        # Adjust the learning rate
        adjust_learning_rate(optimizer=optimizer, epoch=epoch)
        # After each epoch, if the validation accuracy is at least the best so far (or best_val_acc is still None), remove the old checkpoint, update the best accuracy, and write the new model parameters to the save path
        if not best_val_acc or round(val_acc, 4) >= round(best_val_acc, 4):
            if best_val_acc is not None:
                os.remove(model_saver)
            best_val_acc = val_acc
            resnet_model.save(name=model_saver)
        # Otherwise report that the accuracy did not improve
        else:
            print(
                'Train Epoch {:>3}: validation dataset accuracy did not improve from {:.4f}\n'
                .format(epoch, best_val_acc))
    # Testing
    # Copy resnet_model
    final_model = copy.deepcopy(resnet_model)
    # Load the best saved parameters into final_model
    final_model.load(path=model_saver, device=device)
    # Evaluate the saved model's accuracy on the test set
    accuracy = testing_evaluation(model=final_model,
                                  test_loader=test_loader,
                                  device=device)
    # Print the test-set accuracy
    print(
        'Finally, the ACCURACY of saved model [{}] on testing dataset is {:.2f}%\n'
        .format(final_model.model_name, accuracy * 100.0))
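Examples #2 and #3 call adjust_learning_rate(optimizer=..., epoch=...) (Example #1 uses the MNIST-specific adjust_MNIST_learning_rate) without showing the schedule. A minimal sketch, assuming a simple step decay at fixed milestone epochs; the milestones and decay factor are illustrative values, not the project's actual schedule:

def adjust_learning_rate(optimizer, epoch, milestones=(20, 40), gamma=0.1):
    # Multiply every parameter group's learning rate by `gamma`
    # whenever training reaches one of the milestone epochs.
    if epoch in milestones:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= gamma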