def extract_features(image_dir, out_dir, model_dir):
    """Run the trained embedding model over every image in image_dir and
    write each image's embedding vector to a .dat CSV file.

    :param image_dir: directory containing the input images
    :param out_dir: output directory (per-image files) or a single output file path
    :param model_dir: folder containing 'test_model_best.pth.tar'
    :raises Exception: if model_dir is not a folder, or an output path
        collides with its input path
    """
    if not Path(model_dir).is_dir():
        ex_message = 'Model directory should be a folder, not a path of the model file!!'
        raise Exception(ex_message)

    loader = get_instance_data_loader(image_dir)

    # Build the network and restore the best checkpoint onto GPU 0.
    model = densenet(embedding_size=args.embedding_size, pretrained=True)
    model = torch.nn.DataParallel(model, device_ids=[0]).cuda()
    checkpoint = torch.load(model_dir + '/test_model_best.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])

    model.eval()
    with torch.no_grad():
        for batch, names in loader:
            outputs = model(batch, False)
            for name, embedding in zip(names, outputs):
                features = pd.Series(embedding.data.cpu().numpy())
                # When out_dir is a directory, derive a per-image file name;
                # otherwise treat out_dir itself as the target file.
                if Path(out_dir).is_dir():
                    target = out_dir + '/' + Path(name).stem + '.dat'
                else:
                    target = out_dir
                if target == name:
                    ex_message = 'Input file name "{}" and output file name "{}" are equal!!'.format(
                        name, target)
                    raise Exception(ex_message)
                features.to_csv(target, index=False)
                print('Feature extraction is completed for file: {}'.format(
                    Path(name).name))
def predict_agument(data, **kwargs):
    """
    Ensemble prediction for a single image replicated into a batch
    (test-time augmentation): average each model's outputs over the
    replicated copies, then average those means across the models.

    :param data: (tensor) batch of augmented copies of one image,
        shape (N, C, H, W)
    :kwargs: maps model id ('1'..'5') -> path of that model's weight file
    :return: (int) predicted class index
    """
    # Run on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build and restore every requested model.
    models = []
    for k, v in kwargs.items():
        if k == '1':
            model = MFFNet()
        elif k == '2':
            # expects 299 x 299 input
            model = inception(pretrained=False)
        elif k == '3':
            model = resnet(pretrained=False)
        elif k == '4':
            model = densenet(pretrained=False)
        elif k == '5':
            model = senet(pretrained=False)
        # Restore trained weights.
        model.load_state_dict(torch.load(v))
        model = model.to(device)
        # Inference mode.
        model.eval()
        models.append(model)

    # Averaging strategy: mean over augmented copies, then over models.
    data = data.to(device)
    total = None
    for model in models:
        output = model(data).detach()
        # BUG FIX: the original accumulated into torch.zeros(7), which
        # lives on the CPU (device mismatch under CUDA) and hard-coded the
        # class count; .mean(dim=0) is the same computation, stays on
        # `device`, and works for any output width.
        val = output.mean(dim=0)
        total = val if total is None else total + val
    val = total / len(models)
    _, a = torch.max(val, 0)

    return a.item()
def predict(data, **kwargs):
    """
    Ensemble prediction without replicating the image: the raw outputs
    of all models are averaged and the arg-max class per sample returned.

    :param data: (tensor) image batch
    :kwargs: maps model id ('1'..'5') -> path of that model's weight file
    :return: (numpy) predicted class index per sample
    """
    # Run on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Instantiate and restore each requested model.
    loaded = []
    for key, weight_path in kwargs.items():
        if key == '1':
            net = MFFNet()
        elif key == '2':
            # expects 299 x 299 input
            net = inception(pretrained=False)
        elif key == '3':
            net = resnet(pretrained=False)
        elif key == '4':
            net = densenet(pretrained=False)
        elif key == '5':
            net = senet(pretrained=False)
        # Restore trained weights.
        net.load_state_dict(torch.load(weight_path))
        net = net.to(device)
        # Inference mode.
        net.eval()
        loaded.append(net)

    # Averaging strategy over the ensemble outputs.
    data = data.to(device)
    summed = None
    for net in loaded:
        out = net(data).detach()
        summed = out if summed is None else summed + out
    averaged = summed / len(loaded)
    _, pred = torch.max(averaged, 1)

    # Predicted labels as a numpy array.
    return pred.cpu().detach().numpy()
예제 #4
0
def test(test_path, agumentation, **kwargs):
    """
    Evaluate a single model's performance on the test set.
    :param
        test_path(str) -- path of the test set
        agumentation(bool) -- whether each single image is replicated
            several times (test-time augmentation)
    :kwargs
        model(int) -- which model architecture to load (1..5)
    """
    # Hyper-parameters: with TTA each loader item is the copies of ONE
    # image, so the batch size is fixed at 1.
    if agumentation:
        BATCH_SIZE = 1
    else:
        BATCH_SIZE = 32

    # Run on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Loss function.
    # Class weights are inverse class frequencies (N / n) to counter
    # the dataset's class imbalance.
    weight = torch.Tensor([9., 1.5, 19.48, 30.62, 9.11, 86.86, 71.])
    weight = weight.to(device)
    criterion = nn.CrossEntropyLoss(weight=weight)

    # Preprocessing (ImageNet mean/std normalization).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    test_transform = transforms.Compose(
        [transforms.RandomCrop(224),
         transforms.ToTensor(), normalize])

    # Load the data.
    # Build the test loader.
    test_dataset = SkinDiseaseDataset(test_path,
                                      transforms=test_transform,
                                      agumentation=agumentation)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

    # Build the selected model architecture.
    if kwargs['model'] == 1:
        model = MFFNet()
    elif kwargs['model'] == 2:
        # expects 299 x 299 input
        model = inception(pretrained=False)
    elif kwargs['model'] == 3:
        model = resnet(pretrained=False)
    elif kwargs['model'] == 4:
        model = densenet(pretrained=False)
    elif kwargs['model'] == 5:
        model = senet(pretrained=False)
    # Restore the best trained weights.
    model.load_state_dict(torch.load(CONFIG.best_model))
    model = model.to(device)

    # Inference mode.
    model.eval()
    # Per-class correct-prediction counts.
    class_correct = list(0. for i in range(7))
    # Per-class sample totals.
    class_total = list(0. for i in range(7))
    # Accumulated loss.
    sum_loss = 0.0
    # Overall correct-prediction count.
    correct = 0
    # Overall sample count (only updated on the non-TTA path).
    total = 0
    # Iteration count.
    cnt = 0
    # Test-time-augmentation mode.
    if agumentation:
        # Predicted labels.
        x = []
        # True labels.
        y = []
        for data in test_loader:
            cnt += 1

            # Load one item = all augmented copies of one image.
            image, label = data
            image = image.view(-1, 3, 224, 224)
            label = label[0]
            image, label = image.to(device), label.to(device)

            # Forward pass over all copies.
            output = model(image)

            # Averaging strategy: mean score over the augmented copies.
            output = output.detach()
            val = None
            for i in range(output.size(0)):
                if val is None:
                    val = output[i]
                else:
                    val = val + output[i]
            val = val / output.size(0)
            _, a = torch.max(val, 0)

            # Per-class correctness bookkeeping.
            m = label.detach()
            class_correct[m] += 1 if a == m else 0
            class_total[m] += 1
            # Overall correctness bookkeeping.
            correct += 1 if a == m else 0

            x.append(a.item())
            y.append(m.item())
        # Convert lists to numpy arrays.
        x = np.array(x)
        y = np.array(y)
    else:
        # Predicted labels.
        x = None
        # True labels.
        y = None
        for data in test_loader:
            cnt += 1

            # Load a batch.
            image, label = data
            image, label = image.to(device), label.to(device)

            # Forward pass.
            output = model(image)
            loss = criterion(output, label)

            # Accumulate loss and accuracy statistics.
            sum_loss += loss.item()
            _, a = torch.max(output.detach(), 1)
            b = label.detach()
            total += label.size(0)
            correct += (a == b).sum()

            # Accumulate predicted and true labels.
            if x is None:
                x = a
                y = b
            else:
                x = torch.cat((x, a))
                y = torch.cat((y, b))

            # Per-class correctness bookkeeping.
            for i in range(label.size(0)):
                m = b[i]
                class_correct[m] += 1 if a[i] == m else 0
                class_total[m] += 1
        # Convert tensors to numpy arrays.
        x = x.cpu().detach().numpy()
        y = y.cpu().detach().numpy()

    # Report results.
    cm_plot_labels = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
    # With TTA, cnt equals the number of images (batch size 1).
    if agumentation:
        # Print accuracy only (no loss is computed on this path).
        print("test_acc:%.2f%%\n" % (100 * correct / cnt))
    else:
        # Print loss and accuracy.
        print("test_loss:%.2f test_acc:%.2f%%\n" %
              (sum_loss / cnt, 100 * correct / total))
    # Per-class accuracy.
    for i in range(7):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %.2f%% (%2d/%2d)' %
                  (cm_plot_labels[i], 100 * class_correct[i] / class_total[i],
                   class_correct[i], class_total[i]))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)' %
                  cm_plot_labels[i])
    print('')

    # Confusion matrix.
    cm = confusion_matrix(y, x)
    print('')

    # Balanced multi-class accuracy (BMC).
    balanced_multiclass_accuracy(cm)
    print('')

    # Plot the confusion matrix.
    plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix')
    print('')

    # Classification report.
    report = classification_report(y, x, target_names=cm_plot_labels)
    print(report)
예제 #5
0
def main(batch_size, epoch, dataset_name, model_name, seq=False):
    """Train the chosen model on the chosen dataset, logging the
    configuration and results under a fresh outputs/ directory.

    :param batch_size: mini-batch size handed to the dataloaders
    :param epoch: number of training epochs
    :param dataset_name: 'cifar10' (images) or 'kinetics400' (video)
    :param model_name: 'simple_model1', 'simple_model2', 'densenet' or
        'resnet3d'; anything else terminates the process (exit code 1)
    :param seq: forwarded to the dataloader factory
    :raises ValueError: if dataset_name is not recognised
    """
    print('loading dataset & creating dataloaders')
    file_path = Path(__file__)
    datasets_path = (file_path / '..' / '..' / 'datasets').resolve()

    if dataset_name == "cifar10":
        num_classes = 10
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    elif dataset_name == "kinetics400":
        num_classes = 400
        transform = transforms.Compose([
            transforms_3d.ToFloatTensorInZeroOne(),
            transforms_3d.Resize((128, 171)),
            transforms_3d.RandomHorizontalFlip(),
            transforms_3d.Normalize(mean=[0.43216, 0.394666, 0.37645],
                                    std=[0.22803, 0.22145, 0.216989]),
            transforms_3d.RandomCrop((112, 112))
        ])
    else:
        # Fail fast with a clear message: the original fell through and
        # later crashed with UnboundLocalError on transform/num_classes.
        raise ValueError('unknown dataset_name: {!r}'.format(dataset_name))
    dataloaders = dataloader(dataset_name=dataset_name,
                             transform=transform,
                             batch_size=batch_size,
                             num_worker=0,
                             seq=seq,
                             datasets_path=datasets_path)
    print("creating model")
    if model_name == 'simple_model1':
        model = simple_model1(num_classes)
    elif model_name == 'simple_model2':
        model = simple_model2(num_classes)
    elif model_name == 'densenet':
        model = densenet(num_classes)
    elif model_name == 'resnet3d':
        model = resnet3d(num_classes)
    else:
        # Unknown model: keep the original hard-exit behaviour.
        exit(1)

    print("cuda settings")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    print('optimizer and criterion setting')
    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    optimizer = optim.Adam(model.parameters())
    # criterion = nn.MultiLabelSoftMarginLoss()
    weight = torch.ones([num_classes]).to(device)
    criterion = nn.BCEWithLogitsLoss(weight=weight)

    print('discription setting')
    # Pick a fresh output directory: model-dataset, model-dataset-1, ...
    outputs_path = (file_path / '..' / '..' / 'outputs').resolve()
    output_dir = model_name + '-' + dataset_name
    output_path = outputs_path / output_dir
    output_dir_index = 0
    while os.path.isdir(output_path):
        output_dir_index += 1
        output_path = outputs_path / (output_dir + '-' + str(output_dir_index))
    os.makedirs(output_path, exist_ok=True)

    # Record the full training configuration for later inspection.
    train_discription_path = str(output_path / 'train_discription.txt')
    with open(train_discription_path, mode="w") as f:
        pprint(model_name, stream=f)
        pprint(dataset_name, stream=f)
        pprint('batch_size: ' + str(batch_size), stream=f)
        pprint('epoch: ' + str(epoch), stream=f)

        pprint('transform', stream=f)
        pprint(transform, stream=f)

        pprint('criterion', stream=f)
        pprint(criterion, stream=f)

        pprint('optimizer', stream=f)
        pprint(optimizer, stream=f)

    print('start training')
    train_result_txt_path = str(output_path / 'train_result.txt')
    train_result_dict_path = str(output_path / 'train_result.joblib')
    model, hist = train_model(model=model,
                              dataloaders=dataloaders,
                              criterion=criterion,
                              optimizer=optimizer,
                              num_classes=num_classes,
                              num_epochs=epoch,
                              device=device,
                              output_path=output_path,
                              train_result_txt_path=train_result_txt_path,
                              train_result_dict_path=train_result_dict_path)
    print(hist)
예제 #6
0
# Shared preprocessing: CIFAR-10 images upscaled to 224x224 and normalized
# with ImageNet statistics.
transform = transforms.Compose([transforms.Resize((224,224)),
                            transforms.ToTensor(),
                            transforms.Normalize([0.485,0.456,  
                            0.406], [0.229, 0.224, 0.225])])
# CIFAR-10 train/validation datasets and their loaders.
trainset = datasets.CIFAR10('train/', download=True, train=True, transform=transform)
valset = datasets.CIFAR10('val/', download=True, train=False, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=64, shuffle=True)
len_trainset = len(trainset)
len_valset = len(valset)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")




# Student network 1: densenet restored from previously-trained weights.
student1 = mod.densenet()
student1.to(device)
student1.load_state_dict(torch.load('densenetV2.pth'))

# Student network 2: googlenet restored from previously-trained weights.
student2 = mod.googlenet()
student2.to(device)
student2.load_state_dict(torch.load('googlenetV2.pth'))

# Selector network (no weights loaded here).
selector = mod.CNN()
selector.to(device)
classes = ('plane', 'car', 'bird', 'cat','deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# NOTE(review): this rebinds `device` to a plain string (not torch.device)
# and duplicates `classes` -- looks unintentional; verify downstream usage.
device = ("cuda:0" if torch.cuda.is_available() else "cpu")
classes = ('plane', 'car', 'bird', 'cat','deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Per-class prediction bookkeeping for the 10 CIFAR classes.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
예제 #7
0
def process():
    """
    Handle a request to classify an uploaded image into one of seven
    pigmented skin disease classes; returns the result as JSON.
    """
    # Number of augmented copies generated from the single upload.
    NUM = 16
    # Weight-file paths of the ensemble members (densenet + senet).
    KWARGS = {'1': CONFIG.densenet_model, '2': CONFIG.senet_model}

    try:
        # Directory where uploaded images are stored.
        UPLOAD_FOLDER = CONFIG.upload_dir

        ## Read and persist the uploaded image ##
        file = request.files['file']
        filename = secure_filename(file.filename)
        # Full storage path of the image.
        path = os.path.join(UPLOAD_FOLDER, filename)
        file.save(path)

        # Run on GPU when available, otherwise CPU.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        ## Image preprocessing ##
        # Random 224 x 224 crops with ImageNet normalization.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        transform = transforms.Compose(
            [transforms.RandomCrop(224),
             transforms.ToTensor(), normalize])
        # Load the image.
        img = Image.open(path)
        # Resize before taking the random crops.
        img = img.resize((300, 300))
        # Batch holding all augmented copies of the upload.
        imgs = None
        for i in range(NUM):
            if imgs is None:
                imgs = transform(img).view(1, 3, 224, 224)
            else:
                imgs = torch.cat((imgs, transform(img).view(1, 3, 224, 224)),
                                 0)

        ## Load the ensemble models ##
        models = []
        for k, v in KWARGS.items():
            if k == '1':
                model = densenet(pretrained=False)
            elif k == '2':
                model = senet(pretrained=False)
            model.load_state_dict(torch.load(v, map_location='cpu'))
            model = model.to(device)
            model.eval()
            models.append(model)

        ## Predict ##
        imgs = imgs.to(device)
        # Running sum of the per-model averaged scores.
        sum = None
        # Ensemble averaging strategy.
        for model in models:
            # Scores for every augmented copy.
            output = model(imgs)

            output = output.detach()
            # Average the scores over the augmented copies.
            val = None
            for i in range(output.size(0)):
                if val is None:
                    val = output[i]
                else:
                    val = val + output[i]
            val = val / output.size(0)

            if sum is None:
                sum = val
            else:
                sum += val
        val = sum / len(models)
        _, a = torch.max(val, 0)
        # Predicted class index.
        a = a.item()

        ## Build the response ##
        # The seven pigmented skin disease class names.
        classes = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
        # Predicted class name.
        predict = classes[a]
        results_json = {}
        results_json['result'] = predict
        results_json['status'] = 'Success'
        return jsonify(results_json)
    except Exception:
        # Best-effort endpoint: any failure is reported as a JSON failure
        # status instead of a 500. NOTE(review): the exception is
        # swallowed silently -- consider logging it.
        results_json = {}
        results_json['status'] = 'Failure'
        return jsonify(results_json)
예제 #8
0
def train(train_path, val_path, **kwargs):
    """
    Train a model on the skin-disease dataset with early stopping.
    :param
        train_path(str) -- path of the training set
        val_path(str) -- path of the validation set
    : kwargs
        model(int) -- which architecture to train (1..5)
        epoch(int) -- number of training epochs
        batch_size(int) -- mini-batch size
        learn_rate(int) -- learning-rate index (1..3 -> 1e-3/1e-4/1e-5)
    :return
        per-epoch average training losses (list) and validation
        losses (list)
    """
    # Hyper-parameters.
    lrs = [1e-3, 1e-4, 1e-5]
    # Learning rate (selected by 1-based index).
    LR = lrs[kwargs['learn_rate'] - 1]
    # Number of epochs.
    EPOCH = kwargs['epoch']
    # Batch size.
    BATCH_SIZE = kwargs['batch_size']

    # Preprocessing; mean/std presumably computed on this skin dataset
    # (TODO confirm) -- training gets heavy augmentation, validation only
    # a random crop.
    normalize = transforms.Normalize(mean=[0.76209545, 0.54330575, 0.5679443],
                                     std=[0.14312604, 0.154518, 0.17225058])
    train_transform = transforms.Compose([
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomRotation(degrees=180),
        transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1),
        transforms.ToTensor(), normalize
    ])
    val_transform = transforms.Compose(
        [transforms.RandomCrop(224),
         transforms.ToTensor(), normalize])

    # Load the data.
    # Training loader.
    train_dataset = SkinDiseaseDataset(train_path,
                                       transforms=train_transform,
                                       agumentation=False)
    train_loader = DataLoader(
        train_dataset, batch_size=BATCH_SIZE,
        shuffle=True)  # sampler = ImbalancedDatasetSampler(train_dataset)

    # Validation loader.
    val_dataset = SkinDiseaseDataset(val_path,
                                     transforms=val_transform,
                                     agumentation=False)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE)

    # Run on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Build the selected model architecture.
    if kwargs['model'] == 1:
        model = MFFNet()
    elif kwargs['model'] == 2:
        # expects 299 x 299 input
        model = inception()
    elif kwargs['model'] == 3:
        model = resnet()
    elif kwargs['model'] == 4:
        model = densenet()
    elif kwargs['model'] == 5:
        model = senet()
    # # Resume training: load previous model weights.
    # model.load_state_dict(torch.load(CONFIG.best_model))
    model = model.to(device)

    # Loss function.
    # Class weights are inverse class frequencies (N / n) to counter
    # the dataset's class imbalance.
    weight = torch.Tensor([9., 1.5, 19.48, 30.62, 9.11, 86.86, 71.])
    weight = weight.to(device)
    criterion = nn.CrossEntropyLoss(weight=weight)

    # Optimizer.
    # optimizer = optim.SGD(model.parameters(), lr = LR, weight_decay = 1e-5)
    optimizer = optim.Adam(model.parameters(), lr=LR, weight_decay=1e-5)

    # LR schedule: reduce the learning rate when val loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     patience=7,
                                                     verbose=True)

    # Print a model summary (inception expects 299x299 input).
    if kwargs['model'] == 2:
        summary(model, (3, 299, 299))
    else:
        summary(model, (3, 224, 224))
    print(model)

    # Per-epoch average training loss.
    avg_train_losses = []
    # Per-epoch average validation loss.
    avg_valid_losses = []

    # Early stopping on the validation loss.
    early_stopping = EarlyStopping(patience=12, verbose=True)

    # Training loop.
    for epoch in range(EPOCH):
        # Training mode.
        model.train()
        # Accumulated loss.
        sum_loss = 0.0
        # Correct predictions this epoch.
        correct = 0
        # Samples seen this epoch.
        total = 0
        # Iterations this epoch.
        cnt = 0
        for data in train_loader:
            cnt += 1

            # Load a batch.
            image, label = data
            image, label = image.to(device), label.to(device)

            # Reset gradients.
            optimizer.zero_grad()

            # Forward and backward pass.
            output = model(image)
            loss = criterion(output, label)
            # inceptionV3 variant (auxiliary head):
            # loss = criterion(output, label) + 0.4 * criterion(aux, label)
            loss.backward()
            optimizer.step()

            # Accumulate loss and accuracy statistics.
            sum_loss += loss.item()
            _, a = torch.max(output.detach(), 1)
            b = label.detach()
            total += label.size(0)
            correct += (a == b).sum()
        # Report training loss and accuracy.
        print('[ %d/%d ] train_loss:%.2f train_acc:%.2f%%' %
              (epoch + 1, EPOCH, sum_loss / cnt, 100 * correct / total))
        avg_train_losses.append(sum_loss / cnt)

        # Validation mode.
        model.eval()
        # Accumulated loss.
        sum_loss = 0.0
        # Correct predictions.
        correct = 0
        # Samples seen.
        total = 0
        # Iterations.
        cnt = 0
        for data in val_loader:
            cnt += 1

            # Load a batch.
            image, label = data
            image, label = image.to(device), label.to(device)

            # Forward pass only.
            output = model(image)
            loss = criterion(output, label)

            # Accumulate loss and accuracy statistics.
            sum_loss += loss.item()
            _, a = torch.max(output.detach(), 1)
            b = label.detach()
            total += label.size(0)
            correct += (a == b).sum()
        # Report validation loss and accuracy.
        print("          val_loss:%.2f val_acc:%.2f%%" %
              (sum_loss / cnt, 100 * correct / total))
        avg_valid_losses.append(sum_loss / cnt)

        # Early-stopping bookkeeping on the average validation loss.
        early_stopping(sum_loss / cnt, model)
        # Step the LR scheduler on the same metric.
        scheduler.step(sum_loss / cnt)

        # Save an intermediate checkpoint for this epoch.
        torch.save(
            model.state_dict(),
            CONFIG.intermediate_model + '/checkpoint_%d.pt' % (epoch + 1))

        # Stop if early stopping has triggered.
        if early_stopping.early_stop:
            print("Early stopping")
            break

    return avg_train_losses, avg_valid_losses
예제 #9
0
                                transforms.Normalize([0.485,0.456,  
                                0.406], [0.229, 0.224, 0.225])])
# CIFAR-10 train/validation datasets and loaders (uses the `transform`
# defined just above this chunk).
trainset = datasets.CIFAR10('train/', download=True, train=True, transform=transform)
valset = datasets.CIFAR10('val/', download=True, train=False, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=64, shuffle=True)
len_trainset = len(trainset)
len_valset = len(valset)
classes = ('plane', 'car', 'bird', 'cat','deer', 'dog', 'frog', 'horse', 'ship', 'truck')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Teacher network for knowledge distillation, restored from disk.
resnet = mod.resnetModel()
resnet.to(device) 
resnet.load_state_dict(torch.load('teacher.pth'))
print("Loaded The model ------>  ")

# Train the selected student network by knowledge distillation from the
# resnet teacher, then persist its weights.
if args.model == 1:
    modelName = 'densenetV2.pth'
    print("training Densenet Student:")
    model = mod.densenet()
    model.to(device)
    student = train_and_evaluate_kd(model, resnet, optim.Adam(model.parameters()),
                                    loss_kd, trainloader, valloader, 7, 0.5, args.epochs)
    # BUG FIX: the original called torch.save(student.load_state_dict(), ...);
    # load_state_dict() loads weights (and needs an argument) -- the tensors
    # to serialize come from state_dict().
    torch.save(student.state_dict(), modelName, _use_new_zipfile_serialization=False)

elif args.model == 2:
    modelName = 'googlenetV2.pth'
    student2 = mod.googlenet()
    # BUG FIX: the student must be moved to the training device (the
    # original skipped this step on this branch).
    student2.to(device)
    # BUG FIX: the optimizer must train student2's parameters; the original
    # referenced `model`, a variable from the other branch (unbound here).
    student = train_and_evaluate_kd(student2, resnet, optim.Adam(student2.parameters()),
                                    loss_kd, trainloader, valloader, 7, 0.5, args.epochs)
    torch.save(student.state_dict(), modelName, _use_new_zipfile_serialization=False)

def test(test_path, agumentation, **kwargs):
    """
    Evaluate the ensemble model's performance on the test set.
    :param
        test_path(str) -- path of the test set
        agumentation(bool) -- whether each single image is replicated
            several times (test-time augmentation)
    :kwargs
        maps model id ('1'..'5') -> path of that model's weight file
    """
    # Hyper-parameters: with TTA each loader item is the copies of ONE
    # image, so the batch size is fixed at 1.
    if agumentation:
        BATCH_SIZE = 1
    else:
        BATCH_SIZE = 32

    # Run on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Preprocessing (ImageNet mean/std normalization).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    test_transform = transforms.Compose(
        [transforms.RandomCrop(224),
         transforms.ToTensor(), normalize])

    # Load the data.
    # Build the test loader.
    test_dataset = SkinDiseaseDataset(test_path,
                                      transforms=test_transform,
                                      agumentation=agumentation)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

    # Build and restore every model of the ensemble.
    models = []
    for k, v in kwargs.items():
        if k == '1':
            model = MFFNet()
        elif k == '2':
            # expects 299 x 299 input
            model = inception(pretrained=False)
        elif k == '3':
            model = resnet(pretrained=False)
        elif k == '4':
            model = densenet(pretrained=False)
        elif k == '5':
            model = senet(pretrained=False)
        model.load_state_dict(torch.load(v))
        model = model.to(device)
        # Inference mode.
        model.eval()
        models.append(model)

    # Evaluate the ensemble.
    # Per-class correct-prediction counts.
    class_correct = list(0. for i in range(7))
    # Per-class sample totals.
    class_total = list(0. for i in range(7))
    # Overall correct-prediction count.
    correct = 0
    # Overall sample count.
    total = 0
    # Iteration count.
    cnt = 0
    # Test-time-augmentation mode.
    if agumentation:
        # Predicted labels.
        x = []
        # True labels.
        y = []
        for data in test_loader:
            cnt += 1

            # Load one item = all augmented copies of one image.
            image, label = data
            image = image.view(-1, 3, 224, 224)
            label = label[0]
            image, label = image.to(device), label.to(device)

            # Final score = mean over models of (mean over augmented copies).
            score_sum = None
            for model in models:
                output = model(image).detach()
                # Average the scores over the augmented copies.
                val = None
                for i in range(output.size(0)):
                    if val is None:
                        val = output[i]
                    else:
                        val = val + output[i]
                val = val / output.size(0)

                if score_sum is None:
                    score_sum = val
                else:
                    score_sum += val
            val = score_sum / len(models)
            _, a = torch.max(val, 0)

            # Per-class and overall correctness bookkeeping.
            m = label.detach()
            class_correct[m] += 1 if a == m else 0
            correct += 1 if a == m else 0
            class_total[m] += 1
            # BUG FIX: `total` was never incremented on this path, so the
            # final accuracy print (100 * correct / total) divided by zero
            # whenever agumentation=True.
            total += 1

            x.append(a.item())
            y.append(m.item())
        # Convert lists to numpy arrays.
        x = np.array(x)
        y = np.array(y)
    else:
        # Predicted labels.
        x = None
        # True labels.
        y = None
        for data in test_loader:
            cnt += 1

            # Load a batch.
            image, label = data
            image, label = image.to(device), label.to(device)

            # Averaging strategy: final output is the mean of the models'
            # raw outputs.
            output = None
            for model in models:
                if output is None:
                    output = model(image).detach()
                else:
                    output += model(image).detach()
            output = output / len(models)

            # Accuracy bookkeeping.
            _, a = torch.max(output, 1)
            b = label.detach()
            total += label.size(0)
            correct += (a == b).sum()

            # Accumulate predicted and true labels.
            if x is None:
                x = a
                y = b
            else:
                x = torch.cat((x, a))
                y = torch.cat((y, b))

            # Per-class correctness bookkeeping.
            for i in range(label.size(0)):
                m = b[i]
                class_correct[m] += 1 if a[i] == m else 0
                class_total[m] += 1
        # Convert tensors to numpy arrays.
        x = x.cpu().detach().numpy()
        y = y.cpu().detach().numpy()

    # Report results.
    cm_plot_labels = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
    # Overall accuracy.
    print("test_acc:%.2f%%\n" % (100 * correct / total))
    # Per-class accuracy.
    for i in range(7):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %.2f%% (%2d/%2d)' %
                  (cm_plot_labels[i], 100 * class_correct[i] / class_total[i],
                   class_correct[i], class_total[i]))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)' %
                  cm_plot_labels[i])
    print('')

    # Confusion matrix.
    cm = confusion_matrix(y, x)
    print('')

    # Balanced multi-class accuracy (BMC).
    balanced_multiclass_accuracy(cm)
    print('')

    # Plot the confusion matrix.
    plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix')
    print('')

    # Classification report.
    report = classification_report(y, x, target_names=cm_plot_labels)
    print(report)