Example #1
    def __init__(self, pathModel, nnClassCount, transCrop):

        #---- Initialize the network
        model = DenseNet121(nnClassCount)

        if use_gpu:
            model = torch.nn.DataParallel(model).cuda()
        else:
            model = torch.nn.DataParallel(model)

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        self.model = model
        self.model.eval()

        #---- Initialize the weights
        self.weights = list(
            self.model.module.densenet121.features.parameters())[-2]

        #---- Initialize the image transform
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
        transformList = []
        transformList.append(transforms.Resize((transCrop, transCrop)))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        self.transformSequence = transforms.Compose(transformList)
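The `self.weights` saved above is the weight tensor of the second-to-last parameter of `features`, which class-activation-map (CAM) visualizations multiply against the final feature maps. A minimal sketch of that use, with torchvision's densenet121 standing in for the repo's DenseNet121 wrapper (an assumption; the wrapper itself is not shown):

import torch
from torchvision.models import densenet121

net = densenet121().eval()                 # stand-in for the repo's wrapper
x = torch.randn(1, 3, 224, 224)            # one normalized input image

with torch.no_grad():
    fmap = net.features(x)                 # (1, 1024, 7, 7) final feature maps
weights = net.classifier.weight            # (1000, 1024) classifier weights

# CAM for class c: weighted sum of the feature maps.
c = 0
cam = (weights[c].view(-1, 1, 1) * fmap[0]).sum(dim=0)
cam = (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)   # scale to [0, 1]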
Example #2
def build_model(device, model_name, num_classes=10):
    """构建模型:vgg、vggnonorm、resnet、preactresnet、googlenet、densenet、
                resnext、mobilenet、mobilenetv2、dpn、shufflenetg2、senet、shufflenetv2

    :param device: 'cuda' if you have a GPU, 'cpu' otherwise
    :param model_name: One of the models available in the folder 'models'
    :param num_classes: 10 or 100 depending on the chosen dataset
    :return: The model architecture
    """
    print('==> Building model..')
    model_name = model_name.lower()
    if model_name == 'vgg':
        net = VGG('VGG19', num_classes=num_classes)
    elif model_name == 'vggnonorm':
        net = VGG('VGG19', num_classes=num_classes, batch_norm=False)
    elif model_name == 'resnet':
        net = ResNet18(num_classes=num_classes)
    elif model_name == 'preactresnet':
        net = PreActResNet18()
    elif model_name == 'googlenet':
        net = GoogLeNet()
    elif model_name == 'densenet':
        net = DenseNet121()
    elif model_name == 'resnext':
        net = ResNeXt29_2x64d()
    elif model_name == 'mobilenet':
        net = MobileNet()
    elif model_name == 'mobilenetv2':
        net = MobileNetV2()
    elif model_name == 'dpn':
        net = DPN92()
    elif model_name == 'shufflenetg2':
        net = ShuffleNetG2()
    elif model_name == 'senet':
        net = SENet18()
    elif model_name == 'shufflenetv2':
        net = ShuffleNetV2(1)
    else:
        raise ValueError('Unknown model name: {}'.format(model_name))

    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    return net
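A typical call, assuming the CIFAR-style model classes referenced above are importable and torch/cudnn are imported as in the snippet:

device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = build_model(device, 'DenseNet')   # the name is lower-cased internally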
Example #3
def save_features(**kwargs):
    config.parse(kwargs)

    # prepare data
    # NOTE: first comment out the shuffling and label swapping in BasicDataset;
    # keep the transforms only up to normalization (comment out the later step
    # that shifts to the ImageNet distribution) and disable the earlier
    # flip/rotation augmentations.
    train_data = Vertebrae_Dataset(config.data_root,
                                   config.train_paths,
                                   phase='train',
                                   balance=False)
    train_dataloader = DataLoader(train_data,
                                  batch_size=config.batch_size,
                                  shuffle=False,
                                  num_workers=config.num_workers)
    print('Training Images:', len(train_data))

    # test_data = Vertebrae_Dataset(config.data_root, config.test_paths, phase='test', balance=False)
    # test_dataloader = DataLoader(test_data, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
    # print('Test Images:', test_data.__len__())

    # prepare model
    # model = ResNet34(num_classes=4)
    model = DenseNet121(num_classes=config.num_classes)
    # model = CheXPre_DenseNet121(num_classes=config.num_classes)

    if config.load_model_path:
        model.load(config.load_model_path)
    if config.use_gpu:
        model.cuda()
    model.eval()

    with torch.no_grad():  # replaces the deprecated Variable(..., volatile=True)
        for image, label, image_path in tqdm(train_dataloader):
            img, target = image, label
            if config.use_gpu:
                img = img.cuda()
                target = target.cuda()

            model.save_feature(
                img, target, image_path,
                feature_folder='/DATA5_DB8/data/bllai/Data/Features_Horizontal_Vertical')
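`save_feature` is a method of the repo's model wrapper and is not shown here. With a plain torchvision model, the same effect (dumping penultimate features) can be approximated with a forward hook; a sketch under that assumption:

import torch
from torchvision.models import densenet121

feats = {}
net = densenet121().eval()

def grab(module, inputs, output):
    # global-average-pool the final feature maps into one vector per image
    feats['vec'] = torch.nn.functional.adaptive_avg_pool2d(output, 1).flatten(1)

net.features.register_forward_hook(grab)
with torch.no_grad():
    net(torch.randn(2, 3, 224, 224))
print(feats['vec'].shape)   # torch.Size([2, 1024])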
Example #4
def DenseNet121(shape, num_classes, last_activation):
    base_model = DenseNet121Model.DenseNet(weights='imagenet', input_shape=shape)
    x = base_model.output
    x = Dense(num_classes, activation=last_activation)(x)
    model = Model(inputs=base_model.input, outputs=x)
    return model
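Hedged usage: this stacks a new head on top of the base network's output, e.g. for a 3-class problem (assuming the Keras-style `Dense`, `Model`, and `DenseNet121Model` imports used above):

model = DenseNet121(shape=(224, 224, 3), num_classes=3, last_activation='softmax')
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])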
Example #5
def test_output(**kwargs):
    config.parse(kwargs)

    # prepare data
    test_data = Vertebrae_Dataset(
        config.data_root, config.test_paths,
        phase='test_output')  # do not pass balance=False here, or the resulting dataset will include the mixed type
    test_dataloader = DataLoader(test_data,
                                 batch_size=config.batch_size,
                                 shuffle=False,
                                 num_workers=config.num_workers)
    # test_data = FrameDiff_Dataset(config.data_root, config.test_paths, phase='test_output')  # do not pass balance=False here, or the dataset will include the mixed type
    # test_dataloader = DataLoader(test_data, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
    print('Test Images:', len(test_data))

    # prepare model
    # model = ResNet34(num_classes=config.num_classes)
    model = DenseNet121(num_classes=config.num_classes)
    # model = CheXPre_DenseNet121(num_classes=config.num_classes)

    if config.load_model_path:
        model.load(config.load_model_path)
    if config.use_gpu:
        model.cuda()
    model.eval()

    softmax = functional.softmax
    misclassified, results = [], []

    # go through the model
    for i, (image, label, image_path) in tqdm(enumerate(test_dataloader)):
        with torch.no_grad():  # replaces the deprecated Variable(..., volatile=True)
            img, target = image, label
            if config.use_gpu:
                img = img.cuda()
                target = target.cuda()

            score = model(img)

        # collect results of each slice
        for path, predicted, true_label, prob in zip(
                image_path, np.argmax(softmax(score, dim=1).data, 1), target,
                softmax(score, dim=1).data):
            # Remap the predicted osteoblastic class from 2 to 3 so that the
            # predicted and true labels in the CSV file stay consistent.
            if int(predicted) == 2:
                predicted = 3
            results.append((path.split('VertebraeData')[1], int(predicted),
                            int(true_label), prob[0], prob[1], prob[2]))
            if predicted != int(true_label):
                misclassified.append(
                    (path.split('VertebraeData')[1], int(predicted),
                     int(true_label), prob[0], prob[1], prob[2]))

    write_csv(
        os.path.join('results', config.results_file),
        ['image_path', 'predict', 'true_label', 'prob_1', 'prob_2', 'prob_3'],
        results)
    write_csv(
        os.path.join('results', config.misclassified_file),
        ['image_path', 'predict', 'true_label', 'prob_1', 'prob_2', 'prob_3'],
        misclassified)
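`write_csv` is a repo helper that is not shown; a minimal sketch of what it plausibly does, given how it is called above:

import csv

def write_csv(path, header, rows):
    # one header row followed by one row per (path, predict, label, prob...) tuple
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(rows)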
Example #6
def model_gen_fun():
    model = DenseNet121(num_classes=1, num_channels=1).eval()
    return model
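The factory returns an eval-mode single-channel, single-output network. A hedged smoke test, assuming the repo's DenseNet121 accepts (N, 1, H, W) input:

import torch

model = model_gen_fun()
with torch.no_grad():
    out = model(torch.randn(1, 1, 224, 224))   # one logit per image
print(out.shape)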
Example #7
def estimate(X_train, y_train):
    nrows = 256
    ncolumns = 256
    batch_size = 20
    epochs = 2
    num_cpu = multiprocessing.cpu_count()
    num_classes = 2  # number of classes
    torch.manual_seed(8)
    torch.cuda.manual_seed(8)
    np.random.seed(8)
    random.seed(8)

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    X = []
    X_train = np.reshape(np.array(X_train), [len(X_train)])
    for img in range(len(X_train)):
        if X_train[img].ndim >= 3:
            X.append(
                np.moveaxis(
                    cv2.resize(X_train[img][:, :, :3], (nrows, ncolumns),
                               interpolation=cv2.INTER_CUBIC), -1, 0))
        else:
            smimg = cv2.cvtColor(X_train[img], cv2.COLOR_GRAY2RGB)
            X.append(
                np.moveaxis(
                    cv2.resize(smimg, (nrows, ncolumns),
                               interpolation=cv2.INTER_CUBIC), -1, 0))

        if y_train[img] == 'COVID':
            y_train[img] = 1
        elif y_train[img] == 'NonCOVID':
            y_train[img] = 0
        else:
            continue

    x = np.array(X)
    y_train = np.array(y_train)

    outputs_all = []
    labels_all = []

    X_train, X_val, y_train, y_val = train_test_split(x,
                                                      y_train,
                                                      test_size=0.15,
                                                      random_state=0)

    image_transforms = {
        'train':
        transforms.Compose([
            transforms.Lambda(lambda x: x / 255),
            transforms.ToPILImage(),
            transforms.Resize((230, 230)),
            transforms.RandomResizedCrop((224), scale=(0.5, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(10),
            #transforms.ColorJitter(brightness=0.2, contrast=0.2),
            transforms.ToTensor(),
            transforms.Normalize([0.45271412, 0.45271412, 0.45271412],
                                 [0.33165374, 0.33165374, 0.33165374])
        ]),
        'valid':
        transforms.Compose([
            transforms.Lambda(lambda x: x / 255),
            transforms.ToPILImage(),
            transforms.Resize((230, 230)),
            transforms.CenterCrop(size=224),
            transforms.ToTensor(),
            transforms.Normalize([0.45271412, 0.45271412, 0.45271412],
                                 [0.33165374, 0.33165374, 0.33165374])
        ])
    }

    train_data = MyDataset(X_train, y_train, image_transforms['train'])

    valid_data = MyDataset(X_val, y_val, image_transforms['valid'])

    dataset_sizes = {'train': len(train_data), 'valid': len(valid_data)}

    dataloaders = {
        'train':
        data.DataLoader(train_data,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_cpu,
                        pin_memory=True,
                        worker_init_fn=lambda _: np.random.seed(7),  # np.random.seed(7) alone would pass None
                        drop_last=False),
        'valid':
        data.DataLoader(valid_data,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_cpu,
                        pin_memory=True,
                        worker_init_fn=lambda _: np.random.seed(7),  # np.random.seed(7) alone would pass None
                        drop_last=False)
    }

    modelA = DenseNet121(num_classes, pretrained=True)
    num_ftrs1 = modelA.fc.in_features
    checkpoint0 = torch.load('Model_densenet121_state.pth', map_location='cpu')
    modelA.load_state_dict(checkpoint0)

    modelC = ResidualAttentionModel(2)
    num_ftrs2 = modelC.fc.in_features
    checkpoint0 = torch.load('Model_residual_state.pth', map_location='cpu')
    modelC.load_state_dict(checkpoint0)

    model = MyEnsemble(modelA, modelC, num_ftrs1, num_ftrs2)

    for param in modelC.parameters():
        param.requires_grad_(False)

    for param in modelA.parameters():
        param.requires_grad_(False)

    model = nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
    criterion = nn.CrossEntropyLoss()
    #optimizer = optim.SGD(model.parameters(), lr=0.006775, momentum=0.5518, weight_decay=0.000578)
    optimizer = optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.05)
    #scheduler =  lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=35, gamma=0.1)

    best_acc = -1
    best_f1 = 0.0
    best_epoch = 0
    best_loss = 100000
    since = time.time()
    writer = SummaryWriter()

    model.train()

    for epoch in range(epochs):
        print('epoch', epoch)
        jj = 0
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0
            predictions = FloatTensor()
            all_labels = FloatTensor()

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device, non_blocking=True)
                labels = labels.to(device, non_blocking=True)
                predictions = predictions.to(device, non_blocking=True)
                all_labels = all_labels.to(device, non_blocking=True)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    predictions = torch.cat([predictions, preds.float()])
                    all_labels = torch.cat([all_labels, labels.float()])

                    # backward + optimize only if in training phase
                    if phase == 'train':

                        loss.backward()
                        optimizer.step()

                    if phase == 'train':
                        jj += 1

                        if len(inputs) >= 16:

                            #print('len(inputs)',len(inputs),i)
                            writer.add_figure(
                                'predictions vs. actuals epoch ' + str(epoch) +
                                ' ' + str(jj),
                                plot_classes_preds(model, inputs, labels))

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train':
                scheduler.step()

            epoch_f1 = f1_score(all_labels.tolist(),
                                predictions.tolist(),
                                average='weighted')
            print(phase, 'confusion_matrix',
                  confusion_matrix(all_labels.tolist(), predictions.tolist()))
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = accuracy_score(all_labels.tolist(),
                                       predictions.tolist())

            print('{} Loss: {:.4f} Acc: {:.4f} f1: {:.4f}'.format(
                phase, epoch_loss, epoch_acc, epoch_f1))

            # Record training loss and accuracy for each phase
            if phase == 'train':
                writer.add_scalar('Train/Loss', epoch_loss, epoch)
                writer.add_scalar('Train/Accuracy', epoch_acc, epoch)

                writer.flush()
            elif phase == 'valid':
                writer.add_scalar('Valid/Loss', epoch_loss, epoch)
                writer.add_scalar('Valid/Accuracy', epoch_acc, epoch)
                writer.flush()

            # deep copy the model
            if phase == 'valid' and epoch_acc > best_acc:
                best_f1 = epoch_f1
                best_acc = epoch_acc
                best_loss = epoch_loss
                best_epoch = epoch
                best_model_wts = copy.deepcopy(model.module.state_dict())
                best_model_wts_module = copy.deepcopy(model.state_dict())

    model.load_state_dict(best_model_wts_module)
    torch.save(model, "Model_ensemble.pth")
    torch.save(best_model_wts, "Model_ensemble_state.pth")
    time_elapsed = time.time() - since

    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best valid Acc: {:4f}'.format(best_acc))
    print('Best valid f1: {:4f}'.format(best_f1))
    print('best epoch: ', best_epoch)

    model.module.classifier2 = nn.Identity()

    for param in model.parameters():
        param.requires_grad_(False)

    clf1 = svm.SVC(kernel='rbf', probability=True)
    all_best_accs = {}
    all_best_f1s = {}
    clf2 = ExtraTreesClassifier(n_estimators=40,
                                max_depth=None,
                                min_samples_split=30,
                                random_state=0)

    for phase in ['train', 'valid']:
        outputs_all = []
        labels_all = []
        model.eval()  # Set model to evaluate mode

        # Iterate over data.
        for inputs, labels in dataloaders[phase]:
            inputs = inputs.to(device, non_blocking=True)
            labels = labels.to(device, non_blocking=True)

            outputs = model(inputs)
            #print(outputs.shape)
            outputs_all.append(outputs)
            labels_all.append(labels)

        outputs = torch.cat(outputs_all)
        #print('outputss',outputs.shape)
        labels = torch.cat(labels_all)

        # fit the classifier on training set and then predict on test
        if phase == 'train':
            clf1.fit(outputs.cpu(), labels.cpu())
            clf2.fit(outputs.cpu(), labels.cpu())
            filename1 = 'classifier_SVM.sav'
            filename2 = 'classifier_ExtraTrees.sav'

            joblib.dump(clf1, filename1)
            joblib.dump(clf2, filename2)
            all_best_accs[phase] = accuracy_score(labels.cpu(),
                                                  clf1.predict(outputs.cpu()))
            all_best_f1s[phase] = f1_score(labels.cpu(),
                                           clf1.predict(outputs.cpu()))
            print(phase, 'confusion_matrix of SVM',
                  confusion_matrix(labels.cpu(), clf1.predict(outputs.cpu())))
            print(phase, 'confusion_matrix of ExtraTrees',
                  confusion_matrix(labels.cpu(), clf2.predict(outputs.cpu())))
        if phase == 'valid':
            all_best_accs[phase] = accuracy_score(labels.cpu(),
                                                  clf1.predict(outputs.cpu()))
            all_best_f1s[phase] = f1_score(labels.cpu(),
                                           clf1.predict(outputs.cpu()))
            print(phase, 'confusion_matrix of SVM',
                  confusion_matrix(labels.cpu(), clf1.predict(outputs.cpu())))
            print(phase, 'confusion_matrix of ExtraTrees',
                  confusion_matrix(labels.cpu(), clf2.predict(outputs.cpu())))

    print('Best Acc: ', all_best_accs)
    print('Best f1: ', all_best_f1s)

    return model
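The design here trains the CNN ensemble first, then freezes it, strips classifier2 to nn.Identity() to expose the penultimate features, and fits an SVM and an ExtraTrees classifier on those features. Hedged usage, assuming X_train is a list of grayscale or RGB numpy images and y_train holds 'COVID'/'NonCOVID' strings, as the preprocessing above expects:

model = estimate(X_train, y_train)   # also writes Model_ensemble*.pth and the *.sav classifiers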
Example #8
def predict(X_test, model_main=None):

    i = 0
    nrows = 256
    ncolumns = 256
    num_classes = 2
    bs = 20
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    modelA = DenseNet121(num_classes, pretrained=True)
    num_ftrs1 = modelA.fc.in_features

    modelB = ResidualAttentionModel(2)
    num_ftrs2 = modelB.fc.in_features

    model_main = MyEnsemble(modelA, modelB, num_ftrs1, num_ftrs2)
    checkpoint0 = torch.load("Model_ensemble_state.pth")
    model_main.load_state_dict(checkpoint0)

    for param in model_main.parameters():
        param.requires_grad_(False)

    model_main = nn.DataParallel(model_main, device_ids=[0, 1, 2, 3]).cuda()
    X_t = []
    X_test = np.reshape(np.array(X_test), [len(X_test)])

    for img in range(len(X_test)):
        if X_test[img].ndim >= 3:
            X_t.append(
                np.moveaxis(
                    cv2.resize(X_test[img][:, :, :3], (nrows, ncolumns),
                               interpolation=cv2.INTER_CUBIC), -1, 0))
        else:
            smimg = cv2.cvtColor(X_test[img], cv2.COLOR_GRAY2RGB)
            X_t.append(
                np.moveaxis(
                    cv2.resize(smimg, (nrows, ncolumns),
                               interpolation=cv2.INTER_CUBIC), -1, 0))

    x = np.array(X_t)
    y_pred = []

    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
    np.random.seed(0)
    random.seed(0)
    # keep the CUDA device chosen above; overriding it with "cpu" here would
    # conflict with the model that was just moved to the GPU

    model_main.eval()

    image_transforms = transforms.Compose([
        transforms.Lambda(lambda x: x / 255),
        transforms.ToPILImage(),
        transforms.Resize((230, 230)),
        transforms.CenterCrop(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.45271412, 0.45271412, 0.45271412],
                             [0.33165374, 0.33165374, 0.33165374])
    ])

    dataset = MyDataset_test(x, image_transforms)
    dataloader = DataLoader(dataset,
                            batch_size=bs,
                            pin_memory=True,
                            worker_init_fn=lambda _: np.random.seed(0),  # np.random.seed(0) alone would pass None
                            drop_last=False)

    for inputs in dataloader:
        #inputs = torch.from_numpy(inputs).float()
        inputs = inputs.to(device, non_blocking=True)
        outputs = model_main(inputs)
        _, preds = torch.max(outputs, 1)

        #pred = clf.predict(outputs.cpu())
        for ii in range(len(preds)):
            if preds[ii] > 0.5:
                y_pred.append('COVID')

            else:
                y_pred.append('NonCOVID')

        i += 1
        if i % math.ceil(len(X_test) / bs) == 0:
            break

    model_main.module.classifier2 = nn.Identity()

    clf = joblib.load('classifier_ExtraTrees.sav')
    for param in model_main.parameters():
        param.requires_grad_(False)
    y_pred2 = []
    for inputs in dataloader:
        inputs = inputs.to(device, non_blocking=True)
        outputs = model_main(inputs)
        preds = clf.predict(outputs.cpu())

        for ii in range(len(preds)):
            if preds[ii] > 0.5:
                y_pred2.append('COVID')

            else:
                y_pred2.append('NonCOVID')

        i += 1
        if i % math.ceil(len(X_test) / bs) == 0:
            break

    return y_pred, y_pred2
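Note the two prediction paths above: y_pred comes from the ensemble's own classification head via torch.max, while y_pred2 comes from the saved ExtraTrees classifier applied to the penultimate features exposed by replacing classifier2 with nn.Identity().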
Example #9
                          shuffle=True,
                          num_workers=NUM_WORKERS,
                          pin_memory=True)
valid_loader = DataLoader(dataset=valid_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=False,
                          num_workers=NUM_WORKERS,
                          pin_memory=True)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False,
                         num_workers=NUM_WORKERS,
                         pin_memory=True)
print('Data Loaded')

model = DenseNet121(num_labels)
# nn.BCEWithLogitsLoss() fuses a sigmoid with BCELoss(); it is faster and more
# numerically stable than applying BCELoss to sigmoid outputs separately.
# criterion = nn.CrossEntropyLoss()  # single-label alternative (softmax + cross entropy)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)

if torch.cuda.device_count() > 1:
    print("Use", torch.cuda.device_count(), "GPUs")
    model = nn.DataParallel(model)

device = torch.device(
    "cuda" if torch.cuda.is_available() and USE_CUDA else "cpu")
model.to(device)
criterion.to(device)
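With nn.BCEWithLogitsLoss the targets are multi-hot float vectors, one entry per label, rather than class indices. A minimal training-step sketch under that assumption, reusing the names defined above (and assuming the loader truncated at the top of this example is train_loader):

model.train()
for images, targets in train_loader:             # targets: (B, num_labels) floats in {0, 1}
    images = images.to(device)
    targets = targets.to(device).float()
    optimizer.zero_grad()
    loss = criterion(model(images), targets)     # fused sigmoid + binary cross entropy
    loss.backward()
    optimizer.step()
scheduler.step()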
Example #10
def predict(X_test, model_main=None):

    i = 0
    nrows = 256
    ncolumns = 256
    num_classes = 2

    model_main = DenseNet121(num_classes, pretrained=True)
    checkpoint0 = torch.load("Model_densenet121_state.pth")
    model_main.load_state_dict(checkpoint0)

    for param in model_main.parameters():
        param.requires_grad_(False)

    X_t = []
    X_test = np.reshape(np.array(X_test), [len(X_test)])

    for img in range(len(X_test)):
        if X_test[img].ndim >= 3:
            X_t.append(
                np.moveaxis(
                    cv2.resize(X_test[img][:, :, :3], (nrows, ncolumns),
                               interpolation=cv2.INTER_LINEAR), -1, 0))
        else:
            smimg = cv2.cvtColor(X_test[img], cv2.COLOR_GRAY2RGB)
            X_t.append(
                np.moveaxis(
                    cv2.resize(smimg, (nrows, ncolumns),
                               interpolation=cv2.INTER_LINEAR), -1, 0))

    x = np.array(X_t)
    y_pred = []

    torch.manual_seed(8)
    torch.cuda.manual_seed(8)
    np.random.seed(8)
    random.seed(8)
    device = torch.device("cpu")

    model_main.eval()

    image_transforms = transforms.Compose([
        transforms.Lambda(lambda x: x / 255),
        transforms.ToPILImage(),
        #transforms.Resize((224, 224)),
        transforms.Resize((230, 230)),
        transforms.CenterCrop(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.45271412, 0.45271412, 0.45271412],
                             [0.33165374, 0.33165374, 0.33165374])
    ])

    dataset = MyDataset_test(x, image_transforms)
    dataloader = DataLoader(dataset,
                            batch_size=16,
                            pin_memory=True,
                            worker_init_fn=lambda _: np.random.seed(7),  # np.random.seed(7) alone would pass None
                            drop_last=False)

    for inputs in dataloader:
        #inputs = torch.from_numpy(inputs).float()
        inputs = inputs.to(device, non_blocking=True)
        outputs = model_main(inputs)
        _, preds = torch.max(outputs, 1)

        #pred = clf.predict(outputs.cpu())
        for ii in range(len(preds)):
            if preds[ii] > 0.5:
                y_pred.append('COVID')

            else:
                y_pred.append('NonCOVID')

        i += 1
        if i % math.ceil(len(X_test) / 16) == 0:
            break

    ## Replacing the last fully connected layer with SVM or ExtraTrees Classifiers

    model_main.fc = nn.Identity()
    clf = joblib.load('classifier_model.sav')
    for param in model_main.parameters():
        param.requires_grad_(False)
    y_pred2 = []
    for inputs in dataloader:
        inputs = inputs.to(device, non_blocking=True)
        outputs = model_main(inputs)
        preds = clf.predict(outputs)

        for ii in range(len(preds)):
            if preds[ii] > 0.5:
                y_pred2.append('COVID')
            else:
                y_pred2.append('NonCOVID')
        i += 1
        if i % math.ceil(len(X_test) / 16) == 0:
            break

    return y_pred, y_pred2
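Hedged usage, assuming X_test is a list of HxW or HxWx3 numpy images as the preprocessing above expects:

y_cnn, y_clf = predict(X_test)   # labels from the CNN head and from the saved classifier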