def call_screening_model(test_screening_loader):
    # os.chdir('D:\\de\\12_credit\\crebral_microbleeds\\cmb code\\fcn_cmb')
    path_scr = 'checkpoints_screening_stage2\\checkpoint_169.pth.tar'
    # path_scr = 'checkpoint_screeningStage2_corrected\\checkpoint_53.pth.tar'
    model_fcn = model_screening.CNN()
    model_fcn.eval()

    # load params from the stage-2 screening checkpoint
    state = torch.load(path_scr, map_location='cpu')
    model_fcn.load_state_dict(state['state_dict'], strict=False)

    pbar = tqdm(enumerate(test_screening_loader),
                total=len(test_screening_loader))

    scores = []
    with torch.no_grad():  # inference only; no gradients needed
        for index, patch in pbar:
            pred_candidate_score = model_fcn(patch)
            # softmax over the class dimension turns logits into probabilities
            pred_candidate_score = torch.nn.functional.softmax(
                pred_candidate_score, dim=1)
            scores.append(pred_candidate_score)

    # softmax scores for all patches across batches
    return torch.cat(scores, dim=0)
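
# Usage sketch (hypothetical): the patch size, batch size, and output shape
# below are assumptions chosen only to illustrate the loader format this
# function expects; they are not part of the original pipeline.
import torch
from torch.utils.data import DataLoader

patches = [torch.randn(1, 16, 16) for _ in range(8)]  # hypothetical 3-D patches
loader = DataLoader(patches, batch_size=4)            # batches of (4, 1, 16, 16)
scores = call_screening_model(loader)                 # assumed shape (8, 2) for a binary screening CNN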
Example 2
def call_screening_model(test_screening_loader):
    os.chdir('D:\\de\\12_credit\\crebral_microbleeds\\cmb code\\cmb_FCN')
    path_scr = 'D:\\de\\12_credit\\crebral_microbleeds\\cmb code\\cmb_FCN\\checkpoint_390.pth.tar'
    model_scr = model_screening.CNN()
    model_scr.eval()

    # load params (checkpoint is mapped to CPU)
    state = torch.load(path_scr, map_location='cpu')
    model_scr.load_state_dict(state['state_dict'])

    pbar = tqdm(enumerate(test_screening_loader),
                total=len(test_screening_loader))

    scores = []
    with torch.no_grad():  # inference only; no gradients needed
        for index, patch in pbar:
            pred_candidate_score = model_scr(patch)
            # softmax over the class dimension yields candidate probabilities
            pred_candidate_score = torch.nn.functional.softmax(
                pred_candidate_score, dim=1)
            scores.append(pred_candidate_score)

    # softmax scores for all patches across batches
    return torch.cat(scores, dim=0)
def train(train_loader, model, epoch, num_epochs):
    if epoch == 0:
        # resume from the corrected stage-2 checkpoint and offset the
        # epoch counter
        model = model_screening.CNN()
        device = torch.device('cuda')
        model.to(device)
        path = 'checkpoint_screeningStage2_corrected\\checkpoint_53.pth.tar'
        checkpoint = torch.load(path, map_location='cpu')
        model.load_state_dict(checkpoint['state_dict'])
        epoch = checkpoint['epoch'] + 1000

    device = torch.device('cuda')
    model.to(device)
    model.train()
    losses = AverageMeter()

    # note: the optimizer is re-created on every call, so momentum state
    # does not persist across epochs
    optimizer = optim.SGD(model.parameters(), lr=0.03, momentum=0.9)

    # optimizer = optim.Adadelta(model.parameters(), lr=1.0, rho=0.95, eps=1e-06, weight_decay=0)

    pbar = tqdm(enumerate(train_loader), total=len(train_loader))

    for i, (images, labels) in pbar:
        images = images.cuda()

        # images = images.unsqueeze(dim=1)

        optimizer.zero_grad()
        if epoch == 10053:
            # sanity check: replaces the batch with Gaussian noise in-place
            images = nn.init.normal_(images, mean=0, std=0.01)

        # compute output
        outputs = model(images)

        labels = labels.squeeze().long().cuda()
        outputs = outputs.squeeze()

        # cross_entropy expects raw logits (it applies log-softmax
        # internally), so no explicit softmax is applied here
        loss = torch.nn.functional.cross_entropy(outputs, labels)
        losses.update(loss.item(), images.size(0))

        loss.backward()
        optimizer.step()

        pbar.set_description('[TRAIN] - EPOCH %d/ %d - BATCH LOSS: %.4f/ %.4f(avg) '
                             % (epoch + 1, num_epochs, losses.val, losses.avg))

    return losses.avg
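
# AverageMeter is used above but not defined in this snippet. A minimal
# sketch of the conventional helper (an assumption about the original):
class AverageMeter(object):
    """Tracks the latest value and a running average."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # val is the batch loss, n the number of samples in the batch
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count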
def validate(val_loader, epoch_index):
    path = 'checkpoints_screening_stage1\\checkpoint_' + str(epoch_index) + '.pth.tar'
    device = torch.device("cuda")
    model = model_screening.CNN()
    model.to(device)

    # load params
    state = torch.load(path)
    model.load_state_dict(state['state_dict'])
    model.eval()

    losses = AverageMeter()

    # set a progress bar
    pbar = tqdm(enumerate(val_loader), total=len(val_loader))

    # validation only measures the loss; no gradients or weight updates
    with torch.no_grad():
        for i, (images, labels) in pbar:
            images = images.cuda()

            images = images.unsqueeze(dim=1)

            if epoch_index == 1:
                # sanity check: replaces the batch with Gaussian noise in-place
                images = nn.init.normal_(images, mean=0, std=0.01)

            # compute output
            outputs = model(images)

            labels = labels.squeeze().long().cuda()
            outputs = outputs.squeeze()

            # cross_entropy expects raw logits, so no explicit softmax here
            loss = torch.nn.functional.cross_entropy(outputs, labels)
            losses.update(loss.item(), images.size(0))

            pbar.set_description('[validate] - BATCH LOSS: %.4f/ %.4f(avg) '
                                 % (losses.val, losses.avg))

    return losses.avg
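
# Usage sketch (hypothetical): sweep saved stage-1 checkpoints with
# validate() and keep the one with the lowest validation loss. The
# checkpoint index range is an assumption, and val_loader is assumed to
# come from create_dset_screening_stage1() as in the main program below.
best_epoch, best_val_loss = None, float('inf')
for epoch_index in range(1, 10):  # hypothetical checkpoint indices
    val_loss = validate(val_loader, epoch_index)
    if val_loss < best_val_loss:
        best_epoch, best_val_loss = epoch_index, val_loss
print('best checkpoint:', best_epoch, 'val loss:', best_val_loss)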
Example 5
def prepare_datset_with_mimics(dataloader):
    # path = 'checkpoints_screening_stage1\\checkpoint_188.pth.tar'
    path = 'checkpoint_screeningStage2_corrected\\checkpoint_53.pth.tar'
    # path = 'checkpoints_screening_stage2\\checkpoint_223.pth.tar'
    device = torch.device('cpu')
    model = model_screening.CNN()
    model.to(device)
    model.eval()

    # load params
    state = torch.load(path, map_location='cpu')
    model.load_state_dict(state['state_dict'])

    false_positive = []  # 28.85%
    positive = []        # 23.63%
    negative = []        # 47.52%
    false_negative = []

    # set a progress bar
    pbar = tqdm(enumerate(dataloader), total=len(dataloader))

    total = 0

    with torch.no_grad():  # inference only; no gradients needed
        for i, (images, labels) in pbar:
            images = images.unsqueeze(dim=1)

            outputs = model(images)

            labels = labels.squeeze().long()
            outputs = outputs.squeeze()
            # class probabilities over the two classes (negative / candidate)
            outputs = torch.nn.functional.softmax(outputs, dim=1)

            # sort each patch into TP / TN / FP / FN buckets
            for x in range(len(images)):
                total += 1
                pred = float(outputs[x].argmax())

                if (labels[x] == 0.0) and (pred == 1.0):
                    false_positive.append([images[x], 0.0])
                    # (disabled) augmentation of false positives with a
                    # horizontal flip and +/-10-voxel rolls of the patch

                if (labels[x] == 0.0) and (pred == 0.0):
                    negative.append([images[x], 0.0])

                if (labels[x] == 1.0) and (pred == 1.0):
                    positive.append([images[x], 1.0])

                if (labels[x] == 1.0) and (pred == 0.0):
                    false_negative.append([images[x], 1.0])

    # new_negative_list = random.sample(negative, len(positive))
    # false_positive = random.sample(false_positive, len(positive))

    print(len(positive), 'positive')
    print(len(negative), 'negative')
    print(len(false_positive), 'false_positive')
    print(len(false_negative), 'false_negative')

    random.shuffle(negative)
    random.shuffle(positive)
    random.shuffle(false_positive)

    # note: the arguments below mix a percent-style value (29) with
    # fraction-style values (0.47, 0.24), mirroring the class proportions
    # noted in the comments above
    new_false_positive = remove_percentage(false_positive, 29)

    # new_false_positive = false_positive
    # new_positive = random.sample(positive, 875)
    # new_negative_list = random.sample(negative, 1750)

    new_negative_list = remove_percentage(negative, 0.47)

    new_positive = remove_percentage(positive, 0.24)

    print('after removal')

    print(len(new_false_positive), 'false_positive')
    print(len(new_negative_list), 'negative')
    print(len(new_positive), 'positive')

    # positive_count = len(false_positive)
    # positive_list = random.sample(positive, positive_count)

    complete_dataset_stage2 = new_false_positive + new_negative_list + new_positive

    random.shuffle(complete_dataset_stage2)

    return complete_dataset_stage2
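
# remove_percentage is used above but not defined in this snippet. A minimal
# sketch consistent with the calls here (an assumption: values > 1 are
# treated as percents, values <= 1 as fractions, and that share of the
# already-shuffled list is dropped):
def remove_percentage(items, percentage):
    frac = percentage / 100.0 if percentage > 1 else percentage
    keep_count = int(len(items) * (1.0 - frac))
    return items[:keep_count]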


### MAIN PROGRAM STARTS HERE ###

train_loader, val_loader = create_dset_screening_stage1()

model = model_screening.CNN()

optimizer = optim.SGD(model.parameters(), lr=0.03, momentum=0.9)

# optimizer = optim.Adadelta(model.parameters(), lr=1.0, rho=0.95, eps=1e-06, weight_decay=0)

best_loss = 0

# run the training loop
num_epochs = 700

plt_loss = []
plt_loss.append(['Current epoch', 'Train loss', 'Validation loss'])

for epoch in range(0, num_epochs):