Example #1
def predict(img_path,
            model,
            num_labels,
            postfix,
            main_folder_path,
            eval_method,
            gpu0,
            useGPU,
            stride=50,
            patch_size=70,
            test_augm=True,
            extra_patch=30):
    #read image
    img = PP.numpyFromScan(img_path)
    #read wmh
    gt_path = img_path.replace('slices',
                               'gt_slices').replace('FLAIR',
                                                    'wmh').replace('/pre', '')
    gt, affine = PP.numpyFromScan(gt_path,
                                  get_affine=True,
                                  makebin=(num_labels == 2))

    img = img.transpose((3, 0, 1, 2))
    img = img[np.newaxis, :]
    gt = gt.transpose((3, 0, 1, 2))

    if eval_method == 0:
        if useGPU:
            out_v = model(
                Variable(torch.from_numpy(img).float(),
                         volatile=True).cuda(gpu0))
        else:
            out_v = model(
                Variable(torch.from_numpy(img).float(), volatile=True))
        out = out_v.data[0].cpu().numpy()
        # free the graph: rebind out_v to a tiny dummy Variable so the large
        # network output can be garbage collected before the next prediction
        del out_v
        out_v = Variable(torch.from_numpy(np.array([1])).float())
    elif eval_method == 1:
        out = predictByPatches(img,
                               model,
                               num_labels,
                               useGPU,
                               gpu0,
                               stride=stride,
                               patch_size=patch_size,
                               test_augm=test_augm,
                               extra_patch=extra_patch)
    out = out.squeeze()
    #take argmax to get predictions
    out = np.argmax(out, axis=0)
    #remove batch and label dimension
    img = img.squeeze()
    out = out.squeeze()
    gt = gt.squeeze()

    return img, gt, out, affine
def evalModel(model):
    img_list = open(eval_list).readlines()
    if test_mode:
        if models_path == 'None':
            print('Insert model path if you are testing this model')
            sys.exit()
        model = loadSnapshot(model, models_path)

        for img_str in img_list:
            img_str = img_str.rstrip()
            img, gt, out, affine = EF.predict(os.path.join(main_folder_path, img_str),
                                              model, num_labels, postfix, main_folder_path, eval_method,
                                              gpu0, useGPU, patch_size=patch_pred_size, test_augm=test_augm, extra_patch=extra_patch)

            #save prediction
            save_path = os.path.join('temp_preds', 'pred_' + img_str.split('/')[-3] + '_s' + str(gt.shape[0]) + '.nii.gz')
            PP.saveScan(out, affine, save_path)
    else:
        if single_eval:
            r = range(1)
        else:
            r = range(iter_low, iter_high, iter_step)
        for iter in r:
            counter = 0
            if single_eval:
                model = loadSnapshot(model, models_path)
            else:
                model = loadSnapshot(model, os.path.join(snapshots_path, models_path + '_' + str(iter*1000) + '.pth'))
            r_list_iou = []
            r_list_dice = []
            r_list_recall = []
            r_list_precision = []
            for img_str in img_list:
                img_str = img_str.rstrip()
                img, gt, out, _ = EF.predict(os.path.join(main_folder_path, img_str),
                                             model, num_labels, postfix, main_folder_path, eval_method,
                                             gpu0, useGPU, patch_size=patch_pred_size, test_augm=test_augm, extra_patch=extra_patch)

                result_iou = METRICS.metricEval('iou', out, gt, num_labels)
                result_dice = METRICS.metricEval('dice', out, gt, num_labels)
                result_recall = METRICS.metricEval('recall', out, gt, num_labels)
                result_precision = METRICS.metricEval('precision', out, gt, num_labels)

                r_list_iou.append(result_iou)
                r_list_dice.append(result_dice)
                r_list_recall.append(result_recall)
                r_list_precision.append(result_precision)
                counter += 1
                print "Model Iter {:5d} Progress: {:4d}/{:4d} iou {:1.4f} dice {:1.4f} recall {:1.4f} precision {:1.4f}  \r".format(iter * 1000, counter, len(img_list), result_iou, result_dice, result_recall, result_precision),
                sys.stdout.flush()
            avg_iou = np.mean(r_list_iou)
            avg_dice = np.mean(r_list_dice)
            avg_recall = np.mean(r_list_recall)
            avg_precision = np.mean(r_list_precision)
            results_file.write('Iterations: {:5d} iou: {:1.4f} dice: {:1.4f} recall: {:1.4f} precision: {:1.4f} \n'.format(iter*1000, avg_iou, avg_dice, avg_recall, avg_precision))
            print('Done!')
        results_file.close()
def modelInit():
    isPriv = False
    if arch_id > 10:
        isPriv = True

    if experiment != 'None':
        dilation_arr, isPriv, withASPP = PP.getExperimentInfo(experiment)
        model = exp_net_3D.getExpNet(num_labels, dilation_arr, isPriv, NoLabels2 = num_labels2, withASPP = withASPP)
    elif arch_id == 0:
        model = deeplab_resnet_3D.Res_Deeplab(num_labels)
    elif arch_id == 1:
        model = unet_3D.UNet3D(1, num_labels)
    elif arch_id == 2:
        model = highresnet_3D.getHRNet(num_labels)

    if model_path != 'none':
        if useGPU:
            #loading on GPU when model was saved on GPU
            saved_state_dict = torch.load(model_path)
        else:
            #loading on CPU when model was saved on GPU
            saved_state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
        model.load_state_dict(saved_state_dict)

    model.float()
    model.eval() # use_global_stats = True
    return model, isPriv
def modelInit():
    isPriv = False
    f_name = models_path.split('/')[-1]
    #load model
    if 'EXP3D' in f_name:
        experiment = f_name.replace('EXP3D_', '').replace('.pth', '').split('_')
        experiment = '_'.join(experiment[0:3])
        dilation_arr, isPriv, withASPP = PP.getExperimentInfo(experiment)
        model = exp_net_3D.getExpNet(num_labels, dilation_arr, isPriv, NoLabels2 = num_labels2, withASPP = withASPP)
    elif 'HR3D' in f_name:
        model = highresnet_3D.getHRNet(num_labels)
    elif 'DL3D' in f_name:
        model = deeplab_resnet_3D.Res_Deeplab(num_labels)
    elif 'UNET3D' in f_name:
        model = unet_3D.UNet3D(1, num_labels)
    else:
        print('No model available for this .pth')
        sys.exit()

    model.eval()

    if useGPU:
        model.cuda(gpu0)

    return model, isPriv
Example #5
def bias_adjust(cf=None,
                cinfo=None,
                cvariable=None,
                observations=ds_obs,
                output_gem_dir=output_gem_dir):
    print(cf)
    # Create new point forcing class
    cpt = PP.point_forcing(cinfo=cinfo,
                           ds_grid_obs=observations,
                           cvariable=cvariable)

    # Define calibration and evaluation periods
    wyrs = cpt.define_cal_val_periods(method='water_year')

    # Fit model
    (model_bias, true_bias) = cpt.fit_simple_bias_model(periods=wyrs)

    # Modify in place CHM forcing for variable X
    df_adj = cpt.apply_bias(periods=wyrs,
                            model_bias=model_bias,
                            cvariable=cvariable)

    # Write out to new output_dir
    file_out = os.path.join(output_gem_dir, cf + '.chm')
    cpt.write_to_ascii(df_out=df_adj, file_out=file_out)

    return (model_bias, true_bias)
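
The step comments in bias_adjust spell out the calibration flow: build a point-forcing object, split the record into calibration/validation water years, fit the bias model, apply it, and write the adjusted CHM forcing. A minimal driver sketch, assuming a dict named stations that maps forcing-file stems to station info (stations is a placeholder; ds_obs and output_gem_dir come from the surrounding module):

# Hypothetical driver: bias-adjust one variable at every station and keep
# the fitted biases for later inspection.
fitted = {}
for cf, cinfo in stations.items():
    fitted[cf] = bias_adjust(cf=cf,
                             cinfo=cinfo,
                             cvariable='t',
                             observations=ds_obs,
                             output_gem_dir=output_gem_dir)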
def get_data_from_chunk_v2(chunk):

    main_folder_path = '../../Data/MS2017a/'
    scans_folder_path = main_folder_path + 'scans/'

    img_type_path = 'pre/FLAIR.nii.gz'
    gt_type_path = 'wmh.nii.gz'

    scale = random.uniform(0.5, 1.3)
    dim = int(scale * 321)

    images = np.zeros((dim, dim, 1, len(chunk)))
    gt = np.zeros((dim, dim, 1, len(chunk)))
    for i, piece in enumerate(chunk):
        print(os.path.join(main_folder_path, piece))
        img_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece))
        flip_p = random.uniform(0, 1)

        img_temp = cv2.resize(img_temp, (321, 321)).astype(float)
        img_temp = img_temp.reshape([321, 321, 1])

        img_temp = scale_im(img_temp, scale)
        img_temp = flip(img_temp, flip_p)
        images[:, :, 0, i] = img_temp

        piece_gt = piece.replace('slices', 'gt_slices').replace('FLAIR', 'wmh')
        gt_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece_gt),
                                   makebin=onlyLesions)
        gt_temp = cv2.resize(gt_temp, (321, 321),
                             interpolation=cv2.INTER_NEAREST)
        gt_temp = gt_temp.reshape([321, 321, 1])
        gt_temp = scale_gt(gt_temp, scale)
        gt_temp = flip(gt_temp, flip_p)

        gt[:, :, 0, i] = gt_temp
        a = outS(321 * scale)

    labels = [resize_label_batch(gt, i) for i in [a, a, a, a]]

    #from dim1 x dim2 x 1 x batch -> batch x 1 x dim1 x dim2
    images = images.transpose((3, 2, 0, 1))
    images = torch.from_numpy(images).float()
    return images, labels
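
A brief usage sketch for this loader, assuming img_list holds relative FLAIR slice paths of the kind built inside the function (img_list itself is a placeholder):

# Hypothetical usage: one chunk of four slice paths gives one augmented batch.
chunk = img_list[0:4]
images, labels = get_data_from_chunk_v2(chunk)
# images: float tensor of shape (4, 1, dim, dim); labels: four copies of the
# ground truth resized to the network output resolution outS(321 * scale).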
def convertSize2(from_path, to_path, new_size, interpolation = 'interpolate'):
	if interpolation == 'interpolate':
		spline_order = [2]
	elif interpolation == 'nearest':
		spline_order = [0]
	else:
		raise ValueError("interpolation must be 'interpolate' or 'nearest'")

	img_np, affine = PP.numpyFromScan(from_path, get_affine = True)
	shape = img_np.shape
	new_affine = np.copy(affine)
	r1 = float(new_size[0]) / shape[0]
	r2 = float(new_size[1]) / shape[1] 
	r3 = float(new_size[2]) / shape[2] 
	new_affine[:,0] /= r1
	new_affine[:,1] /= r2
	new_affine[:,2] /= r3

	img_np = AUGM.applyScale([img_np], [r1,r2,r3], spline_order)[0].squeeze()

	PP.saveScan(img_np, new_affine, to_path)
	return new_affine
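
Because the voxel spacing grows as the grid shrinks, the affine columns are divided by the resize ratios r1..r3, so the scan keeps its physical extent. A usage sketch (the paths are placeholders):

# Resample a FLAIR volume to 200x200x100. For label volumes, pass
# interpolation='nearest' so no intermediate class values are introduced.
new_affine = convertSize2('scans/0/pre/FLAIR.nii.gz',
                          'scans/0/pre/FLAIR_rs.nii.gz',
                          (200, 200, 100),
                          interpolation='interpolate')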
def convertSize(from_path, to_path, new_size, interpolation = 'interpolate'):
	img = PP.numpyFromScan(from_path)
	shape = img.shape
	r1 = shape[0] / float(new_size[0])
	r2 = shape[1] / float(new_size[1])
	r3 = shape[2] / float(new_size[2])
	command = "mri_convert " + from_path + " " + to_path + " -ds " + str(r1) + " " + str(r2) + " " + str(r3) + " -rt " + interpolation
	print(command)
	#run the conversion command and wait for it to finish
	process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
	process.communicate()
Example #9
def trainLandmarks(main_folder_path=main_folder_path, postfix=postfix):
    scan_folders = glob.glob(main_folder_path + 'scans/*')
    FLAIR_path = '/pre/FLAIR' + postfix + '.nii.gz'
    m_arr = np.zeros([len(scan_folders), len(m_p)])

    for i, sf in enumerate(scan_folders):
        print "Landmark training: {:4d}/{:4d}\r".format(i, len(scan_folders)),
        sys.stdout.flush()

        img_str = sf + FLAIR_path
        img_np = PP.numpyFromScan(img_str)

        p, m = NORM.getLandmarks(img_np)
        mapped_m = np.array([int(NORM.mapLandmarks(p, s, x)) for x in m],
                            dtype=np.int64)
        m_arr[i, :] = mapped_m

    mean_m = np.mean(m_arr, axis=0, dtype=np.int64)

    NORM.writeHistInfo(save_path, pc, s, m_p, mean_m)
Example #10
def getScale(main_folder_path=main_folder_path, postfix=postfix):
    scan_folders = glob.glob(main_folder_path + 'scans/*')

    FLAIR_path = '/pre/FLAIR' + postfix + '.nii.gz'
    min_p = None
    max_p = None
    for i, sf in enumerate(scan_folders):
        print "Scale obtaining: {:4d}/{:4d} 		\r".format(i, len(scan_folders)),
        sys.stdout.flush()

        img_str = sf + FLAIR_path
        img_np = PP.numpyFromScan(img_str)
        p, m = NORM.getLandmarks(img_np)
        if min_p is None:
            min_p = p[0]
            max_p = p[1]
        if min_p > p[0]:
            min_p = p[0]
        if max_p < p[1]:
            max_p = p[1]
    return (min_p, max_p)
def normalizeScan(from_path, to_path, main_folder_path = main_folder_path):
	img_np, affine = PP.numpyFromScan(from_path, get_affine = True)
	img_np = NORM.applyNormalize(img_np.squeeze(), postfix, norm_method = norm_type[0], main_folder_path = main_folder_path)
	img_np = NORM.applyNormalize(img_np.squeeze(), postfix, norm_method = norm_type[1], main_folder_path = main_folder_path)
	PP.saveScan(img_np, affine, to_path)
def predict(img_path,
            model,
            num_labels,
            num_labels2,
            postfix,
            main_folder_path,
            eval_method,
            gpu0,
            useGPU,
            stride=50,
            patch_size=70,
            test_augm=True,
            extra_patch=30,
            priv_eval=True):
    #read image
    img = PP.numpyFromScan(img_path)
    #read wmh
    gt_path = img_path.replace('slices',
                               'gt_slices').replace('FLAIR',
                                                    'wmh').replace('/pre', '')
    gt, affine = PP.numpyFromScan(gt_path,
                                  get_affine=True,
                                  makebin=(num_labels == 2))

    gif_path = img_path.replace('scans', 'gifs').replace(
        'FLAIR', 'parcellation').replace('/pre', '')
    gif = PP.numpyFromScan(gif_path)

    img = img.transpose((3, 0, 1, 2))
    img = img[np.newaxis, :]
    gt = gt.transpose((3, 0, 1, 2))
    gif = gif.transpose((3, 0, 1, 2))

    if eval_method == 0:
        if useGPU:
            out1_v, out2_v = model(
                Variable(torch.from_numpy(img).float(),
                         volatile=True).cuda(gpu0))
        else:
            out1_v, out2_v = model(
                Variable(torch.from_numpy(img).float(), volatile=True))
        out1 = out1_v.data[0].cpu().numpy()
        out2 = out2_v.data[0].cpu().numpy()
        del out1_v, out2_v
    elif eval_method == 1:
        out1, out2 = predictByPatches(img,
                                      model,
                                      num_labels,
                                      num_labels2,
                                      useGPU,
                                      gpu0,
                                      stride=stride,
                                      test_augm=test_augm,
                                      patch_size=patch_size,
                                      extra_patch=extra_patch,
                                      priv_eval=priv_eval)
    out1 = out1.squeeze()
    out1 = np.argmax(out1, axis=0)
    out1 = out1.squeeze()

    out2 = out2.squeeze()
    out2 = np.argmax(out2, axis=0)
    out2 = out2.squeeze()

    #remove batch and label dimension
    img = img.squeeze()

    return img, gif, out1, gt, out2, affine
def trainModel(model):
    if useGPU:
        model.cuda(gpu0)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr = base_lr)

    optimizer.zero_grad()
    print(model)
    curr_val = 0
    best_val = 0
    val_change = False
    loss_arr = np.zeros([iter_size])
    loss_arr_i = 0
    stage = 0
    print('---------------')
    print('STAGE ' + str(stage))
    print('---------------')

    for iter in range(iter_low, iter_high):
        if iter > max_iter_stage0 and stage != 1:
            print('---------------')
            print('Stage 1')
            print('---------------')
            stage = 1

        if train_method == 0:
            img_b, label_b, _ = PP.extractImgBatch(batch_size, img_list, img_dims, onlyLesions, 
                                                    main_folder_path = '../Data/MS2017b/')
        elif train_method == 1 or train_method == 2:
            if stage == 0:
                batch_size = 1
                img_b, label_b, _ = PP.extractPatchBatch(batch_size, patch_size_stage0, img_list, onlyLesions, center_pixel = to_center_pixel, main_folder_path = '../Data/MS2017b/', postfix=postfix)
            else:
                batch_size = 1
                img_b, label_b, _ = PP.extractPatchBatch(batch_size, patch_size, img_list, onlyLesions, center_pixel = to_center_pixel, main_folder_path = '../Data/MS2017b/', postfix=postfix)
        else:
            print('Invalid training method format')
            sys.exit()

        if stage == 0:
            img_b, label_b = AUG.augmentPatchLossLess([img_b, label_b])
        img_b, label_b = AUG.augmentPatchLossy([img_b, label_b])
        #img_b, label_b = AUG.augmentPatchLossless(img_b, label_b)
        #img_b is of shape      (batch_num) x 1 x dim1 x dim2 x dim3
        #label_b is of shape    (batch_num) x 1 x dim1 x dim2 x dim3
        #batch_num should be 1 since too memory intensive

        label_b = label_b.astype(np.int64)
        #convert label from (batch_num x 1 x dim1 x dim2 x dim3)
        #               to  ((batch_numxdim1*dim2*dim3) x 3) (one hot)
        temp = label_b.reshape([-1])
        label_b = np.zeros([temp.size, num_labels])
        label_b[np.arange(temp.size),temp] = 1
        label_b = torch.from_numpy(label_b).float()

        imgs = torch.from_numpy(img_b).float()

        if useGPU:
            imgs, label_b = Variable(imgs).cuda(gpu0), Variable(label_b).cuda(gpu0)
        else:
            imgs, label_b = Variable(imgs), Variable(label_b)

        #---------------------------------------------
        #out size is      (1, 3, dim1, dim2, dim3)
        #---------------------------------------------
        out = model(imgs)
        out = out.permute(0,2,3,4,1).contiguous()
        out = out.view(-1, num_labels)
        #---------------------------------------------
        #out size is      (1 * dim1 * dim2 * dim3, 3)
        #---------------------------------------------

        #loss function
        m = nn.Softmax()
        loss = lossF.simple_dice_loss3D(m(out), label_b)

        loss /= iter_size
        loss.backward()

        loss_val = loss.data.cpu().numpy()
        loss_arr[loss_arr_i] = loss_val
        loss_arr_i = (loss_arr_i + 1) % iter_size

        if iter % 1 == 0:  # % 1 is always 0, so progress prints every iteration
            if val_change:
                print "iter = {:6d}/{:6d}       Loss: {:1.6f}       Val Score: {:1.6f}     \r".format(iter-1, max_iter, float(loss_val)*iter_size, curr_val),
                sys.stdout.flush()
                print ""
                val_change = False
            print "iter = {:6d}/{:6d}       Loss: {:1.6f}       Val Score: {:1.6f}     \r".format(iter, max_iter, float(loss_val)*iter_size, curr_val),
            sys.stdout.flush()
        if iter % 1000 == 0:
            val_change = True
            curr_val = EF.evalModelX(model, num_labels, postfix, main_folder_path, (train_method != 0), gpu0, useGPU, eval_metric = 'iou', patch_size = patch_size, extra_patch = 5)
            if curr_val > best_val:
                best_val = curr_val
                print('\nSaving better model...')
                torch.save(model.state_dict(), model_file_path)
            logfile.write("iter = {:6d}/{:6d}       Loss: {:1.6f}       Val Score: {:1.6f}     \n".format(iter, max_iter, np.sum(loss_arr), curr_val))
            logfile.flush()
        if iter % iter_size == 0:
            optimizer.step()
            optimizer.zero_grad()

        del out, loss
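
The one-hot conversion inside trainModel flattens the label volume and indexes with np.arange. The same trick on a tiny standalone array:

import numpy as np

labels = np.array([0, 2, 1, 0], dtype=np.int64)  # flattened label volume
one_hot = np.zeros([labels.size, 3])             # num_labels = 3
one_hot[np.arange(labels.size), labels] = 1
# one_hot == [[1,0,0], [0,0,1], [0,1,0], [1,0,0]]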
onlyLesions = (num_labels == 2)

cudnn.enabled = useGPU

if experiment != 'None':
    snapshot_prefix = 'EXP3D' + '_' + experiment + '_' + loss_name + '_' + str(train_method)
else:
    if arch_id == 0:
        snapshot_prefix = 'DL3D_' + loss_name + '_' + str(train_method) + '_' + PP.getTime()
    elif arch_id == 1:
        snapshot_prefix = 'UNET3D_' + loss_name + '_' + str(train_method) + '_' + PP.getTime()
    elif arch_id == 2:
        snapshot_prefix = 'HR3D_' + loss_name + '_' + str(train_method) + '_' + PP.getTime()
to_center_pixel = False
center_pixel_folder_path, locs_lesion, locs_other = (None, None, None)
if train_method == 2:
    to_center_pixel = True
    if not os.path.exists(os.path.join(main_folder_path, 'centerPixelPatches' + postfix + '_' + str(patch_size))):
        print('Pixel patch folder does not exist')
        sys.exit()
#load the image list
img_list = PP.read_file(list_path)

results_folder = 'train_results/'
Example #15
                         snapPrefix + str(iter * 1000) + '.pth'))
    else:
        #loading on CPU when model was saved on GPU
        saved_state_dict = torch.load(
            os.path.join(snapshots_path,
                         snapPrefix + str(iter * 1000) + '.pth'),
            map_location=lambda storage, loc: storage)

    model.load_state_dict(saved_state_dict)
    pytorch_list = []
    counter = 0
    for img_str in img_list:
        try:
            #print(img_str[:-1])
            img = np.zeros((513, 513, 1))
            img_temp = PP.numpyFromScan(
                os.path.join(main_folder_path, img_str[:-1]))

            img_original = img_temp
            img[:img_temp.shape[0], :img_temp.shape[1], :] = img_temp

            gt_str = img_str.replace('slices',
                                     'gt_slices').replace('FLAIR', 'wmh')
            gt = PP.numpyFromScan(os.path.join(main_folder_path, gt_str[:-1]),
                                  makebin=onlyLesions)
        except IOError:
            continue

        if useGPU:
            output = model(
                Variable(torch.from_numpy(img[np.newaxis, :].transpose(
                    0, 3, 1, 2)).float(),
Example #16
                if key == 'snippet':
                    googleSearchlist.append(value)
        gSnippetDf = pd.DataFrame(
            googleSearchlist,
            columns=[
                'Google search snippets: ' + ' : Total Result count : ' +
                resultsDict['total']
            ])
        print(gSnippetDf)
        googleSearchSnippetlist.append(googleSearchlist)
        print(googleSearchSnippetlist)
    return googleSearchSnippetlist


# sentence = ["AI and humans have always been friendly.","AI is our friend and it has been friendly."
PP.displayInsertSentencesLayout()
sentence = PP.getSentences()
googleSearchSnippetlist = googleSearch_snippet(sentence)

print(googleSearchSnippetlist)
print(len(googleSearchSnippetlist))
# sentence = [S1,S2] ################## this

#------------------- Text processing-------------------------#

# Text processing
import nltk
import string
import math
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
Example #17
                if webKey == "value":
                    for valueItems in webValueItems:
                        for valueKey, valueItem in valueItems.items():
                            if valueKey == "snippet":
                                bingSearchSnippetlist.append(valueItem)

    print(searchSentence)
    bingdf = pd.DataFrame(
        bingSearchSnippetlist,
        columns=['Bing search snippets for search terms : ' + searchSentence])
    display(bingdf)
    return bingSearchSnippetlist


# sentence = ["AI and humans have always been friendly.","AI is our friend and it has been friendly."
PP.displayInsertSentencesLayout()
bingSearchSnippetlist = bingSearch_snippet(PP.getSentences()[0])
First_snippets = bingSearchSnippetlist
bingSearchSnippetlist = bingSearch_snippet(PP.getSentences()[1])
Second_snippets = bingSearchSnippetlist
Snippets = [First_snippets, Second_snippets]

print(Snippets)
print(len(Snippets))

# NOTE: bingSearchSnippetlist should have the same form as the Google search
# output: [10 snippets for sentence 1, 10 snippets for sentence 2]

# sentence = [S1,S2] ################## this

#------------------- Text processing-------------------------#
import os

import nibabel as nib
import torch

import PP
import RS

#step 1: read image from input folder
#step 2: resize image to 200x200x100 + apply normalizations
#step 3: make prediction by patches (with augmentations)
#step 4: save prediction to output folder
#step 5: resize prediction back to original size of image
#(steps 1-2 appear below; steps 3-5 are sketched after this snippet)

img_path = os.path.join(inputDir, 'FLAIR.nii.gz')
img_path_rs = os.path.join(outputDir, 'FLAIR_rs.nii.gz')

wmh_path_rs = os.path.join(outputDir, 'wmh_rs.nii.gz')
wmh_path = os.path.join(outputDir, 'result.nii.gz')

old_size = PP.numpyFromScan(img_path).shape

new_size = [200, 200, 100]
num_labels = 2

#convert scan to 200x200x100
RS.convertSize2(img_path, img_path_rs, new_size)
#get the affine value
affine_rs = nib.load(img_path_rs).get_affine()

#normalize using histogram and variance normalization
RS.normalizeScan(img_path_rs, img_path_rs, main_folder_path=main_folder_path)

#read preprocessed img
img, affine = PP.numpyFromScan(img_path_rs, get_affine=True)
img = img.transpose((3, 0, 1, 2))
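
Steps 3-5 from the comment block above are cut off in this snippet. A sketch of how they could continue, reusing the calls that appear elsewhere on this page and assuming a loaded model plus useGPU/gpu0 flags (the exact argument lists are assumptions):

#step 3: predict by patches on the batch-shaped image
img = img[np.newaxis, :]
out = predictByPatches(img, model, num_labels, useGPU, gpu0, test_augm=True)
out = np.argmax(out.squeeze(), axis=0)
#step 4: save the prediction in the resized space
PP.saveScan(out, affine_rs, wmh_path_rs)
#step 5: resize the prediction back to the original image size
RS.convertSize2(wmh_path_rs, wmh_path, old_size[0:3], interpolation='nearest')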
def trainModelPriv(model):
    if useGPU:
        model.cuda(gpu0)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr = base_lr)
    optimizer.zero_grad()
    print(model)
    curr_val1 = 0
    curr_val2 = 0
    best_val2 = 0
    val_change = False
    loss_arr1 = np.zeros([iter_size])
    loss_arr2 = np.zeros([iter_size])
    loss_arr_i = 0

    stage = 0
    print('---------------')
    print('STAGE ' + str(stage))
    print('---------------')

    for iter in range(iter_low, iter_high):
        if iter > max_iter_stage0 and stage != 1:
            print('---------------')
            print('Stage 1')
            print('---------------')
            stage = 1

        if train_method == 0:
            img_b, label_b, gif_b = PP.extractImgBatch(batch_size, img_list, img_dims, onlyLesions, 
                                                    main_folder_path = '../Data/MS2017b/', with_priv = True)
        elif train_method == 1 or train_method == 2:
            if stage == 0:
                batch_size = 5
                img_b, label_b, gif_b = PP.extractPatchBatch(batch_size, patch_size_stage0, img_list, onlyLesions,
                                                            center_pixel = to_center_pixel, 
                                                            main_folder_path = '../Data/MS2017b/', 
                                                            postfix=postfix, with_priv= True)
            else:
                batch_size = 1
                img_b, label_b, gif_b = PP.extractPatchBatch(batch_size, patch_size, img_list, onlyLesions, 
                                                    center_pixel = to_center_pixel, 
                                                    main_folder_path = '../Data/MS2017b/', 
                                                    postfix=postfix, with_priv= True)
        else:
            print('Invalid training method format')
            sys.exit()

        img_b, label_b, gif_b = AUG.augmentPatchLossy([img_b, label_b, gif_b])

        #img_b is of shape      (batch_num) x 1 x dim1 x dim2 x dim3
        #label_b is of shape    (batch_num) x 1 x dim1 x dim2 x dim3

        label_b = label_b.astype(np.int64)

        #convert label from (batch_num x 1 x dim1 x dim2 x dim3)
        #               to  ((batch_numxdim1*dim2*dim3) x 3) (one hot)
        temp = label_b.reshape([-1])
        label_b = np.zeros([temp.size, num_labels])
        label_b[np.arange(temp.size),temp] = 1
        label_b = torch.from_numpy(label_b).float()

        imgs = torch.from_numpy(img_b).float()

        if useGPU:
            imgs, label_b = Variable(imgs).cuda(gpu0), Variable(label_b).cuda(gpu0)
        else:
            imgs, label_b = Variable(imgs), Variable(label_b)

        gif_b = setupGIFVar(gif_b)

        #---------------------------------------------
        #out size is      (1, 3, dim1, dim2, dim3)
        #---------------------------------------------
        #out1 is extra info
        out1, out2 = model(imgs)

        out1 = out1.permute(0,2,3,4,1).contiguous()
        out1 = out1.view(-1, num_labels2)

        out2 = out2.permute(0,2,3,4,1).contiguous()
        out2 = out2.view(-1, num_labels)
        #---------------------------------------------
        #out size is      (1 * dim1 * dim2 * dim3, 3)
        #---------------------------------------------
        m2 = nn.Softmax()
        loss2 = lossF.simple_dice_loss3D(m2(out2), label_b)
        m1 = nn.LogSoftmax()
        loss1 = F.nll_loss(m1(out1), gif_b)

        loss1 /= iter_size
        loss2 /= iter_size

        torch.autograd.backward([loss1, loss2])

        loss_val1 = float(loss1.data.cpu().numpy())
        loss_arr1[loss_arr_i] = loss_val1

        loss_val2 = float(loss2.data.cpu().numpy())
        loss_arr2[loss_arr_i] = loss_val2

        loss_arr_i = (loss_arr_i + 1) % iter_size

        if iter % 1 == 0:  # % 1 is always 0, so progress prints every iteration
            if val_change:
                print "iter = {:6d}/{:6d}       Loss_main: {:1.6f}    Loss_secondary: {:1.6f}       Val Score: {:1.6f}      Val Score secondary: {:1.6f}     \r".format(iter-1, max_iter, loss_val2*iter_size, loss_val1*iter_size, curr_val2, curr_val1),
                sys.stdout.flush()
                print ""
                val_change = False
            print "iter = {:6d}/{:6d}       Loss_main: {:1.6f}      Loss_secondary: {:1.6f}       Val Score main: {:1.6f}      Val Score secondary: {:1.6f}     \r".format(iter, max_iter, loss_val2*iter_size, loss_val1*iter_size, curr_val2, curr_val1),
            sys.stdout.flush()
        if iter % 2000 == 0:
            val_change = True
            curr_val1, curr_val2 = EFP.evalModelX(model, num_labels, num_labels2, postfix, main_folder_path, (train_method != 0), gpu0, useGPU, eval_metric = 'iou', patch_size = patch_size, extra_patch = 5, priv_eval = True)
            if curr_val2 > best_val2:
                best_val2 = curr_val2
                torch.save(model.state_dict(), model_file_path)
                print('\nSaving better model...')
            logfile.write("iter = {:6d}/{:6d}       Loss_main: {:1.6f}      Loss_secondary: {:1.6f}       Val Score main: {:1.6f}      Val Score secondary: {:1.6f}  \n".format(iter, max_iter, np.sum(loss_arr2), np.sum(loss_arr1), curr_val2, curr_val1))
            logfile.flush()
        if iter % iter_size == 0:
            optimizer.step()
            optimizer.zero_grad()

        del out1, out2, loss1, loss2
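
trainModelPriv backpropagates both objectives with a single torch.autograd.backward call, which traverses the shared graph once and accumulates the gradients of every scalar loss. A minimal standalone sketch of the pattern (written against current PyTorch; the Variable-era call above behaves the same):

import torch

x = torch.randn(4, requires_grad=True)
loss_main = ((x - 1) ** 2).sum()  # stands in for the dice loss
loss_aux = (x ** 2).sum()         # stands in for the secondary NLL loss
torch.autograd.backward([loss_aux, loss_main])
print(x.grad)  # gradients from both losses, accumulated in one pass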
Example #20
import PP
import config

app = PP.create_app(config)
# This is only used when running locally. When running live, gunicorn runs
# the application.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8080, debug=True)
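
The comment above defers production serving to gunicorn. A typical invocation, assuming this file is saved as main.py (the filename is an assumption), would be:

# gunicorn imports the module and binds the WSGI app object directly,
# so app.run() is never called in production:
#   gunicorn --bind 0.0.0.0:8080 main:app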
Example #21
import argparse
import sys

sys.path.append('/home/srm/aa/PYTHON/PROJECTS/DBI')

import PP

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-table', help="Enter the mysql database table name.")
    parser.add_argument('-Write_CSV',
                        help="To write csv file name Enter file name.")
    parser.add_argument('-Write_EXCEL',
                        help="To Write excel file Enter file name.")
    parser.add_argument('-Write_JSON',
                        help="To write json file Enter file name.")
    args = parser.parse_args()

    # NOTE: the write options below reuse 'aa', so -table must be passed as well
    if args.table:
        aa = PP.Read_DBI(args.table)
        print(aa)

    if args.Write_EXCEL:
        print(args.Write_EXCEL)
        PP.Write_EXCEL(args.Write_EXCEL, aa)

    if args.Write_JSON:
        PP.Write_JSON(args.Write_JSON, aa)

    if args.Write_CSV:
        PP.Write_CSV(args.Write_CSV, aa)
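
Example invocations for this CLI, assuming the script is saved as dbi_export.py (the filename is a placeholder):

#   python dbi_export.py -table employees
#   python dbi_export.py -table employees -Write_CSV employees.csv
#   python dbi_export.py -table employees -Write_JSON employees.json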