def main():
    options = parse_inputs()
    c = color_codes()
    experimental = options['experimental']
    exp_s = (c['b'] + '(experimental %d)' % experimental
             if experimental else c['b'] + '(baseline)')
    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
          'Starting cross-validation ' + exp_s + c['nc'])

    # N-fold cross validation main loop
    # (we'll do 2 training iterations with testing for each patient)
    data_names, label_names = get_names_from_path(options)
    folds = len(data_names)
    fold_generator = izip(
        nfold_cross_validation(data_names, label_names, n=folds),
        xrange(folds))
    dsc_results = list()
    for (train_data, train_labels, test_data, test_labels), i in fold_generator:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['nc'] +
              'Fold %d/%d: ' % (i + 1, folds) + c['g'] +
              'Number of training/testing images (%d=%d/%d)' %
              (len(train_data), len(train_labels), len(test_data)) + c['nc'])
        # Prepare the data relevant to the leave-one-out
        # (subtract the patient from the dataset and set the path).
        # Also, prepare the network.
        if not check_image_list(test_data, options):
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['nc'] +
                  c['g'] + 'Training' + c['nc'])
            net = train_net(i, train_data, train_labels, options)
        else:
            net = None

        # Then we test the net.
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['nc'] +
              c['g'] + 'Testing' + c['nc'])
        for p, gt_name in zip(test_data, test_labels):
            image, gt = test_net(net, p, gt_name, options)
            p_name = '-'.join(
                p[0].rsplit('/')[-1].rsplit('.')[0].rsplit('-')[:-1])
            vals = np.unique(gt.flatten())
            # Relabel the ground truth to consecutive labels (0, 1, 2, ...).
            gt_mask = np.sum(map(
                lambda (l, val): np.array(gt == val, dtype=np.uint8) * l,
                enumerate(vals)), axis=0)
            results = (str.capitalize(p_name),
                       dsc_seg(gt_mask == 1, image == 1),
                       dsc_seg(gt_mask == 2, image == 2),
                       dsc_seg(gt_mask == 3, image == 3))
            dsc_results.append(results)
            print('%s DSC: %f/%f/%f' % results)

    # Sort by the numeric part of the patient name and report the summary.
    dsc_results = sorted(
        dsc_results, cmp=lambda x, y: int(x[0][8:]) - int(y[0][8:]))
    for results in dsc_results:
        print(c['c'] + '%s DSC: \033[32;1m%f/%f/%f' % results + c['nc'])
    f_dsc = tuple(
        np.asarray([results[1:] for results in dsc_results]).mean(axis=0))
    print(c['c'] + 'Final results DSC: \033[32;1m%f/%f/%f' % f_dsc + c['nc'])
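
# For reference, dsc_seg used above is assumed to compute the Dice similarity
# coefficient (DSC) between two binary masks. A minimal numpy sketch of that
# metric (an assumption about the helper, not its actual implementation):
import numpy as np

def dsc_seg_sketch(gt_mask, seg_mask):
    # DSC = 2 * |A n B| / (|A| + |B|); returned as 0 when both masks are empty.
    gt_mask = gt_mask.astype(bool)
    seg_mask = seg_mask.astype(bool)
    intersection = np.count_nonzero(np.logical_and(gt_mask, seg_mask))
    denominator = np.count_nonzero(gt_mask) + np.count_nonzero(seg_mask)
    return 2.0 * intersection / denominator if denominator > 0 else 0.0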
def get_lesion_metrics(gt_lesion_mask, lesion_unet, spacing, metric_file,
                       patient, general_flag=True, fold=1):
    if general_flag:
        dist = average_surface_distance(gt_lesion_mask, lesion_unet, spacing)
        tpfv = tp_fraction_seg(gt_lesion_mask, lesion_unet)
        fpfv = fp_fraction_seg(gt_lesion_mask, lesion_unet)
        dscv = dsc_seg(gt_lesion_mask, lesion_unet)
        tpfl = tp_fraction_det(gt_lesion_mask, lesion_unet)
        fpfl = fp_fraction_det(gt_lesion_mask, lesion_unet)
        dscl = dsc_det(gt_lesion_mask, lesion_unet)
        tp = true_positive_det(lesion_unet, gt_lesion_mask)
        gt_d = num_regions(gt_lesion_mask)
        lesion_s = num_voxels(lesion_unet)
        gt_s = num_voxels(gt_lesion_mask)
        pdsc = probabilistic_dsc_seg(gt_lesion_mask, lesion_unet)
        if metric_file:
            metric_file.write(
                '%s;%s;%s;%f;%f;%f;%f;%f;%f;%f;%d;%d;%d;%d\n' % (
                    patient + 'gt', patient + 'pd', str(fold),
                    dist, tpfv, fpfv, dscv, tpfl, fpfl, dscl,
                    tp, gt_d, lesion_s, gt_s))
        else:
            print('SurfDist TPFV FPFV DSCV '
                  'TPFL FPFL DSCL '
                  'TPL GTL Voxels GTV PrDSC')
            print('%f %f %f %f %f %f %f %d %d %d %d %f' % (
                dist, tpfv, fpfv, dscv, tpfl, fpfl, dscl,
                tp, gt_d, lesion_s, gt_s, pdsc))
        return dscv
    else:
        sizes = [3, 11, 51]
        tpf, fpf, dscd, dscs = analysis_by_sizes(
            gt_lesion_mask, lesion_unet, sizes)
        names = '%s;%s;' % (patient + 'gt', patient + 'pd')
        measures = ';'.join([
            '%f;%f;%f;%f' % (tpf_i, fpf_i, dscd_i, dscs_i)
            for tpf_i, fpf_i, dscd_i, dscs_i in zip(tpf, fpf, dscd, dscs)
        ])
        if metric_file:
            metric_file.write(names + measures + '\n')
        else:
            intervals = [
                '\t\t[%d-%d)\t\t|' % (mins, maxs)
                for mins, maxs in zip(sizes[:-1], sizes[1:])
            ]
            intervals = ''.join(intervals) + '\t\t[%d-inf)\t|' % sizes[-1]
            measures_s = 'TPF\tFPF\tDSCd\tDSCs\t|' * len(sizes)
            measures = ''.join([
                '%.2f\t%.2f\t%.2f\t%.2f\t|' % (tpf_i, fpf_i, dscd_i, dscs_i)
                for tpf_i, fpf_i, dscd_i, dscs_i in zip(tpf, fpf, dscd, dscs)
            ])
            print(intervals)
            print(measures_s)
            print(measures)
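
# num_voxels and num_regions above are assumed to count foreground voxels and
# connected lesion components, respectively. Hypothetical sketches built on
# scipy.ndimage (assumptions, not the project's actual helpers):
import numpy as np
from scipy import ndimage

def num_voxels_sketch(mask):
    # Number of non-zero (lesion) voxels in a binary mask.
    return np.count_nonzero(mask)

def num_regions_sketch(mask):
    # Number of connected components; scipy's default structuring element
    # gives 6-connectivity in 3D.
    _, n_labels = ndimage.label(mask.astype(bool))
    return n_labels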
def label_level_evaluation(
        path='/media/lele/DATA/brain/Brats17TrainingData/HGG7/'):
    patients = os.listdir(path)
    print(len(patients))
    pixel_accuracy_instance = []
    mean_IU_instance = []
    mean_accuracy_instance = []
    frequency_weighted_IU_instance = []
    label0 = 0
    label1 = 0
    label2 = 0
    label4 = 0
    dsc = []
    for patient in patients:
        if patient[0] != 'B':
            continue
        p_name = patient
        patient = path + patient + '/'
        fs = os.listdir(patient)
        if len(fs) == 5:
            continue
        gt_path = patient
        seg_path = patient
        no_file = 0
        for f in fs:
            if f[-10:-7] == 'seg':
                gt_path = gt_path + f
            if f[-10:-7] == 'est' and '.e6.' in f:
                seg_path = seg_path + f
                no_file = 1
        if no_file == 0:
            continue
        seg_3d = nii2np(seg_path)
        gt_3d = nii2np(gt_path)
        labels = np.unique(gt_3d.flatten())
        results = (p_name, ) + tuple(
            [dsc_seg(gt_3d == l, seg_3d == l) for l in labels])
        text = 'Subject %s DSC: ' + '/'.join(['%f' for _ in labels[1:]])
        # print(text % results)
        if results[4] + results[2] + results[3] > 0.1:
            dsc.append(results[1:])
    for i in range(len(dsc)):
        # print(dsc[i])
        label0 += dsc[i][0]
        label1 += dsc[i][1]
        label2 += dsc[i][2]
        label4 += dsc[i][3]
    label0 = label0 / len(dsc)
    label1 = label1 / len(dsc)
    label2 = label2 / len(dsc)
    label4 = label4 / len(dsc)
    print(label0)
    print(label1)
    print(label2)
    print(label4)
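
# nii2np above is assumed to load a NIfTI volume into a numpy array. A
# plausible sketch with nibabel of this code's era (an assumption about the
# helper, not its actual implementation):
import nibabel as nib

def nii2np_sketch(nii_path):
    # Load the NIfTI file and return its voxel data as a numpy array.
    return nib.load(nii_path).get_data()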
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net architecture parameters
    sequential = options['sequential']
    dfactor = options['dfactor']
    # Prepare the net hyperparameters
    num_classes = 5
    epochs = options['epochs']
    padding = options['padding']
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['batch_size']
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['n_filters']
    filters_list = n_filters if len(n_filters) > 1 else n_filters * conv_blocks
    conv_width = options['conv_width']
    kernel_size_list = conv_width if isinstance(
        conv_width, list) else [conv_width] * conv_blocks
    balanced = options['balanced']
    # Data loading parameters
    preload = options['preload']
    queue = options['queue']

    # Prepare the sufix that will be added to the results
    # for the net and images
    path = options['dir_name']
    filters_s = 'n'.join(['%d' % nf for nf in filters_list])
    conv_s = 'c'.join(['%d' % cs for cs in kernel_size_list])
    s_s = '.s' if sequential else '.f'
    ub_s = '.ub' if not balanced else ''
    params_s = (ub_s, dfactor, s_s, patch_width, conv_s,
                filters_s, dense_size, epochs, padding)
    sufix = '%s.D%d%s.p%d.c%s.n%s.d%d.e%d.pad_%s.' % params_s
    n_channels = np.count_nonzero([
        options['use_flair'],
        options['use_t2'],
        options['use_t1'],
        options['use_t1ce']
    ])

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
          'Starting cross-validation' + c['nc'])
    # N-fold cross validation main loop
    # (we'll do 2 training iterations with testing for each patient)
    data_names, label_names = get_names_from_path(options)
    folds = options['folds']
    fold_generator = izip(
        nfold_cross_validation(data_names, label_names,
                               n=folds, val_data=0.25),
        xrange(folds))
    dsc_results = list()
    for (train_data, train_labels, val_data, val_labels,
         test_data, test_labels), i in fold_generator:
        print(
            c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['nc'] +
            'Fold %d/%d: ' % (i + 1, folds) + c['g'] +
            'Number of training/validation/testing images (%d=%d/%d=%d/%d)' %
            (len(train_data), len(train_labels), len(val_data),
             len(val_labels), len(test_data)) + c['nc'])
        # Prepare the data relevant to the leave-one-out
        # (subtract the patient from the dataset and set the path).
        # Also, prepare the network.
        net_name = os.path.join(
            path, 'baseline-brats2017.fold%d' % i + sufix + 'mdl')

        # First we check that we did not train for that patient,
        # in order to save time.
        try:
            # net_name_before = os.path.join(
            #     path, 'baseline-brats2017.fold0.D500.f.p13.c3c3c3c3c3.'
            #     'n32n32n32n32n32.d256.e1.pad_valid.mdl')
            net = keras.models.load_model(net_name)
        except IOError:
            print('===============================================================')
            # NET definition using Keras
            train_centers = get_cnn_centers(train_data[:, 0], train_labels,
                                            balanced=balanced)
            val_centers = get_cnn_centers(val_data[:, 0], val_labels,
                                          balanced=balanced)
            train_samples = len(train_centers) / dfactor
            val_samples = len(val_centers) / dfactor
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  'Creating and compiling the model ' + c['b'] +
                  '(%d samples)' % train_samples + c['nc'])
            train_steps_per_epoch = -(-train_samples / batch_size)
            val_steps_per_epoch = -(-val_samples / batch_size)
            input_shape = (n_channels, ) + patch_size

            # This architecture is based on the functional Keras API to
            # introduce 3 output paths:
            # - Whole tumor segmentation
            # - Core segmentation (including whole tumor)
            # - Whole segmentation (tumor, core and enhancing parts)
            # The idea is to let the network work on the three parts to
            # improve the multiclass segmentation.
            # merged_inputs = Input(shape=(4,) + patch_size,
            #                       name='merged_inputs')
            # flair = merged_inputs
            model = Sequential()
            model.add(
                Conv3D(64, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first',
                       input_shape=(4, options['patch_width'],
                                    options['patch_width'],
                                    options['patch_width'])))
            model.add(
                Conv3D(64, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first'))
            model.add(
                MaxPooling3D(pool_size=(3, 3, 3),
                             strides=2,
                             data_format='channels_first'))
            model.add(
                Conv3D(128, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first'))
            model.add(
                Conv3D(128, (3, 3, 3),
                       strides=1,
                       padding='same',
                       activation='relu',
                       data_format='channels_first'))
            model.add(
                MaxPooling3D(pool_size=(3, 3, 3),
                             strides=2,
                             data_format='channels_first'))
            model.add(Flatten())
            model.add(Dense(256, activation='relu'))
            model.add(Dropout(0.5))
            model.add(Dense(num_classes, activation='softmax'))
            net = model
            # net_name_before = os.path.join(
            #     path, 'baseline-brats2017.fold0.D500.f.p13.c3c3c3c3c3.'
            #     'n32n32n32n32n32.d256.e1.pad_valid.mdl')
            # net = keras.models.load_model(net_name_before)
            net.compile(optimizer='sgd',
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])

            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  'Training the model with a generator for ' + c['b'] +
                  '(%d parameters)' % net.count_params() + c['nc'])
            print(net.summary())
            net.fit_generator(
                generator=load_patch_batch_train(
                    image_names=train_data,
                    label_names=train_labels,
                    centers=train_centers,
                    batch_size=batch_size,
                    size=patch_size,
                    # fc_shape=patch_size,
                    nlabels=num_classes,
                    dfactor=dfactor,
                    preload=preload,
                    split=not sequential,
                    datatype=np.float32),
                validation_data=load_patch_batch_train(
                    image_names=val_data,
                    label_names=val_labels,
                    centers=val_centers,
                    batch_size=batch_size,
                    size=patch_size,
                    # fc_shape=patch_size,
                    nlabels=num_classes,
                    dfactor=dfactor,
                    preload=preload,
                    split=not sequential,
                    datatype=np.float32),
                # workers=queue,
                steps_per_epoch=train_steps_per_epoch,
                validation_steps=val_steps_per_epoch,
                max_q_size=queue,
                epochs=epochs)
            net.save(net_name)

        # Then we test the net.
        for p, gt_name in zip(test_data, test_labels):
            p_name = p[0].rsplit('/')[-2]
            patient_path = '/'.join(p[0].rsplit('/')[:-1])
            outputname = os.path.join(
                patient_path, 'deep-brats17' + sufix + 'test.nii.gz')
            gt_nii = load_nii(gt_name)
            gt = np.copy(gt_nii.get_data()).astype(dtype=np.uint8)
            try:
                load_nii(outputname)
            except IOError:
                roi_nii = load_nii(p[0])
                roi = roi_nii.get_data().astype(dtype=np.bool)
                centers = get_mask_voxels(roi)
                test_samples = np.count_nonzero(roi)
                image = np.zeros_like(roi).astype(dtype=np.uint8)
                print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                      '<Creating the probability map ' + c['b'] + p_name +
                      c['nc'] + c['g'] +
                      ' (%d samples)>' % test_samples + c['nc'])
                test_steps_per_epoch = -(-test_samples / batch_size)
                y_pr_pred = net.predict_generator(
                    generator=load_patch_batch_generator_test(
                        image_names=p,
                        centers=centers,
                        batch_size=batch_size,
                        size=patch_size,
                        preload=preload,
                    ),
                    steps=test_steps_per_epoch,
                    max_q_size=queue)
                [x, y, z] = np.stack(centers, axis=1)
                if not sequential:
                    tumor = np.argmax(y_pr_pred[0], axis=1)
                    y_pr_pred = y_pr_pred[-1]
                    roi = np.zeros_like(roi).astype(dtype=np.uint8)
                    roi[x, y, z] = tumor
                    roi_nii.get_data()[:] = roi
                    roiname = os.path.join(
                        patient_path,
                        'deep-brats17' + sufix + 'test.roi.nii.gz')
                    roi_nii.to_filename(roiname)
                y_pred = np.argmax(y_pr_pred, axis=1)
                image[x, y, z] = y_pred
                # Post-processing (basically keep the biggest connected region)
                image = get_biggest_region(image)
                labels = np.unique(gt.flatten())
                results = (p_name, ) + tuple(
                    [dsc_seg(gt == l, image == l) for l in labels[1:]])
                text = 'Subject %s DSC: ' + '/'.join(
                    ['%f' for _ in labels[1:]])
                print(text % results)
                dsc_results.append(results)
                print(c['g'] + ' -- Saving image ' + c['b'] +
                      outputname + c['nc'])
                roi_nii.get_data()[:] = image
                roi_nii.to_filename(outputname)
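
# get_biggest_region above is assumed to keep only the largest connected
# component of the label map as post-processing. A hypothetical sketch with
# scipy.ndimage (an assumption, not the project's actual implementation):
import numpy as np
from scipy import ndimage

def get_biggest_region_sketch(labels_image):
    # Connected components over the binary foreground (any label > 0).
    blobs, n_blobs = ndimage.label(labels_image.astype(bool))
    if n_blobs == 0:
        return labels_image
    sizes = np.bincount(blobs.ravel())
    sizes[0] = 0  # ignore the background component
    biggest = np.argmax(sizes)
    # Zero out every voxel outside the biggest component.
    return labels_image * (blobs == biggest)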
def check_dsc(gt_name, image, nlabels):
    gt_nii = load_nii(gt_name)
    gt = np.minimum(gt_nii.get_data(), nlabels - 1).astype(dtype=np.uint8)
    labels = np.unique(gt.flatten())
    gt_nii.uncache()
    return [dsc_seg(gt == l, image == l) for l in labels[1:]]
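
# A quick demonstration of the np.minimum clipping in check_dsc on toy data:
# BraTS ground truth uses labels {0, 1, 2, 4}, so any label above
# nlabels - 1 is folded into the last class index before the per-label DSC
# is computed (the nlabels=4 value here is just for illustration):
def _demo_label_clipping():
    toy_gt = np.array([0, 1, 2, 4], dtype=np.uint8)
    return np.minimum(toy_gt, 4 - 1)  # array([0, 1, 2, 3], dtype=uint8)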
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net architecture parameters
    register = options['register']
    multi = options['multi']
    defo = options['deformation']
    layers = ''.join(options['layers'])
    greenspan = options['greenspan']
    freeze = options['freeze']
    balanced = options['balanced'] if not freeze else False

    # Prepare the net hyperparameters
    epochs = options['epochs']
    padding = options['padding']
    patch_width = options['patch_width']
    patch_size = (32, 32) if greenspan \
        else (patch_width, patch_width, patch_width)
    pool_size = options['pool_size']
    batch_size = options['batch_size']
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['number_filters']
    n_filters = n_filters if len(n_filters) > 1 else n_filters * conv_blocks
    conv_width = options['conv_width']
    conv_size = conv_width if isinstance(conv_width, list) \
        else [conv_width] * conv_blocks

    # Prepare the sufix that will be added to the results
    # for the net and images
    use_flair = options['use_flair']
    use_pd = options['use_pd']
    use_t2 = options['use_t2']
    flair_name = 'flair' if use_flair else None
    pd_name = 'pd' if use_pd else None
    t2_name = 't2' if use_t2 else None
    images = filter(None, [flair_name, pd_name, t2_name])
    reg_s = '.reg' if register else ''
    filters_s = 'n'.join(['%d' % nf for nf in n_filters])
    conv_s = 'c'.join(['%d' % cs for cs in conv_size])
    im_s = '.'.join(images)
    mc_s = '.mc' if multi else ''
    d_s = 'd%d.' % (conv_blocks * 2 + defo) if defo else ''
    sufix = '.greenspan' if greenspan else \
        '%s.%s%s%s.p%d.c%s.n%s.d%d.e%d.pad_%s' % \
        (mc_s, d_s, im_s, reg_s, patch_width, conv_s,
         filters_s, dense_size, epochs, padding)

    # Prepare the data names
    mask_name = options['mask']
    wm_name = options['wm_mask']
    sub_folder = options['sub_folder']
    sub_name = options['flair_sub']
    dir_name = options['dir_name']
    patients = [f for f in sorted(os.listdir(dir_name))
                if os.path.isdir(os.path.join(dir_name, f))]
    n_patients = len(patients)
    names = get_names_from_path(dir_name, options, patients)
    defo_names = get_defonames_from_path(dir_name, options, patients) \
        if defo else None
    defo_width = conv_blocks * 2 + defo if defo else None
    defo_size = (defo_width, defo_width, defo_width)

    # Random initialisation
    seed = np.random.randint(np.iinfo(np.int32).max)

    # Metrics output
    metrics_file = os.path.join(dir_name, 'metrics' + sufix)

    with open(metrics_file, 'w') as f:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
              'Starting leave-one-out' + c['nc'])
        # Leave-one-out main loop
        # (we'll do 2 training iterations with testing for each patient)
        for i in range(0, n_patients):
            # Prepare the data relevant to the leave-one-out
            # (subtract the patient from the dataset and set the path).
            # Also, prepare the network.
            case = patients[i]
            path = os.path.join(dir_name, case)
            names_lou = np.concatenate([names[:, :i], names[:, i + 1:]],
                                       axis=1)
            defo_names_lou = np.concatenate(
                [defo_names[:, :i], defo_names[:, i + 1:]],
                axis=1) if defo else None
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['nc'] +
                  'Patient ' + c['b'] + case + c['nc'] + c['g'] +
                  ' (%d/%d)' % (i + 1, n_patients))

            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  '<Running iteration ' + c['b'] + '1' + c['nc'] +
                  c['g'] + '>' + c['nc'])
            net_name = os.path.join(
                path, 'deep-longitudinal.init' + sufix + '.')
            if greenspan:
                net = create_cnn_greenspan(
                    input_channels=names.shape[0] / 2,
                    patience=25,
                    name=net_name,
                    epochs=500
                )
                images = ['axial', 'coronal', 'sagital']
            else:
                if multi:
                    net = create_cnn3d_det_string(
                        cnn_path=layers,
                        input_shape=(None, names.shape[0],
                                     patch_width, patch_width, patch_width),
                        convo_size=conv_size,
                        padding=padding,
                        dense_size=dense_size,
                        pool_size=2,
                        number_filters=n_filters,
                        patience=10,
                        multichannel=True,
                        name=net_name,
                        epochs=100
                    )
                else:
                    net = create_cnn3d_longitudinal(
                        convo_blocks=conv_blocks,
                        input_shape=(None, names.shape[0],
                                     patch_width, patch_width, patch_width),
                        images=images,
                        convo_size=conv_size,
                        pool_size=pool_size,
                        dense_size=dense_size,
                        number_filters=n_filters,
                        padding=padding,
                        drop=0.5,
                        register=register,
                        defo=defo,
                        patience=10,
                        name=net_name,
                        epochs=100
                    )

            names_test = get_names_from_path(path, options)
            defo_names_test = get_defonames_from_path(path, options) \
                if defo else None
            outputname1 = os.path.join(
                path, 't' + case + sufix + '.iter1.nii.gz') if not greenspan \
                else os.path.join(path, 't' + case + sufix + '.nii.gz')

            # First we check that we did not train for that patient,
            # in order to save time.
            try:
                net.load_params_from(net_name + 'model_weights.pkl')
            except IOError:
                print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                      'Loading the data for ' + c['b'] + 'iteration 1' +
                      c['nc'])
                # Data loading. Most of it is based on functions from
                # data_creation that load the data. But we also need to
                # prepare the name list to load the leave-one-out data.
                paths = [os.path.join(dir_name, p)
                         for p in np.concatenate([patients[:i],
                                                  patients[i + 1:]])]
                mask_names = [os.path.join(p_path, mask_name)
                              for p_path in paths]
                wm_names = [os.path.join(p_path, wm_name)
                            for p_path in paths]
                pr_names = [os.path.join(p_path, sub_folder, sub_name)
                            for p_path in paths]
                x_train, y_train = load_lesion_cnn_data(
                    names=names_lou,
                    mask_names=mask_names,
                    defo_names=defo_names_lou,
                    roi_names=wm_names,
                    pr_names=pr_names,
                    patch_size=patch_size,
                    defo_size=defo_size,
                    random_state=seed
                )

                # Afterwards we train. Check the relevant training function.
                if greenspan:
                    x_train = np.swapaxes(x_train, 1, 2)
                    train_greenspan(net, x_train, y_train, images)
                else:
                    train_net(net, x_train, y_train, images)
                    with open(net_name + 'layers.pkl', 'wb') as fnet:
                        pickle.dump(net.layers, fnet, -1)

            # Then we test the net. Again we save time by checking if we
            # already tested that patient.
            try:
                image_nii = load_nii(outputname1)
                image1 = image_nii.get_data()
            except IOError:
                print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                      '<Creating the probability map ' + c['b'] + '1' +
                      c['nc'] + c['g'] + '>' + c['nc'])
                image_nii = load_nii(os.path.join(
                    path, options['image_folder'], options['flair_f']))
                mask_nii = load_nii(os.path.join(path, wm_name))
                if greenspan:
                    image1 = test_greenspan(
                        net,
                        names_test,
                        mask_nii.get_data(),
                        batch_size,
                        patch_size,
                        image_nii.get_data().shape,
                        images
                    )
                else:
                    image1 = test_net(
                        net,
                        names_test,
                        mask_nii.get_data(),
                        batch_size,
                        patch_size,
                        defo_size,
                        image_nii.get_data().shape,
                        images,
                        defo_names_test
                    )
                image_nii.get_data()[:] = image1
                image_nii.to_filename(outputname1)

            if greenspan:
                # Since Greenspan did not use two iterations,
                # we must get the final mask here.
                outputname_final = os.path.join(
                    path, 't' + case + sufix + '.final.nii.gz')
                mask_nii.get_data()[:] = (image1 > 0.5).astype(dtype=np.int8)
                mask_nii.to_filename(outputname_final)
            else:
                # If not, we test the net with the training set to look for
                # misclassified negatives with a high probability of being
                # positives according to the net. These voxels will be the
                # input of the second training iteration.
                ''' Here we get the seeds '''
                print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                      '<Looking for seeds for the final iteration>' + c['nc'])
                patients_names = zip(np.rollaxis(names_lou, 1),
                                     np.rollaxis(defo_names_lou, 1)) if defo \
                    else np.rollaxis(names_lou, 1)
                for patient in patients_names:
                    if defo:
                        patient, d_patient = patient
                    else:
                        d_patient = None
                    patient_path = '/'.join(patient[0].rsplit('/')[:-1])
                    outputname = os.path.join(
                        patient_path, 't' + case + sufix + '.nii.gz')
                    mask_nii = load_nii(os.path.join(
                        '/'.join(patient[0].rsplit('/')[:-3]), wm_name))
                    try:
                        load_nii(outputname)
                        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
                              c['g'] + ' Patient ' +
                              patient[0].rsplit('/')[-4] +
                              ' already done' + c['nc'])
                    except IOError:
                        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
                              c['g'] + ' Testing with patient ' + c['b'] +
                              patient[0].rsplit('/')[-4] + c['nc'])
                        image_nii = load_nii(patient[0])
                        image = test_net(
                            net,
                            patient,
                            mask_nii.get_data(),
                            batch_size,
                            patch_size,
                            defo_size,
                            image_nii.get_data().shape,
                            images,
                            d_patient
                        )
                        print(c['g'] + ' -- Saving image ' + c['b'] +
                              outputname + c['nc'])
                        image_nii.get_data()[:] = image
                        image_nii.to_filename(outputname)

                ''' Here we perform the last iteration '''
                # Finally we perform the final iteration. After refactoring
                # the code, it looks almost exactly the same as the training
                # of the first iteration.
                print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                      '<Running iteration ' + c['b'] + '2' + c['nc'] +
                      c['g'] + '>' + c['nc'])
                f_s = '.f' if freeze else ''
                ub_s = '.ub' if not balanced else ''
                final_s = f_s + ub_s
                outputname2 = os.path.join(
                    path, 't' + case + final_s + sufix + '.iter2.nii.gz')
                net_name = os.path.join(
                    path, 'deep-longitudinal.final' + final_s + sufix + '.')
                if multi:
                    net = create_cnn3d_det_string(
                        cnn_path=layers,
                        input_shape=(None, names.shape[0],
                                     patch_width, patch_width, patch_width),
                        convo_size=conv_size,
                        padding=padding,
                        pool_size=2,
                        dense_size=dense_size,
                        number_filters=n_filters,
                        patience=50,
                        multichannel=True,
                        name=net_name,
                        epochs=epochs
                    )
                else:
                    if not freeze:
                        net = create_cnn3d_longitudinal(
                            convo_blocks=conv_blocks,
                            input_shape=(None, names.shape[0],
                                         patch_width, patch_width,
                                         patch_width),
                            images=images,
                            convo_size=conv_size,
                            pool_size=pool_size,
                            dense_size=dense_size,
                            number_filters=n_filters,
                            padding=padding,
                            drop=0.5,
                            register=register,
                            defo=defo,
                            patience=50,
                            name=net_name,
                            epochs=epochs
                        )
                    else:
                        net.max_epochs = epochs
                        net.on_epoch_finished[0].name = \
                            net_name + 'model_weights.pkl'
                        # Freeze everything but the dense layers.
                        for layer in net.get_all_layers():
                            if not isinstance(layer, DenseLayer):
                                for param in layer.params:
                                    layer.params[param].discard('trainable')

                try:
                    net.load_params_from(net_name + 'model_weights.pkl')
                except IOError:
                    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
                          c['g'] + 'Loading the data for ' + c['b'] +
                          'iteration 2' + c['nc'])
                    roi_paths = ['/'.join(name.rsplit('/')[:-1])
                                 for name in names_lou[0, :]]
                    paths = [os.path.join(dir_name, p)
                             for p in np.concatenate([patients[:i],
                                                      patients[i + 1:]])]
                    ipr_names = [os.path.join(p_path, sub_folder, sub_name)
                                 for p_path in paths] if freeze else None
                    pr_names = [os.path.join(p_path,
                                             't' + case + sufix + '.nii.gz')
                                for p_path in roi_paths]
                    mask_names = [os.path.join(p_path, mask_name)
                                  for p_path in paths]
                    wm_names = [os.path.join(p_path, wm_name)
                                for p_path in paths]
                    x_train, y_train = load_lesion_cnn_data(
                        names=names_lou,
                        mask_names=mask_names,
                        defo_names=defo_names_lou,
                        roi_names=wm_names,
                        init_pr_names=ipr_names,
                        pr_names=pr_names,
                        patch_size=patch_size,
                        defo_size=defo_size,
                        random_state=seed,
                        balanced=balanced
                    )
                    train_net(net, x_train, y_train, images)
                    with open(net_name + 'layers.pkl', 'wb') as fnet:
                        pickle.dump(net.layers, fnet, -1)

                try:
                    image_nii = load_nii(outputname2)
                    image2 = image_nii.get_data()
                except IOError:
                    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
                          c['g'] + '<Creating the probability map ' +
                          c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
                    image_nii = load_nii(os.path.join(
                        path, options['image_folder'], options['flair_f']))
                    mask_nii = load_nii(os.path.join(path, wm_name))
                    image2 = test_net(
                        net,
                        names_test,
                        mask_nii.get_data(),
                        batch_size,
                        patch_size,
                        defo_size,
                        image_nii.get_data().shape,
                        images,
                        defo_names_test
                    )
                    image_nii.get_data()[:] = image2
                    image_nii.to_filename(outputname2)

                image = image1 * image2
                image_nii.get_data()[:] = image
                outputname_mult = os.path.join(
                    path, 't' + case + final_s + sufix + '.iter1_x_2.nii.gz')
                image_nii.to_filename(outputname_mult)

                image = (image1 * image2) > 0.5
                image_nii.get_data()[:] = image
                outputname_final = os.path.join(
                    path, 't' + case + final_s + sufix + '.final.nii.gz')
                image_nii.to_filename(outputname_final)

            # Finally we compute some metrics that are stored in the metrics
            # file defined above.
            # I plan on replicating Challenge's 2008 evaluation measures here.
            gt = load_nii(os.path.join(path, mask_name)).get_data().astype(
                dtype=np.bool)
            seg1 = image1 > 0.5
            if not greenspan:
                seg2 = image2 > 0.5
            dsc1 = dsc_seg(gt, seg1)
            if not greenspan:
                dsc2 = dsc_seg(gt, seg2)
            if not greenspan:
                dsc_final = dsc_seg(gt, image)
            else:
                dsc_final = dsc1
            tpf1 = tp_fraction_seg(gt, seg1)
            if not greenspan:
                tpf2 = tp_fraction_seg(gt, seg2)
            if not greenspan:
                tpf_final = tp_fraction_seg(gt, image)
            fpf1 = fp_fraction_seg(gt, seg1)
            if not greenspan:
                fpf2 = fp_fraction_seg(gt, seg2)
            if not greenspan:
                fpf_final = fp_fraction_seg(gt, image)
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  '<DSC ' + c['c'] + case + c['g'] + ' = ' + c['b'] +
                  str(dsc_final) + c['nc'] + c['g'] + '>' + c['nc'])
            f.write('%s;Test 1; %f;%f;%f\n' % (case, dsc1, tpf1, fpf1))
            if not greenspan:
                f.write('%s;Test 2; %f;%f;%f\n' % (case, dsc2, tpf2, fpf2))
            if not greenspan:
                f.write('%s;Final; %f;%f;%f\n' %
                        (case, dsc_final, tpf_final, fpf_final))
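
# tp_fraction_seg and fp_fraction_seg above are assumed to be the voxel-wise
# true and false positive fractions in the style of the MICCAI 2008 MS lesion
# challenge measures mentioned in the comments. Plausible sketches
# (assumptions about the helpers, which may instead report percentages):
import numpy as np

def tp_fraction_seg_sketch(gt, seg):
    # Fraction of ground-truth voxels recovered by the segmentation.
    gt, seg = gt.astype(bool), seg.astype(bool)
    gt_size = np.count_nonzero(gt)
    tp = np.count_nonzero(np.logical_and(gt, seg))
    return float(tp) / gt_size if gt_size else 0.0

def fp_fraction_seg_sketch(gt, seg):
    # Fraction of segmented voxels that fall outside the ground truth.
    gt, seg = gt.astype(bool), seg.astype(bool)
    seg_size = np.count_nonzero(seg)
    fp = np.count_nonzero(np.logical_and(np.logical_not(gt), seg))
    return float(fp) / seg_size if seg_size else 0.0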
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net hyperparameters
    epochs = options['epochs']
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['n_filters']
    filters_list = n_filters if len(n_filters) > 1 else n_filters * conv_blocks
    conv_width = options['conv_width']
    kernel_size_list = conv_width if isinstance(conv_width, list) \
        else [conv_width] * conv_blocks
    balanced = options['balanced']
    # Data loading parameters
    downsample = options['downsample']
    preload = options['preload']
    shuffle = options['shuffle']

    # Prepare the sufix that will be added to the results
    # for the net and images
    filters_s = 'n'.join(['%d' % nf for nf in filters_list])
    conv_s = 'c'.join(['%d' % cs for cs in kernel_size_list])
    unbalanced_s = '.ub' if not balanced else ''
    shuffle_s = '.s' if shuffle else ''
    params_s = (unbalanced_s, shuffle_s, patch_width,
                conv_s, filters_s, dense_size, downsample)
    sufix = '%s%s.p%d.c%s.n%s.d%d.D%d' % params_s
    preload_s = ' (with %spreloading%s%s)' % (c['b'], c['nc'], c['c']) \
        if preload else ''
    print('%s[%s] Starting training%s%s' %
          (c['c'], strftime("%H:%M:%S"), preload_s, c['nc']))
    train_data, _ = get_names_from_path(options)
    test_data, test_labels = get_names_from_path(options, False)

    input_shape = (train_data.shape[1],) + patch_size

    dsc_results = list()
    dsc_results_pr = list()

    train_data, train_labels = get_names_from_path(options)
    centers_s = np.random.permutation(
        get_cnn_centers(train_data[:, 0], train_labels, balanced=balanced)
    )[::downsample]
    x_seg, y_seg = load_patches_ganseg_by_batches(
        image_names=train_data,
        label_names=train_labels,
        source_centers=centers_s,
        size=patch_size,
        nlabels=2,
        preload=preload,
    )

    for i, (p, gt_name) in enumerate(zip(test_data, test_labels)):
        p_name = p[0].rsplit('/')[-3]
        patient_path = '/'.join(p[0].rsplit('/')[:-1])
        print('%s[%s] %sCase %s%s%s%s%s (%d/%d):%s' % (
            c['c'], strftime("%H:%M:%S"), c['nc'],
            c['c'], c['b'], p_name, c['nc'],
            c['c'], i + 1, len(test_data), c['nc']
        ))

        # NO DSC objective
        image_cnn_name = os.path.join(
            patient_path, p_name + '.cnn.test%s.e%d' % (shuffle_s, epochs))
        image_gan_name = os.path.join(
            patient_path, p_name + '.gan.test%s.e%d' % (shuffle_s, epochs))
        # DSC objective
        image_cnn_dsc_name = os.path.join(
            patient_path,
            p_name + '.dsc-cnn.test%s.e%d' % (shuffle_s, epochs))
        image_gan_dsc_name = os.path.join(
            patient_path,
            p_name + '.dsc-gan.test%s.e%d' % (shuffle_s, epochs))
        try:
            # NO DSC objective
            image_cnn = load_nii(image_cnn_name + '.nii.gz').get_data()
            image_cnn_pr = load_nii(image_cnn_name + '.pr.nii.gz').get_data()
            image_gan = load_nii(image_gan_name + '.nii.gz').get_data()
            image_gan_pr = load_nii(image_gan_name + '.pr.nii.gz').get_data()
            # DSC objective
            image_cnn_dsc = load_nii(
                image_cnn_dsc_name + '.nii.gz').get_data()
            image_cnn_dsc_pr = load_nii(
                image_cnn_dsc_name + '.pr.nii.gz').get_data()
            image_gan_dsc = load_nii(
                image_gan_dsc_name + '.nii.gz').get_data()
            image_gan_dsc_pr = load_nii(
                image_gan_dsc_name + '.pr.nii.gz').get_data()
        except IOError:
            # Lesion segmentation
            adversarial_w = K.variable(0)
            # NO DSC objective
            cnn, gan, gan_test = get_wmh_nets(
                input_shape=input_shape,
                filters_list=filters_list,
                kernel_size_list=kernel_size_list,
                dense_size=dense_size,
                lambda_var=adversarial_w
            )
            # DSC objective
            cnn_dsc, gan_dsc, gan_dsc_test = get_wmh_nets(
                input_shape=input_shape,
                filters_list=filters_list,
                kernel_size_list=kernel_size_list,
                dense_size=dense_size,
                lambda_var=adversarial_w,
                dsc_obj=True
            )
            train_nets(
                gan=gan,
                gan_dsc=gan_dsc,
                cnn=cnn,
                cnn_dsc=cnn_dsc,
                p=p,
                x=x_seg,
                y=y_seg,
                name='wmh2017' + sufix,
                adversarial_w=adversarial_w
            )
            # NO DSC objective
            image_cnn = test_net(cnn, p, image_cnn_name)
            image_cnn_pr = load_nii(image_cnn_name + '.pr.nii.gz').get_data()
            image_gan = test_net(gan_test, p, image_gan_name)
            image_gan_pr = load_nii(image_gan_name + '.pr.nii.gz').get_data()
            # DSC objective
            image_cnn_dsc = test_net(cnn_dsc, p, image_cnn_dsc_name)
            image_cnn_dsc_pr = load_nii(
                image_cnn_dsc_name + '.pr.nii.gz').get_data()
            image_gan_dsc = test_net(gan_dsc_test, p, image_gan_dsc_name)
            image_gan_dsc_pr = load_nii(
                image_gan_dsc_name + '.pr.nii.gz').get_data()

        # NO DSC objective
        seg_cnn = image_cnn.astype(np.bool)
        seg_gan = image_gan.astype(np.bool)
        # DSC objective
        seg_cnn_dsc = image_cnn_dsc.astype(np.bool)
        seg_gan_dsc = image_gan_dsc.astype(np.bool)

        seg_gt = load_nii(gt_name).get_data()
        not_roi = np.logical_not(seg_gt == 2)

        results_cnn_dsc = dsc_seg(seg_gt == 1,
                                  np.logical_and(seg_cnn_dsc, not_roi))
        results_cnn_dsc_pr = probabilistic_dsc_seg(
            seg_gt == 1, image_cnn_dsc_pr * not_roi)
        results_cnn = dsc_seg(seg_gt == 1,
                              np.logical_and(seg_cnn, not_roi))
        results_cnn_pr = probabilistic_dsc_seg(
            seg_gt == 1, image_cnn_pr * not_roi)

        results_gan_dsc = dsc_seg(seg_gt == 1,
                                  np.logical_and(seg_gan_dsc, not_roi))
        results_gan_dsc_pr = probabilistic_dsc_seg(
            seg_gt == 1, image_gan_dsc_pr * not_roi)
        results_gan = dsc_seg(seg_gt == 1,
                              np.logical_and(seg_gan, not_roi))
        results_gan_pr = probabilistic_dsc_seg(
            seg_gt == 1, image_gan_pr * not_roi)

        whites = ''.join([' '] * 14)
        print('%sCase %s%s%s%s %sCNN%s vs %sGAN%s DSC: '
              '%s%f%s (%s%f%s) vs %s%f%s (%s%f%s)' % (
                  whites, c['c'], c['b'], p_name, c['nc'],
                  c['lgy'], c['nc'], c['y'], c['nc'],
                  c['lgy'], results_cnn_dsc, c['nc'],
                  c['lgy'], results_cnn, c['nc'],
                  c['y'], results_gan_dsc, c['nc'],
                  c['y'], results_gan, c['nc']
              ))
        print('%sCase %s%s%s%s %sCNN%s vs %sGAN%s DSC Pr: '
              '%s%f%s (%s%f%s) vs %s%f%s (%s%f%s)' % (
                  whites, c['c'], c['b'], p_name, c['nc'],
                  c['lgy'], c['nc'], c['y'], c['nc'],
                  c['lgy'], results_cnn_dsc_pr, c['nc'],
                  c['lgy'], results_cnn_pr, c['nc'],
                  c['y'], results_gan_dsc_pr, c['nc'],
                  c['y'], results_gan_pr, c['nc']
              ))

        dsc_results.append((results_cnn_dsc, results_cnn,
                            results_gan_dsc, results_gan))
        dsc_results_pr.append((results_cnn_dsc_pr, results_cnn_pr,
                               results_gan_dsc_pr, results_gan_pr))

    final_dsc = tuple(np.mean(dsc_results, axis=0))
    final_dsc_pr = tuple(np.mean(dsc_results_pr, axis=0))
    print('Final results DSC: %s%f%s (%s%f%s) vs %s%f%s (%s%f%s)' % (
        c['lgy'], final_dsc[0], c['nc'],
        c['lgy'], final_dsc[1], c['nc'],
        c['y'], final_dsc[2], c['nc'],
        c['y'], final_dsc[3], c['nc']
    ))
    print('Final results DSC Pr: %s%f%s (%s%f%s) vs %s%f%s (%s%f%s)' % (
        c['lgy'], final_dsc_pr[0], c['nc'],
        c['lgy'], final_dsc_pr[1], c['nc'],
        c['y'], final_dsc_pr[2], c['nc'],
        c['y'], final_dsc_pr[3], c['nc']
    ))
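
# train_nets above shares the Keras variable adversarial_w between the CNN
# and GAN losses, so the adversarial term can be re-weighted during training
# without recompiling the models. A hypothetical annealing step showing how
# such a shared weight could be scheduled (an assumption about the schedule,
# not the project's actual one):
from keras import backend as K

def anneal_adversarial_weight_sketch(adversarial_w, epoch, epochs):
    # Linearly ramp the adversarial loss weight from 0 to 1 over training.
    K.set_value(adversarial_w, min(1.0, float(epoch) / max(epochs - 1, 1)))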
def train_test_seg(net_name, n_folds, val_split=0.1):
    # Init
    c = color_codes()
    options = parse_inputs()
    depth = options['blocks']
    filters = options['filters']

    d_path = options['loo_dir']
    unc_path = os.path.join(d_path, 'uncertainty')
    if not os.path.isdir(unc_path):
        os.mkdir(unc_path)
    seg_path = os.path.join(d_path, 'segmentation')
    if not os.path.isdir(seg_path):
        os.mkdir(seg_path)
    patients = get_dirs(d_path)

    cbica = filter(lambda p: 'CBICA' in p, patients)
    tcia = filter(lambda p: 'TCIA' in p, patients)
    tmc = filter(lambda p: 'TMC' in p, patients)
    b2013 = filter(lambda p: '2013' in p, patients)

    for i in range(n_folds):
        print(
            '%s[%s] %sFold %s(%s%d%s%s/%d)%s' % (
                c['c'], strftime("%H:%M:%S"), c['g'],
                c['c'], c['b'], i + 1, c['nc'], c['c'], n_folds, c['nc']
            )
        )
        # Training itself
        # Data split (using the patient names) for train and validation.
        # We also compute the number of batches for both training and
        # validation according to the batch size.
        ''' Training '''
        ini_cbica = len(cbica) * i / n_folds
        end_cbica = len(cbica) * (i + 1) / n_folds
        fold_cbica = cbica[:ini_cbica] + cbica[end_cbica:]
        n_fold_cbica = len(fold_cbica)
        n_cbica = int(n_fold_cbica * (1 - val_split))

        ini_tcia = len(tcia) * i / n_folds
        end_tcia = len(tcia) * (i + 1) / n_folds
        fold_tcia = tcia[:ini_tcia] + tcia[end_tcia:]
        n_fold_tcia = len(fold_tcia)
        n_tcia = int(n_fold_tcia * (1 - val_split))

        ini_tmc = len(tmc) * i / n_folds
        end_tmc = len(tmc) * (i + 1) / n_folds
        fold_tmc = tmc[:ini_tmc] + tmc[end_tmc:]
        n_fold_tmc = len(fold_tmc)
        n_tmc = int(n_fold_tmc * (1 - val_split))

        ini_b2013 = len(b2013) * i / n_folds
        end_b2013 = len(b2013) * (i + 1) / n_folds
        fold_b2013 = b2013[:ini_b2013] + b2013[end_b2013:]
        n_fold_b2013 = len(fold_b2013)
        n_b2013 = int(n_fold_b2013 * (1 - val_split))

        training_n = n_fold_cbica + n_fold_tcia + n_fold_tmc + n_fold_b2013
        testing_n = len(patients) - training_n
        print(
            'Training / testing samples = %d / %d' % (
                training_n, testing_n
            )
        )

        # Training
        train_cbica = fold_cbica[:n_cbica]
        train_tcia = fold_tcia[:n_tcia]
        train_tmc = fold_tmc[:n_tmc]
        train_b2013 = fold_b2013[:n_b2013]
        train_patients = train_cbica + train_tcia + train_tmc + train_b2013

        # Validation
        val_cbica = fold_cbica[n_cbica:]
        val_tcia = fold_tcia[n_tcia:]
        val_tmc = fold_tmc[n_tmc:]
        val_b2013 = fold_b2013[n_b2013:]
        val_patients = val_cbica + val_tcia + val_tmc + val_b2013

        model_name = '%s-f%d.mdl' % (net_name, i)
        net = BratsSegmentationNet(depth=depth, filters=filters)

        # train_seg(net, model_name, train_patients, val_patients)
        train_seg(
            net, model_name, train_patients, val_patients,
            dropout=0
        )

        # model_name = '%s-f%d-R.mdl' % (net_name, i)
        # train_seg(
        #     net, model_name, train_patients, val_patients,
        #     refine=True, dropout=0.5, lr=1e-2
        # )

        # Testing data (with GT)
        test_cbica = cbica[ini_cbica:end_cbica]
        test_tcia = tcia[ini_tcia:end_tcia]
        test_tmc = tmc[ini_tmc:end_tmc]
        test_b2013 = b2013[ini_b2013:end_b2013]
        test_patients = test_cbica + test_tcia + test_tmc + test_b2013
        patient_paths = map(lambda p: os.path.join(d_path, p), test_patients)
        _, test_x = get_images(test_patients)
        print(
            'Testing patients (with GT) = %d' % (
                len(test_patients)
            )
        )

        # The sub-regions considered for evaluation are:
        # 1) the "enhancing tumor" (ET)
        # 2) the "tumor core" (TC)
        # 3) the "whole tumor" (WT)
        #
        # The provided segmentation labels have values of 1 for NCR & NET,
        # 2 for ED, 4 for ET, and 0 for everything else.
        # The participants are called to upload their segmentation labels
        # as a single multi-label file in nifti (.nii.gz) format.
        #
        # The participants are called to upload 4 nifti (.nii.gz) volumes
        # (3 uncertainty maps and 1 multi-class segmentation volume from
        # Task 1) onto CBICA's Image Processing Portal format. For example,
        # for each ID in the dataset, participants are expected to upload
        # the following 4 volumes:
        # 1. {ID}.nii.gz (multi-class label map)
        # 2. {ID}_unc_whole.nii.gz (Uncertainty map associated with whole tumor)
        # 3. {ID}_unc_core.nii.gz (Uncertainty map associated with tumor core)
        # 4. {ID}_unc_enhance.nii.gz (Uncertainty map associated with enhancing tumor)
        for p, (path_i, p_i, test_i) in enumerate(zip(
                patient_paths, test_patients, test_x
        )):
            pred_i = net.segment([test_i])[0]
            # unc_i = net.uncertainty([test_i], steps=25)[0]
            # whole_i = np.sum(unc_i[1:])
            # core_i = unc_i[1] + unc_i[-1]
            # enhance_i = unc_i[-1]
            seg_i = np.argmax(pred_i, axis=0)
            seg_i[seg_i == 3] = 4
            # seg_unc_i = np.argmax(unc_i, axis=0)
            # seg_unc_i[seg_unc_i == 3] = 4
            # tumor_mask = remove_small_regions(
            #     seg_i.astype(np.bool), min_size=30
            # )
            #
            # seg_i[log_not(tumor_mask)] = 0
            # seg_unc_i[log_not(tumor_mask)] = 0
            #
            # whole_i *= tumor_mask.astype(np.float32)
            # core_i *= tumor_mask.astype(np.float32)
            # enhance_i *= tumor_mask.astype(np.float32)

            niiname = os.path.join(path_i, p_i + '_seg.nii.gz')
            nii = load_nii(niiname)
            seg = nii.get_data()

            dsc = map(
                lambda label: dsc_seg(seg == label, seg_i == label),
                [1, 2, 4]
            )
            # dsc_unc = map(
            #     lambda label: dsc_seg(seg == label, seg_unc_i == label),
            #     [1, 2, 4]
            # )

            nii.get_data()[:] = seg_i
            save_nii(nii, os.path.join(seg_path, p_i + '.nii.gz'))
            # nii.get_data()[:] = seg_unc_i
            # save_nii(nii, os.path.join(unc_path, p_i + '.nii.gz'))

            # niiname = os.path.join(d_path, p_i, p_i + '_flair.nii.gz')
            # nii = load_nii(niiname)
            # nii.get_data()[:] = whole_i
            # save_nii(nii, os.path.join(unc_path, p_i + '_unc_whole.nii.gz'))
            # nii.get_data()[:] = core_i
            # save_nii(nii, os.path.join(unc_path, p_i + '_unc_core.nii.gz'))
            # nii.get_data()[:] = enhance_i
            # save_nii(nii, os.path.join(unc_path, p_i + '_unc_enhance.nii.gz'))

            print(
                'Segmentation - Patient %s (%d/%d): %s' % (
                    p_i, p, len(test_x), ' / '.join(map(str, dsc))
                )
            )
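
# remove_small_regions (commented out above) is assumed to drop connected
# components below a voxel threshold before keeping the tumor mask. A
# hypothetical sketch with scipy.ndimage (an assumption, not the project's
# actual helper):
import numpy as np
from scipy import ndimage

def remove_small_regions_sketch(mask, min_size=30):
    # Label the connected components and keep only those with at least
    # min_size voxels.
    blobs, n_blobs = ndimage.label(mask)
    sizes = np.bincount(blobs.ravel())
    keep = np.zeros_like(mask, dtype=bool)
    for label in range(1, n_blobs + 1):
        if sizes[label] >= min_size:
            keep = np.logical_or(keep, blobs == label)
    return keep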
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net architecture parameters
    sequential = options['sequential']
    dfactor = options['dfactor']
    # Prepare the net hyperparameters
    num_classes = 5
    epochs = options['epochs']
    padding = options['padding']
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['batch_size']
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['n_filters']
    filters_list = n_filters if len(n_filters) > 1 else n_filters * conv_blocks
    conv_width = options['conv_width']
    kernel_size_list = conv_width if isinstance(
        conv_width, list) else [conv_width] * conv_blocks
    balanced = options['balanced']
    recurrent = options['recurrent']
    # Data loading parameters
    preload = options['preload']
    queue = options['queue']

    # Prepare the sufix that will be added to the results
    # for the net and images
    path = options['dir_name']
    filters_s = 'n'.join(['%d' % nf for nf in filters_list])
    conv_s = 'c'.join(['%d' % cs for cs in kernel_size_list])
    s_s = '.s' if sequential else '.f'
    ub_s = '.ub' if not balanced else ''
    params_s = (ub_s, dfactor, s_s, patch_width, conv_s,
                filters_s, dense_size, epochs, padding)
    sufix = '%s.D%d%s.p%d.c%s.n%s.d%d.e%d.pad_%s.' % params_s
    n_channels = np.count_nonzero([
        options['use_flair'],
        options['use_t2'],
        options['use_t1'],
        options['use_t1ce']
    ])

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' +
          'Starting cross-validation' + c['nc'])
    # N-fold cross validation main loop
    # (we'll do 2 training iterations with testing for each patient)
    data_names, label_names = get_names_from_path(options)
    folds = options['folds']
    fold_generator = izip(
        nfold_cross_validation(data_names, label_names,
                               n=folds, val_data=0.25),
        xrange(folds))
    dsc_results = list()
    for (train_data, train_labels, val_data, val_labels,
         test_data, test_labels), i in fold_generator:
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['nc'] +
              'Fold %d/%d: ' % (i + 1, folds) + c['g'] +
              'Number of training/validation/testing images (%d=%d/%d=%d/%d)'
              % (len(train_data), len(train_labels), len(val_data),
                 len(val_labels), len(test_data)) + c['nc'])
        # Prepare the data relevant to the leave-one-out
        # (subtract the patient from the dataset and set the path).
        # Also, prepare the network.
        net_name = os.path.join(
            path, 'baseline-brats2017.fold%d' % i + sufix + 'mdl')

        # First we check that we did not train for that patient,
        # in order to save time.
        try:
            net = keras.models.load_model(net_name)
        except IOError:
            # NET definition using Keras
            train_centers = get_cnn_centers(train_data[:, 0], train_labels,
                                            balanced=balanced)
            val_centers = get_cnn_centers(val_data[:, 0], val_labels,
                                          balanced=balanced)
            train_samples = len(train_centers) / dfactor
            val_samples = len(val_centers) / dfactor
            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  'Creating and compiling the model ' + c['b'] +
                  '(%d samples)' % train_samples + c['nc'])
            train_steps_per_epoch = -(-train_samples / batch_size)
            val_steps_per_epoch = -(-val_samples / batch_size)
            input_shape = (n_channels, ) + patch_size

            if sequential:
                # Sequential model that merges all 4 images. This
                # architecture is just a set of convolutional blocks that
                # end in a dense layer. This is supposed to be an original
                # baseline.
                net = Sequential()
                net.add(
                    Conv3D(filters_list[0],
                           kernel_size=kernel_size_list[0],
                           input_shape=input_shape,
                           activation='relu',
                           data_format='channels_first'))
                for filters, kernel_size in zip(filters_list[1:],
                                                kernel_size_list[1:]):
                    net.add(Dropout(0.5))
                    net.add(
                        Conv3D(filters,
                               kernel_size=kernel_size,
                               activation='relu',
                               data_format='channels_first'))
                net.add(Dropout(0.5))
                net.add(Flatten())
                net.add(Dense(dense_size, activation='relu'))
                net.add(Dropout(0.5))
                net.add(Dense(num_classes, activation='softmax'))
            else:
                # This architecture is based on the functional Keras API to
                # introduce 3 output paths:
                # - Whole tumor segmentation
                # - Core segmentation (including whole tumor)
                # - Whole segmentation (tumor, core and enhancing parts)
                # The idea is to let the network work on the three parts to
                # improve the multiclass segmentation.
                merged_inputs = Input(shape=(4, ) + patch_size,
                                      name='merged_inputs')
                flair = Reshape((1, ) + patch_size)(Lambda(
                    lambda l: l[:, 0, :, :, :],
                    output_shape=(1, ) + patch_size)(merged_inputs), )
                t2 = Reshape((1, ) + patch_size)(Lambda(
                    lambda l: l[:, 1, :, :, :],
                    output_shape=(1, ) + patch_size)(merged_inputs))
                t1 = Lambda(lambda l: l[:, 2:, :, :, :],
                            output_shape=(2, ) + patch_size)(merged_inputs)
                for filters, kernel_size in zip(filters_list,
                                                kernel_size_list):
                    flair = Conv3D(filters,
                                   kernel_size=kernel_size,
                                   activation='relu',
                                   data_format='channels_first')(flair)
                    t2 = Conv3D(filters,
                                kernel_size=kernel_size,
                                activation='relu',
                                data_format='channels_first')(t2)
                    t1 = Conv3D(filters,
                                kernel_size=kernel_size,
                                activation='relu',
                                data_format='channels_first')(t1)
                    flair = Dropout(0.5)(flair)
                    t2 = Dropout(0.5)(t2)
                    t1 = Dropout(0.5)(t1)

                # We only apply the RCNN to the multioutput approach
                # (we keep the simple one, simple)
                if recurrent:
                    flair = Conv3D(dense_size,
                                   kernel_size=(1, 1, 1),
                                   activation='relu',
                                   data_format='channels_first',
                                   name='fcn_flair')(flair)
                    flair = Dropout(0.5)(flair)
                    t2 = concatenate([flair, t2], axis=1)
                    t2 = Conv3D(dense_size,
                                kernel_size=(1, 1, 1),
                                activation='relu',
                                data_format='channels_first',
                                name='fcn_t2')(t2)
                    t2 = Dropout(0.5)(t2)
                    t1 = concatenate([t2, t1], axis=1)
                    t1 = Conv3D(dense_size,
                                kernel_size=(1, 1, 1),
                                activation='relu',
                                data_format='channels_first',
                                name='fcn_t1')(t1)
                    t1 = Dropout(0.5)(t1)
                    flair = Dropout(0.5)(flair)
                    t2 = Dropout(0.5)(t2)
                    t1 = Dropout(0.5)(t1)
                    lstm_instance = LSTM(dense_size,
                                         implementation=1,
                                         name='rf_layer')
                    flair = lstm_instance(
                        Permute((2, 1))(Reshape((dense_size, -1))(flair)))
                    t2 = lstm_instance(
                        Permute((2, 1))(Reshape((dense_size, -1))(t2)))
                    t1 = lstm_instance(
                        Permute((2, 1))(Reshape((dense_size, -1))(t1)))
                else:
                    flair = Flatten()(flair)
                    t2 = Flatten()(t2)
                    t1 = Flatten()(t1)
                    flair = Dense(dense_size, activation='relu')(flair)
                    flair = Dropout(0.5)(flair)
                    t2 = concatenate([flair, t2])
                    t2 = Dense(dense_size, activation='relu')(t2)
                    t2 = Dropout(0.5)(t2)
                    t1 = concatenate([t2, t1])
                    t1 = Dense(dense_size, activation='relu')(t1)
                    t1 = Dropout(0.5)(t1)

                tumor = Dense(2, activation='softmax', name='tumor')(flair)
                core = Dense(3, activation='softmax', name='core')(t2)
                enhancing = Dense(num_classes,
                                  activation='softmax',
                                  name='enhancing')(t1)

                net = Model(inputs=merged_inputs,
                            outputs=[tumor, core, enhancing])

            net.compile(optimizer='adadelta',
                        loss='categorical_crossentropy',
                        metrics=['accuracy'])

            print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                  'Training the model with a generator for ' + c['b'] +
                  '(%d parameters)' % net.count_params() + c['nc'])
            print(net.summary())
            net.fit_generator(
                generator=load_patch_batch_train(
                    image_names=train_data,
                    label_names=train_labels,
                    centers=train_centers,
                    batch_size=batch_size,
                    size=patch_size,
                    nlabels=num_classes,
                    dfactor=dfactor,
                    preload=preload,
                    split=not sequential,
                    datatype=np.float32),
                validation_data=load_patch_batch_train(
                    image_names=val_data,
                    label_names=val_labels,
                    centers=val_centers,
                    batch_size=batch_size,
                    size=patch_size,
                    nlabels=num_classes,
                    dfactor=dfactor,
                    preload=preload,
                    split=not sequential,
                    datatype=np.float32),
                steps_per_epoch=train_steps_per_epoch,
                validation_steps=val_steps_per_epoch,
                max_q_size=queue,
                epochs=epochs)
            net.save(net_name)

        # Then we test the net.
        use_gt = options['use_gt']
        for p, gt_name in zip(test_data, test_labels):
            p_name = p[0].rsplit('/')[-2]
            patient_path = '/'.join(p[0].rsplit('/')[:-1])
            outputname = os.path.join(
                patient_path, 'deep-brats17' + sufix + 'test.nii.gz')
            try:
                load_nii(outputname)
            except IOError:
                roi_nii = load_nii(p[0])
                roi = roi_nii.get_data().astype(dtype=np.bool)
                centers = get_mask_voxels(roi)
                test_samples = np.count_nonzero(roi)
                image = np.zeros_like(roi).astype(dtype=np.uint8)
                print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
                      '<Creating the probability map ' + c['b'] + p_name +
                      c['nc'] + c['g'] +
                      ' (%d samples)>' % test_samples + c['nc'])
                test_steps_per_epoch = -(-test_samples / batch_size)
                y_pr_pred = net.predict_generator(
                    generator=load_patch_batch_generator_test(
                        image_names=p,
                        centers=centers,
                        batch_size=batch_size,
                        size=patch_size,
                        preload=preload,
                    ),
                    steps=test_steps_per_epoch,
                    max_q_size=queue)
                [x, y, z] = np.stack(centers, axis=1)
                if not sequential:
                    tumor = np.argmax(y_pr_pred[0], axis=1)
                    y_pr_pred = y_pr_pred[-1]
                    roi = np.zeros_like(roi).astype(dtype=np.uint8)
                    roi[x, y, z] = tumor
                    roi_nii.get_data()[:] = roi
                    roiname = os.path.join(
                        patient_path,
                        'deep-brats17' + sufix + 'test.roi.nii.gz')
                    roi_nii.to_filename(roiname)
                y_pred = np.argmax(y_pr_pred, axis=1)
                image[x, y, z] = y_pred
                # Post-processing (basically keep the biggest connected region)
                image = get_biggest_region(image)
                if use_gt:
                    gt_nii = load_nii(gt_name)
                    gt = np.copy(gt_nii.get_data()).astype(dtype=np.uint8)
                    labels = np.unique(gt.flatten())
                    results = (p_name, ) + tuple(
                        [dsc_seg(gt == l, image == l) for l in labels[1:]])
                    text = 'Subject %s DSC: ' + '/'.join(
                        ['%f' for _ in labels[1:]])
                    print(text % results)
                    dsc_results.append(results)
                print(c['g'] + ' -- Saving image ' + c['b'] +
                      outputname + c['nc'])
                roi_nii.get_data()[:] = image
                roi_nii.to_filename(outputname)
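
# The expression -(-samples / batch_size) used for the *_steps_per_epoch
# values above is the integer ceiling-division idiom: negate, floor-divide,
# negate again. It relies on Python 2's floor division for '/' on ints (as
# used throughout this code); '//' makes the same idiom portable:
def _demo_ceil_div():
    samples, batch_size = 10, 4
    return -(-samples // batch_size)  # 3 batches are needed for 10 samples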
def check_dsc(gt_name, image):
    gt_nii = load_nii(gt_name)
    gt = np.copy(gt_nii.get_data()).astype(dtype=np.uint8)
    labels = np.unique(gt.flatten())
    return [dsc_seg(gt == l, image == l) for l in labels[1:]]
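
# A hypothetical usage of check_dsc after inference, mirroring the reporting
# pattern of the testing loops above (p_name, gt_name and image are
# assumptions for illustration):
def report_dsc_sketch(p_name, gt_name, image):
    dscs = check_dsc(gt_name, image)
    text = 'Subject %s DSC: ' + '/'.join(['%f' for _ in dscs])
    print(text % ((p_name, ) + tuple(dscs)))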