def test_loader():
    nn_input_shape = (32, ) * 3
    norm_patch_shape = (32, ) * 3

    preprocessors = [
        AugmentFPRCandidates(
            candidates_csv="candidates_V2",
            tags=["luna:3d"],
            output_shape=nn_input_shape,
            norm_patch_shape=norm_patch_shape,
            augmentation_params={
                "scale": [1, 1, 1],  # factor
                "uniform scale": 1,  # factor
                "rotation": [0, 0, 0],  # degrees
                "shear": [0, 0, 0],  # degrees
                "translation": [0, 0, 0],  # mm
                "reflection": [0, 0, 0]  # Bernoulli p
            },
            interp_order=1),
        DefaultNormalizer(tags=["luna:3d"])
    ]

    l = LunaDataLoader(
        only_positive=True,
        multiprocess=False,
        sets=TRAINING,
        preprocessors=preprocessors)
    l.prepare()

    chunk_size = 1
    batches = l.generate_batch(
        chunk_size=chunk_size,
        required_input={
            "luna:3d": (chunk_size, ) + nn_input_shape,
            "luna:pixelspacing": (chunk_size, 3)
        },
        required_output={"luna:target": (chunk_size, )})

    for sample in batches:
        import utils.plt
        print sample[INPUT]["luna:3d"].shape, sample[OUTPUT]["luna:target"], sample[INPUT]["luna:pixelspacing"]
        utils.plt.show_animate(
            np.clip(sample[INPUT]["luna:3d"][0] + 0.25, 0, 1),
            50,
            normalize=False)
def process(self, sample):
    orig_augment = sample_augmentation_parameters(self.augmentation_params)

    for tag in self.tags:
        pixelspacingtag = tag.split(':')[0] + ":pixelspacing"
        labelstag = tag.split(':')[0] + ":labels"
        origintag = tag.split(':')[0] + ":origin"

        assert pixelspacingtag in sample[INPUT], "tag %s not found" % pixelspacingtag
        assert labelstag in sample[INPUT], "tag %s not found" % labelstag
        assert origintag in sample[INPUT], "tag %s not found" % origintag

        spacing = sample[INPUT][pixelspacingtag]
        labels = sample[INPUT][labelstag]
        origin = sample[INPUT][origintag]

        label = random.choice(labels)
        labelloc = LunaDataLoader.world_to_voxel_coordinates(
            label[:3], origin=origin, spacing=spacing)

        if tag in sample[INPUT]:
            volume = sample[INPUT][tag]
            augment_p = dict(orig_augment)
            # augment_p["translation"] = augment_p["translation"] + (0.5*np.array(volume.shape)-labelloc)*spacing
            sample[INPUT][tag] = lio_augment(
                volume=volume,
                pixel_spacing=spacing,
                output_shape=self.output_shape,
                norm_patch_shape=self.norm_patch_size,
                augment_p=augment_p,
                center_to_shift=-labelloc)
        elif tag in sample[OUTPUT]:
            volume = sample[OUTPUT][tag]
            augment_p = dict(orig_augment)
            # augment_p["translation"] = augment_p["translation"] + (0.5*np.array(volume.shape)-labelloc)*spacing
            sample[OUTPUT][tag] = lio_augment(
                volume=volume,
                pixel_spacing=spacing,
                output_shape=self.output_shape,
                norm_patch_shape=self.norm_patch_size,
                augment_p=augment_p,
                center_to_shift=-labelloc,
                cval=0.0)
        else:
            pass
def process(self, sample):
    augment_p = sample_augmentation_parameters(self.augmentation_params)

    for tag in self.tags:
        pixelspacingtag = tag.split(':')[0] + ":pixelspacing"
        labelstag = tag.split(':')[0] + ":labels"
        origintag = tag.split(':')[0] + ":origin"

        assert pixelspacingtag in sample[INPUT], "tag %s not found" % pixelspacingtag
        assert labelstag in sample[INPUT], "tag %s not found" % labelstag
        assert origintag in sample[INPUT], "tag %s not found" % origintag

        spacing = sample[INPUT][pixelspacingtag]
        labels = sample[INPUT][labelstag]
        origin = sample[INPUT][origintag]

        label = random.choice(labels)

        from application.luna import LunaDataLoader  # lazy import
        labelloc = LunaDataLoader.world_to_voxel_coordinates(
            label[:3], origin=origin, spacing=spacing)

        if tag in sample[INPUT]:
            volume = sample[INPUT][tag]
            sample[INPUT][tag] = augment_3d(
                volume=volume,
                pixel_spacing=spacing,
                output_shape=self.output_shape,
                norm_patch_shape=self.norm_patch_shape,
                augment_p=augment_p,
                center_to_shift=-labelloc)
        elif tag in sample[OUTPUT]:
            volume = sample[OUTPUT][tag]
            sample[OUTPUT][tag] = augment_3d(
                volume=volume,
                pixel_spacing=spacing,
                output_shape=self.output_shape,
                norm_patch_shape=self.norm_patch_shape,
                augment_p=augment_p,
                center_to_shift=-labelloc,
                cval=0.0)
        else:
            pass
def process(self, sample):
    augment_p = sample_augmentation_parameters(self.augmentation_params)

    tag = self.tags[0]
    basetag = tag.split(':')[0]

    pixelspacingtag = basetag + ":pixelspacing"
    patient_idtag = basetag + ":patient_id"
    origintag = basetag + ":origin"

    spacing = sample[INPUT][pixelspacingtag]
    patient_id = sample[INPUT][patient_idtag]
    candidates = self.candidates[patient_id]
    origin = sample[INPUT][origintag]

    if len(candidates) == 1:
        candidate = random.choice(candidates[0])
    elif len(candidates) == 2:
        percentage_chance = 0.5
        if random.random() < percentage_chance:
            candidate = random.choice(candidates[1])
        else:
            candidate = random.choice(candidates[0])
    else:
        raise Exception("candidates is empty")

    # print 'candidate', candidate
    candidateloc = LunaDataLoader.world_to_voxel_coordinates(
        candidate[:3], origin=origin, spacing=spacing)

    volume = sample[INPUT][basetag + ":3d"]
    sample[INPUT][basetag + ":3d"] = augment_3d(
        volume=volume,
        pixel_spacing=spacing,
        output_shape=self.output_shape,
        norm_patch_shape=self.norm_patch_shape,
        augment_p=augment_p,
        center_to_shift=-candidateloc)

    # add candidate label to output tags
    sample[OUTPUT][basetag + ":target"] = np.int32(candidate[3])
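# The process() helpers above all rely on LunaDataLoader.world_to_voxel_coordinates
# to map a label's world-space position (in mm) onto voxel indices of the scan.
# Below is a rough, illustrative sketch of what such a mapping commonly looks like
# for LUNA-style metadata; it is an assumption for clarity, not necessarily the
# exact implementation used in this codebase.
import numpy as np


def world_to_voxel_coordinates_sketch(world_coord, origin, spacing):
    """Map world coordinates (mm) to fractional voxel indices."""
    # offset from the scan origin in mm, rescaled by the voxel size (mm/voxel);
    # some implementations take an absolute value here to handle axis direction
    return (np.asarray(world_coord, dtype=float) - np.asarray(origin)) / np.asarray(spacing)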
        output_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_SIZE),  # in pixels
        norm_patch_size=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_SIZE),  # in mm
        augmentation_params=AUGMENTATION_PARAMETERS
    ),
    ZMUV("luna:3d", bias=-648.59027, std=679.21021),
]

#####################
#     training      #
#####################

"This is the train dataloader. We will train until this one stops loading data."
"You can set the number of epochs, the datasets and whether you want it multiprocessed."
training_data = LunaDataLoader(
    only_positive=True,
    sets=TRAINING,
    epochs=30,
    preprocessors=preprocessors,
    multiprocess=True,
    crash_on_exception=True,
)

"Schedule the reduction of the learning rate. When indexed with the number of epochs, it should return a value for the learning rate."
learning_rate_schedule = {
    0.0: 0.00001,
    10.0: 0.000005,
    16.0: 0.000002,
    18.0: 0.000001,
}

"The function to build updates."
build_updates = lasagne.updates.adam
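# The learning_rate_schedule above is consumed by the training loop elsewhere in
# the codebase. As a minimal sketch of how such an epoch-keyed dict could be
# turned into a learning rate (this lookup is an illustration; the actual trainer
# may differ): pick the value of the largest key not exceeding the current epoch.
def lookup_learning_rate_sketch(schedule, epoch):
    applicable = [k for k in schedule if k <= epoch]
    return schedule[max(applicable)] if applicable else schedule[min(schedule)]

# e.g. lookup_learning_rate_sketch(learning_rate_schedule, 12.5) -> 0.000005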
# for building the segmentation model, the input tag should be replaced
replace_input_tags = {"luna:3d": tag + "3d"}  # {old: new}

# prep before patches
preprocessors = []

# prep on the patches
#################################
# HuNorm happens inside network #
#################################
postpreprocessors = []

data_loader = LunaDataLoader(
    sets=[VALIDATION],
    preprocessors=preprocessors,
    epochs=1,
    multiprocess=False,
    crash_on_exception=True)

batch_size = 1  # only works with 1


# function to call to extract nodules from the fully reconstructed segmentation
def extract_nodules(segmentation):
    """segmentation is a 3D array"""
    rois = blob_dog(segmentation, min_sigma=1, max_sigma=15, threshold=0.1)
    print rois.shape[0]
    if rois.shape[0] > 0:
        rois = rois[:, :3]  # ignore diameter
    else:
        return None
    return rois
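# A minimal, self-contained usage sketch for extract_nodules (illustrative only;
# the dummy segmentation below is a made-up placeholder, not real model output).
# blob_dog returns one row per detected blob with the detection sigma in the last
# column, which extract_nodules drops so that only voxel coordinates remain.
if False:
    import numpy as np

    dummy_segmentation = np.zeros((64, 64, 64), dtype=np.float32)
    dummy_segmentation[30:34, 30:34, 30:34] = 1.0  # one synthetic blob

    rois = extract_nodules(dummy_segmentation)
    if rois is not None:
        print rois  # voxel coordinates; multiply by the pixel spacing to get mm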
def check_nodules(set, plot=False):
    prediction_config = "configurations.elias.roi_luna_1"
    prediction_folder = paths.MODEL_PREDICTIONS_PATH + '/' + prediction_config + '/'

    tags = ["luna:patient_id", "luna:origin", "luna:pixelspacing",
            "luna:labels", "luna:shape", "luna:3d"]

    print 'checking nodules for set', set,
    set_indices = config.data_loader.indices[set]
    print 'no_samples', len(set_indices)

    n_nodules, n_found, n_regions = 0, 0, 0
    for _i, sample_id in enumerate(set_indices):
        print "sample_id", sample_id, _i + 1, "/", len(set_indices), "in", set
        data = config.data_loader.load_sample(sample_id, tags, {})

        patient_id = data["input"]["luna:patient_id"]
        origin = data["input"]["luna:origin"]
        spacing = data["input"]["luna:pixelspacing"]
        nodules = data["input"]["luna:labels"]
        shape = data["input"]["luna:shape"]
        volume = data["input"]["luna:3d"]
        print 'pixelspacing', spacing
        print 'shape', shape

        rois = read_rois(prediction_folder, patient_id)
        n_regions += len(rois)

        max_dim = 0
        for nidx, nodule in enumerate(nodules):
            n_nodules += 1
            print 'nodule orig coos', nodule
            n = LunaDataLoader.world_to_voxel_coordinates(nodule[:3], origin, spacing)
            print n
            diameter_in_mm = nodule[3]
            nodule = n[:3]

            if plot:
                center = np.round(nodule).astype(int)
                fig = plt.figure()

                ax1 = fig.add_subplot(1, 3, 1, adjustable='box', aspect=1.0)
                ax1.imshow(volume[center[0], :, :].transpose(),
                           interpolation='none', cmap=plt.cm.gray)
                circ1 = plt.Circle((center[1], center[2]), 24, color='y', fill=False)
                ax1.add_patch(circ1)

                ax2 = fig.add_subplot(1, 3, 2, adjustable='box', aspect=1.0)
                ax2.imshow(volume[:, center[1], :].transpose(),
                           interpolation='none', cmap=plt.cm.gray)
                circ2 = plt.Circle((center[0], center[2]), 24, color='y', fill=False)
                ax2.add_patch(circ2)

                ax3 = fig.add_subplot(1, 3, 3, adjustable='box', aspect=1.0)
                ax3.imshow(volume[:, :, center[2]].transpose(),
                           interpolation='none', cmap=plt.cm.gray)
                circ3 = plt.Circle((center[0], center[1]), 24, color='y', fill=False)
                ax3.add_patch(circ3)

                plt.tight_layout()
                fig.savefig(str(sample_id) + '_' + str(nidx) + '.jpg')

            # apply spacing
            nodule = nodule * spacing
            print 'after spacing', nodule

            # Find the closest region of interest
            closest_roi = None
            min_distance = 99999999.
            for roi in rois:
                md = max(roi)
                if md > max_dim:
                    max_dim = md
                distance = sum((roi - nodule)**2)**(0.5)
                if distance < min_distance:
                    min_distance = distance
                    closest_roi = roi

            print 'max_dim', max_dim
            print 'n', n
            print 'closest_roi', closest_roi
            print 'min_distance', min_distance
            print 'diameter', diameter_in_mm
            if min_distance < diameter_in_mm:
                n_found += 1
                print 'found', n_found, '/', n_nodules

    print 'n_regions', n_regions
    # output_shape=(128, 128, 128),
    # norm_patch_size=(128, 128, 128),
    # augmentation_params=AUGMENTATION_PARAMETERS
    # )
    # RescaleInput(input_scale=(0,255), output_scale=(0.0, 1.0)),
    # AugmentInput(output_shape=(160,120), **augmentation_parameters),
    # NormalizeInput(num_samples=100),
]

#####################
#     training      #
#####################

training_data = LunaDataLoader(
    only_positive=True,
    sets=TRAINING,
    epochs=10,
    preprocessors=preprocessors,
    multiprocess=False,
    crash_on_exception=True
)

chunk_size = 1
training_data.prepare()

data, segm = None, None
sample_nr = 0


def get_data():
    global data, segm
    global sample_nr
    while True:
        #####################
        #      single       #
def check_nodules(set, roi_config, fpr_config, iter_predict, x_shared,
                  prediction_folder, output_folder, plot=False):
    tags = [
        "luna:patient_id", "luna:origin", "luna:pixelspacing", "luna:labels",
        "luna:shape", "luna:3d"
    ]

    print 'checking nodules for set', set,
    set_indices = roi_config.data_loader.indices[set]
    print 'no_samples', len(set_indices)

    n_nodules, n_found_rois, n_found_in_masks, n_regions, n_regions_in_mask = 0, 0, 0, 0, 0
    all_fpr_ps = []
    tp_fpr_ps = []
    tpls_fpr_ps = []

    rd = np.array(fpr_config.nn_input_shape) / 2
    pw = rd[0]

    for _i, sample_id in enumerate(set_indices):
        # if _i == 6:
        #     break
        print "sample_id", sample_id, _i + 1, "/", len(set_indices), "in", set
        data = roi_config.data_loader.load_sample(sample_id, tags, {})

        patient_id = data["input"]["luna:patient_id"]
        origin = data["input"]["luna:origin"]
        spacing = data["input"]["luna:pixelspacing"]
        nodules = data["input"]["luna:labels"]
        shape = data["input"]["luna:shape"]
        volume = data["input"]["luna:3d"]
        pvolume = np.pad(volume, pw, 'constant')

        rois = read_rois(prediction_folder, patient_id)
        n_regions += len(rois)

        rois_in_mask = np.zeros((len(rois)))
        for idx, roi in enumerate(rois):
            if in_mask(roi / spacing, volume):
                rois_in_mask[idx] = 1
        n_regions_in_mask += np.sum(rois_in_mask)

        fpr_p = np.zeros((len(rois)), dtype=np.float32)
        batch_size = 32
        for i in range(0, len(rois), batch_size):
            brois = rois[i:i + batch_size]
            brois = np.round(brois / spacing).astype(int)
            patches = []
            for center in brois:
                patch = pvolume[pw + center[0] - rd[0]:pw + center[0] + rd[0],
                                pw + center[1] - rd[1]:pw + center[1] + rd[1],
                                pw + center[2] - rd[2]:pw + center[2] + rd[2]]
                patches.append(patch)
            patches = np.array(patches)
            ypred = get_prediction(patches, x_shared, iter_predict)
            fpr_p[i:i + batch_size] = ypred[0][:, 1]
        all_fpr_ps.append(fpr_p)

        max_dim = 0
        for nidx, nodule in enumerate(nodules):
            n_nodules += 1
            print 'nodule orig coos', nodule
            n = LunaDataLoader.world_to_voxel_coordinates(nodule[:3], origin, spacing)
            print n
            diameter_in_mm = nodule[3]
            nodule = n[:3]

            if plot:
                center = np.round(nodule).astype(int)
                fig = plt.figure()

                ax1 = fig.add_subplot(1, 3, 1, adjustable='box', aspect=1.0)
                ax1.imshow(volume[center[0], :, :].transpose(),
                           interpolation='none', cmap=plt.cm.gray)
                circ1 = plt.Circle((center[1], center[2]), 24, color='y', fill=False)
                ax1.add_patch(circ1)

                ax2 = fig.add_subplot(1, 3, 2, adjustable='box', aspect=1.0)
                ax2.imshow(volume[:, center[1], :].transpose(),
                           interpolation='none', cmap=plt.cm.gray)
                circ2 = plt.Circle((center[0], center[2]), 24, color='y', fill=False)
                ax2.add_patch(circ2)

                ax3 = fig.add_subplot(1, 3, 3, adjustable='box', aspect=1.0)
                ax3.imshow(volume[:, :, center[2]].transpose(),
                           interpolation='none', cmap=plt.cm.gray)
                circ3 = plt.Circle((center[0], center[1]), 24, color='y', fill=False)
                ax3.add_patch(circ3)

                plt.tight_layout()
                fig.savefig(str(sample_id) + '_' + str(nidx) + '.jpg')

            # apply spacing
            nodule = nodule * spacing
            print 'after spacing', nodule

            # Find the closest region of interest
            closest_roi = None
            min_distance = 99999999.
            min_idx = 99999999
            for roi_idx, roi in enumerate(rois):
                md = max(roi)
                if md > max_dim:
                    max_dim = md
                distance = sum((roi - nodule)**2)**(0.5)
                if distance < min_distance:
                    min_distance = distance
                    closest_roi = roi
                    min_idx = roi_idx

            print 'max_dim', max_dim
            print 'n', n
            print 'closest_roi', closest_roi
            print 'min_distance', min_distance
            print 'diameter', diameter_in_mm
            found = False
            if min_distance < diameter_in_mm:
                n_found_rois += 1
                print 'found', n_found_rois, '/', n_nodules
                found = True
                tp_fpr_ps.append(fpr_p[min_idx])

            # Find the closest roi in lung mask
            closest_roi = None
            min_distance = 99999999.
            min_idx = 99999999
            print 'rois_in_mask', rois_in_mask
            for roi_idx, roi in enumerate(rois):
                if rois_in_mask[roi_idx]:
                    distance = sum((roi - nodule)**2)**(0.5)
                    if distance < min_distance:
                        min_distance = distance
                        closest_roi = roi
                        min_idx = roi_idx

            print 'closest_roi in mask', closest_roi
            print 'min_distance in mask', min_distance
            if min_distance < diameter_in_mm:
                n_found_in_masks += 1
                print 'found in mask', n_found_in_masks, '/', n_nodules
                tpls_fpr_ps.append(fpr_p[min_idx])
            elif found:
                plot_masks(closest_roi / spacing, volume, sample_id, nidx)

    print 'n_regions', n_regions
    print 'n_regions in lung masks', n_regions_in_mask

    tp_fpr_ps = np.hstack(tp_fpr_ps)
    tpls_fpr_ps = np.hstack(tpls_fpr_ps)
    for element in all_fpr_ps:
        print element.shape
    all_fpr_ps = np.hstack(all_fpr_ps)

    for pcutoff in [0.01, 0.015, 0.02, 0.025, 0.03, 0.035, 0.04, 0.05, 0.1, 0.2, 0.5]:
        print 'cutoff', pcutoff
        print 'tp_fpr_ps', np.sum(tp_fpr_ps > pcutoff), '/', len(tp_fpr_ps)
        print 'tpls_fpr_ps', np.sum(tpls_fpr_ps > pcutoff), '/', len(tpls_fpr_ps)
        print 'all_fpr_ps', np.sum(all_fpr_ps > pcutoff), '/', len(all_fpr_ps)
# the tag for the new data
tag = "luna:"

# put in the pixelspacing tag to be able to make patches
extra_tags = [tag + "pixelspacing", tag + "labels"]

# for building the segmentation model, the input tag should be replaced
replace_input_tags = {"luna:3d": tag + "3d"}  # {old: new}

# prep before patches
preprocessors = []

# prep on the patches
postpreprocessors = [ZMUV(tag + "3d", bias=-648.59027, std=679.21021)]

data_loader = LunaDataLoader(
    sets=[TRAINING, VALIDATION],
    preprocessors=preprocessors,
    only_positive=True,
    epochs=1,
    multiprocess=False,
    crash_on_exception=True)

batch_size = 1  # only works with 1


# function to call to extract nodules from the fully reconstructed segmentation
def extract_nodules(segmentation):
    """segmentation is a 3D array"""
    rois = blob_dog(segmentation, min_sigma=1, max_sigma=15, threshold=0.1)
    if rois.shape[0] > 0:
        rois = rois[:, :3]  # ignore diameter
    else:
        return None
    return rois
# from interfaces.preprocess import AugmentInput, RescaleInput

"Put in here the preprocessors for your data." \
"They will be run consecutively on the data dict of the dataloader, in the order of your list."
preprocessors = [
    Augment3D(
        tags=["luna:segmentation"],
        output_shape=(256, 256, 256),
        norm_patch_shape=(256, 256, 256)),
]

#####################
#     training      #
#####################

training_data = LunaDataLoader(
    sets=TRAINING,
    epochs=1,
    preprocessors=preprocessors,
    multiprocess=False,
    crash_on_exception=True)

chunk_size = 1
training_data.prepare()

if True:
    print training_data.number_of_samples

    batches = training_data.generate_batch(
        chunk_size=chunk_size,
        required_input={},
        required_output={"luna:segmentation": None},
    )
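# A minimal sketch of what "run consecutively" means here: the dataloader is
# assumed to call each preprocessor's process(sample) in list order, each one
# mutating the sample dict in place (compare the process() methods earlier in
# this document). Illustrative only; the actual dataloader may differ.
def apply_preprocessors_sketch(preprocessors, sample):
    for preprocessor in preprocessors:
        preprocessor.process(sample)
    return sample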
def check_nodules(set, roi_config, roi_ls_fpr_folder):
    tags = [
        "luna:patient_id", "luna:labels", "luna:origin", "luna:pixelspacing"
    ]

    print 'checking nodules for set', set,
    set_indices = roi_config.data_loader.indices[set]
    print 'no_samples', len(set_indices)

    n_nodules, n_found_rois, n_found_in_masks, n_regions, n_regions_in_mask = 0, 0, 0, 0, 0
    all_fpr_ps = []
    tp_fpr_ps = []
    tp_fpr_rank = []
    tpls_fpr_ps = []
    tpls_fpr_rank = []

    for _i, sample_id in enumerate(set_indices):
        # if _i == 10:
        #     break
        print "sample_id", sample_id, _i + 1, "/", len(set_indices), "in", set
        data = roi_config.data_loader.load_sample(sample_id, tags, {})

        patient_id = data["input"]["luna:patient_id"]
        nodules = data["input"]["luna:labels"]
        origin = data["input"]["luna:origin"]
        spacing = data["input"]["luna:pixelspacing"]

        roi_dict = read_dict(roi_ls_fpr_folder, patient_id)
        rois = roi_dict['rois']
        n_regions += len(rois)
        rois_in_mask = roi_dict['in_mask']
        n_regions_in_mask += np.sum(rois_in_mask)
        fpr_p = roi_dict['fpr_p']
        all_fpr_ps.append(fpr_p)

        fpr_lung_p = np.copy(fpr_p)
        fpr_lung_p[rois_in_mask == 0] = 0

        # save rois that are above the
        rank_rois = len(fpr_p) - rankdata(fpr_p).astype(int)
        rank_rois_lung = len(fpr_lung_p) - rankdata(fpr_lung_p).astype(int)

        max_dim = 0
        for nidx, nodule in enumerate(nodules):
            n_nodules += 1
            print 'nodule orig coos', nodule
            n = LunaDataLoader.world_to_voxel_coordinates(nodule[:3], origin, spacing)
            print n
            diameter_in_mm = nodule[3]
            nodule = n[:3]

            # apply spacing
            nodule = nodule * spacing
            print 'after spacing', nodule

            # Find the closest region of interest
            closest_roi = None
            min_distance = 99999999.
            min_idx = 99999999
            for roi_idx, roi in enumerate(rois):
                md = max(roi)
                if md > max_dim:
                    max_dim = md
                distance = sum((roi - nodule)**2)**(0.5)
                if distance < min_distance:
                    min_distance = distance
                    closest_roi = roi
                    min_idx = roi_idx

            print 'max_dim', max_dim
            print 'n', n
            print 'closest_roi', closest_roi
            print 'min_distance', min_distance
            print 'diameter', diameter_in_mm
            found = False
            if min_distance < diameter_in_mm:
                n_found_rois += 1
                print 'found', n_found_rois, '/', n_nodules
                found = True
                tp_fpr_ps.append(fpr_p[min_idx])
                tp_fpr_rank.append(rank_rois[min_idx])

            # Find the closest roi in lung mask
            closest_roi = None
            min_distance = 99999999.
            min_idx = 99999999
            for roi_idx, roi in enumerate(rois):
                if rois_in_mask[roi_idx]:
                    distance = sum((roi - nodule)**2)**(0.5)
                    if distance < min_distance:
                        min_distance = distance
                        closest_roi = roi
                        min_idx = roi_idx

            print 'closest_roi in mask', closest_roi
            print 'min_distance in mask', min_distance
            if min_distance < diameter_in_mm:
                n_found_in_masks += 1
                print 'found in mask', n_found_in_masks, '/', n_nodules
                tpls_fpr_ps.append(fpr_p[min_idx])
                tpls_fpr_rank.append(rank_rois_lung[min_idx])
            # elif found:
            #     plot_masks(closest_roi/spacing, volume, sample_id, nidx)

    print 'n_regions', n_regions
    print 'n_regions in lung masks', n_regions_in_mask

    tp_fpr_ps = np.hstack(tp_fpr_ps)
    tpls_fpr_ps = np.hstack(tpls_fpr_ps)
    all_fpr_ps = np.hstack(all_fpr_ps)
    tp_fpr_rank = np.hstack(tp_fpr_rank)
    tpls_fpr_rank = np.hstack(tpls_fpr_rank)

    print '============ Sweep p cutoff ================'
    for pcutoff in [0.01, 0.015, 0.02, 0.025, 0.03, 0.035, 0.04, 0.05, 0.1, 0.2, 0.5]:
        print 'cutoff', pcutoff
        print 'tp_fpr_ps', np.sum(tp_fpr_ps > pcutoff), '/', len(tp_fpr_ps)
        print 'tpls_fpr_ps', np.sum(tpls_fpr_ps > pcutoff), '/', len(tpls_fpr_ps)
        print 'all_fpr_ps', np.sum(all_fpr_ps > pcutoff), '/', len(all_fpr_ps)

    print '============ Sweep Top x ================'
    for topx in [4, 6, 8, 10, 12, 14, 16]:
        print 'top', topx
        print 'tp', np.sum(tp_fpr_rank < topx), '/', len(tp_fpr_rank)
        print 'tp_ls', np.sum(tpls_fpr_rank < topx), '/', len(tpls_fpr_rank)
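# A small illustration of the ranking trick used above: scipy.stats.rankdata
# assigns rank 1 to the smallest value, so subtracting from the array length
# leaves 0 for the highest-probability ROI (assuming no ties). The values below
# are made up, purely for demonstration.
if False:
    import numpy as np
    from scipy.stats import rankdata

    fpr_p_demo = np.array([0.05, 0.90, 0.20])
    ranks = len(fpr_p_demo) - rankdata(fpr_p_demo).astype(int)
    print ranks       # -> [2 0 1]: the 0.90 ROI gets rank 0 (the top ROI)
    print ranks < 2   # membership of each ROI in the "top 2"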
        output_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_SIZE),  # in pixels
        norm_patch_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_SIZE),  # in mm
        augmentation_params=AUGMENTATION_PARAMETERS),
    ZMUV("luna:3d", bias=-648.59027, std=679.21021),
]

#####################
#     training      #
#####################

"This is the train dataloader. We will train until this one stops loading data."
"You can set the number of epochs, the datasets and whether you want it multiprocessed."
training_data = LunaDataLoader(
    only_positive=True,
    pick_nodule=True,
    sets=TRAINING,
    epochs=num_epochs,
    preprocessors=preprocessors,
    multiprocess=True,
    crash_on_exception=True,
)

"Schedule the reduction of the learning rate. When indexed with the number of epochs, it should return a value for the learning rate."
learning_rate_schedule = {
    0: 1e-5,
    int(num_epochs * 0.4): 5e-6,
    int(num_epochs * 0.5): 3e-6,
    int(num_epochs * 0.6): 2e-6,
    int(num_epochs * 0.85): 1e-6,
    int(num_epochs * 0.95): 5e-7
}
tags=["luna:3d", "luna:segmentation"], output_shape=(128, 128, 128), # in pixels norm_patch_shape=(128, 128, 128), # in mms augmentation_params=AUGMENTATION_PARAMETERS), ZMUV("luna:3d", bias=-648.59027, std=679.21021), ] ##################### # training # ##################### "This is the train dataloader. We will train until this one stops loading data." "You can set the number of epochs, the datasets and if you want it multiprocessed" training_data = LunaDataLoader( sets=TRAINING, epochs=10.0, preprocessors=preprocessors, multiprocess=True, crash_on_exception=False, ) "Schedule the reducing of the learning rate. On indexing with the number of epochs, it should return a value for the learning rate." learning_rate_schedule = { 0.0: 0.0001, 9.0: 0.00001, } "The function to build updates." build_updates = lasagne.updates.adam ##################### # validation # #####################
"change_brightness": [0, 0], } preprocessors = [ Augment3D(tags=[]) #RescaleInput(input_scale=(0,255), output_scale=(0.0, 1.0)), #AugmentInput(output_shape=(160,120),**augmentation_parameters), #NormalizeInput(num_samples=100), ] ##################### # training # ##################### training_data = LunaDataLoader(sets=TRAINING, epochs=1, preprocessors=preprocessors, multiprocess=False, crash_on_exception=True) chunk_size = 1 training_data.prepare() if False: print training_data.number_of_samples batches = training_data.generate_batch( chunk_size=chunk_size, required_input={ "luna:shape": (chunk_size, 3), "luna:pixelspacing": (chunk_size, 3) }, #"luna:3d":(chunk_size,512,512,512),