def run(img_folder, dl_state, fprop_mode=False,
        img_size=(1152, 896), img_height=None, img_scale=None,
        rescale_factor=None,
        equalize_hist=False, featurewise_center=False, featurewise_mean=71.8,
        net='vgg19', batch_size=128, patch_size=256, stride=8,
        avg_pool_size=(7, 7), hm_strides=(1, 1),
        pat_csv='./full_img/pat.csv', pat_list=None,
        out='./output/prob_heatmap.pkl'):
    '''Sweep mammograms with trained DL model to create prob heatmaps
    '''
    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    rng = RandomState(random_seed)  # an rng used across board.
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # Create image generator.
    imgen = DMImageDataGenerator(featurewise_center=featurewise_center)
    imgen.mean = featurewise_mean

    # Get image and label lists.
    df = pd.read_csv(pat_csv, header=0)
    df = df.set_index(['patient_id', 'side'])
    df.sort_index(inplace=True)
    if pat_list is not None:
        pat_ids = pd.read_csv(pat_list, header=0).values.ravel()
        pat_ids = pat_ids.tolist()
        print("Read %d patient IDs" % (len(pat_ids)))
        df = df.loc[pat_ids]

    # Load DL model, preprocess.
    print("Load patch classifier:", dl_state)
    sys.stdout.flush()
    dl_model, preprocess_input, _ = get_dl_model(net, resume_from=dl_state)
    if fprop_mode:
        dl_model = add_top_layers(dl_model, img_size, patch_net=net,
                                  avg_pool_size=avg_pool_size,
                                  return_heatmap=True, hm_strides=hm_strides)
    if gpu_count > 1:
        print("Make the model parallel on %d GPUs" % (gpu_count))
        sys.stdout.flush()
        dl_model, _ = make_parallel(dl_model, gpu_count)
        parallelized = True
    else:
        parallelized = False
    if featurewise_center:
        preprocess_input = None

    # Sweep the whole images and classify patches.
    def const_filename(pat, side, view):
        basename = '_'.join([pat, side, view]) + '.png'
        return os.path.join(img_folder, basename)

    print("Generate prob heatmaps")
    sys.stdout.flush()
    heatmaps = []
    cases_seen = 0
    nb_cases = len(df.index.unique())
    for i, (pat, side) in enumerate(df.index.unique()):
        ## DEBUG ##
        # if i >= 10:
        #     break
        ## DEBUG ##
        cancer = df.loc[pat].loc[side]['cancer']
        cc_fn = const_filename(pat, side, 'CC')
        if os.path.isfile(cc_fn):
            if fprop_mode:
                # `data_format` is expected to be defined at module level.
                cc_x = read_img_for_pred(
                    cc_fn, equalize_hist=equalize_hist, data_format=data_format,
                    dup_3_channels=True,
                    transformer=imgen.random_transform,
                    standardizer=imgen.standardize,
                    target_size=img_size, target_scale=img_scale,
                    rescale_factor=rescale_factor)
                cc_x = cc_x.reshape((1,) + cc_x.shape)
                cc_hm = dl_model.predict_on_batch(cc_x)[0]
                # import pdb; pdb.set_trace()
            else:
                cc_hm = get_prob_heatmap(
                    cc_fn, img_height, img_scale, patch_size, stride,
                    dl_model, batch_size,
                    featurewise_center=featurewise_center,
                    featurewise_mean=featurewise_mean,
                    preprocess=preprocess_input,
                    parallelized=parallelized, equalize_hist=equalize_hist)
        else:
            cc_hm = None
        mlo_fn = const_filename(pat, side, 'MLO')
        if os.path.isfile(mlo_fn):
            if fprop_mode:
                mlo_x = read_img_for_pred(
                    mlo_fn, equalize_hist=equalize_hist, data_format=data_format,
                    dup_3_channels=True,
                    transformer=imgen.random_transform,
                    standardizer=imgen.standardize,
                    target_size=img_size, target_scale=img_scale,
                    rescale_factor=rescale_factor)
                mlo_x = mlo_x.reshape((1,) + mlo_x.shape)
                mlo_hm = dl_model.predict_on_batch(mlo_x)[0]
            else:
                mlo_hm = get_prob_heatmap(
                    mlo_fn, img_height, img_scale, patch_size, stride,
                    dl_model, batch_size,
                    featurewise_center=featurewise_center,
                    featurewise_mean=featurewise_mean,
                    preprocess=preprocess_input,
                    parallelized=parallelized, equalize_hist=equalize_hist)
        else:
            mlo_hm = None
        heatmaps.append({'patient_id': pat, 'side': side, 'cancer': cancer,
                         'cc': cc_hm, 'mlo': mlo_hm})
        print("scored %d/%d cases" % (i + 1, nb_cases))
        sys.stdout.flush()
    print("Done.")

    # Save the result.
    print("Saving result to external files.")
    sys.stdout.flush()
    pickle.dump(heatmaps, open(out, 'wb'))  # binary mode for pickle.
    print("Done.")
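
# --- Illustrative sketch (not from this repository) ---------------------------
# get_prob_heatmap scores a full mammogram with the patch classifier in a
# sliding-window fashion: crop patches at a fixed stride, score each patch, and
# arrange the class probabilities on a grid. The helper below is a minimal,
# hypothetical restatement of that idea using a fake classifier; the names,
# shapes, and the toy predict function are assumptions, not the repo's API.
import numpy as np

def sliding_window_heatmap(img, patch_size, stride, predict_fn, batch_size=32):
    """Score every patch of a 2-D image and return a (rows, cols, nb_class) grid."""
    rows = (img.shape[0] - patch_size) // stride + 1
    cols = (img.shape[1] - patch_size) // stride + 1
    patches = np.stack([
        img[r * stride:r * stride + patch_size, c * stride:c * stride + patch_size]
        for r in range(rows) for c in range(cols)])
    patches = patches[..., np.newaxis]  # add a channel axis.
    probs = np.concatenate([
        predict_fn(patches[i:i + batch_size])
        for i in range(0, len(patches), batch_size)])
    return probs.reshape(rows, cols, -1)

# Toy usage with a random image and a stand-in for dl_model.predict:
toy_rng = np.random.RandomState(0)
toy_img = toy_rng.rand(64, 64)
fake_clf = lambda x: toy_rng.rand(len(x), 3)
print(sliding_window_heatmap(toy_img, patch_size=16, stride=8,
                             predict_fn=fake_clf).shape)  # (7, 7, 3)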
def run(train_dir, val_dir, test_dir, img_size=[256, 256], img_scale=None,
        rescale_factor=None,
        featurewise_center=True, featurewise_mean=59.6,
        equalize_hist=True, augmentation=False,
        class_list=['background', 'malignant', 'benign'],
        batch_size=64, train_bs_multiplier=.5,
        nb_epoch=5, top_layer_epochs=10, all_layer_epochs=20,
        load_val_ram=False, load_train_ram=False,
        net='resnet50', use_pretrained=True,
        nb_init_filter=32, init_filter_size=5, init_conv_stride=2,
        pool_size=2, pool_stride=2,
        weight_decay=.0001, weight_decay2=.0001,
        alpha=.0001, l1_ratio=.0,
        inp_dropout=.0, hidden_dropout=.0, hidden_dropout2=.0,
        optim='sgd', init_lr=.01, lr_patience=10, es_patience=25,
        resume_from=None, auto_batch_balance=False,
        pos_cls_weight=1.0, neg_cls_weight=1.0,
        top_layer_nb=None, top_layer_multiplier=.1, all_layer_multiplier=.01,
        best_model='./modelState/patch_clf.h5',
        final_model="NOSAVE"):
    '''Train a deep learning model for patch classifications
    '''
    best_model_dir = os.path.dirname(best_model)
    if not os.path.exists(best_model_dir):
        os.makedirs(best_model_dir)
    if final_model != "NOSAVE":
        final_model_dir = os.path.dirname(final_model)
        if not os.path.exists(final_model_dir):
            os.makedirs(final_model_dir)

    # ======= Environmental variables ======== #
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # ========= Image generator ============== #
    if featurewise_center:
        # Feature-wise centering: subtract a fixed dataset-wide mean from every image.
        print("Using feature-wise centering, mean:", featurewise_mean)
        train_imgen = DMImageDataGenerator(featurewise_center=True)
        val_imgen = DMImageDataGenerator(featurewise_center=True)
        test_imgen = DMImageDataGenerator(featurewise_center=True)
        train_imgen.mean = featurewise_mean
        val_imgen.mean = featurewise_mean
        test_imgen.mean = featurewise_mean
    else:
        train_imgen = DMImageDataGenerator()
        val_imgen = DMImageDataGenerator()
        test_imgen = DMImageDataGenerator()

    # Add augmentation options.
    if augmentation:
        train_imgen.horizontal_flip = True  # random horizontal flips.
        train_imgen.vertical_flip = True  # random vertical flips.
        train_imgen.rotation_range = 25.  # in degree.
        train_imgen.shear_range = .2  # in radians.
        train_imgen.zoom_range = [.8, 1.2]  # in proportion, as [lower, upper].
        train_imgen.channel_shift_range = 20.  # in pixel intensity values.

    # ================= Model creation ============== #
    # weight_decay/weight_decay2 penalize model complexity to curb overfitting;
    # inp_dropout/hidden_dropout randomly drop input/hidden units for the same reason.
    # init_conv_stride is the stride of the initial convolution; pool_size and
    # pool_stride configure the pooling layers. l1_ratio controls the L1/L2 mix of
    # the weight penalty and alpha its strength.
    model, preprocess_input, top_layer_nb = get_dl_model(
        net, nb_class=len(class_list), use_pretrained=use_pretrained,
        resume_from=resume_from, img_size=img_size, top_layer_nb=top_layer_nb,
        weight_decay=weight_decay, hidden_dropout=hidden_dropout,
        nb_init_filter=nb_init_filter, init_filter_size=init_filter_size,
        init_conv_stride=init_conv_stride, pool_size=pool_size,
        pool_stride=pool_stride, alpha=alpha, l1_ratio=l1_ratio,
        inp_dropout=inp_dropout)
    if featurewise_center:
        preprocess_input = None
    if gpu_count > 1:
        model, org_model = make_parallel(model, gpu_count)
    else:
        org_model = model

    # ============ Train & validation set =============== #
    train_bs = int(batch_size * train_bs_multiplier)
    if net != 'yaroslav':
        dup_3_channels = True
    else:
        dup_3_channels = False
    if load_train_ram:
        raw_imgen = DMImageDataGenerator()
        print("Create generator for raw train set")
        # flow_from_directory yields batches of augmented/normalized images from a
        # directory in an endless loop; equalize_hist applies histogram equalization.
        raw_generator = raw_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            rescale_factor=rescale_factor,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            batch_size=train_bs, shuffle=False)
        print("Loading raw train set into RAM.")
        sys.stdout.flush()
        raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample)
        print("Done.")
        sys.stdout.flush()
        print("Create generator for train set")
        # flow yields augmented/standardized batches from the in-memory arrays.
        train_generator = train_imgen.flow(
            raw_set[0], raw_set[1], batch_size=train_bs,
            auto_batch_balance=auto_batch_balance,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)
    else:
        print("Create generator for train set")
        train_generator = train_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            rescale_factor=rescale_factor,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            auto_batch_balance=auto_batch_balance, batch_size=train_bs,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)

    print("Create generator for val set")
    validation_set = val_imgen.flow_from_directory(
        val_dir, target_size=img_size, target_scale=img_scale,
        rescale_factor=rescale_factor,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    sys.stdout.flush()
    if load_val_ram:
        print("Loading validation set into RAM.")
        sys.stdout.flush()
        validation_set = load_dat_ram(validation_set, validation_set.nb_sample)
        print("Done.")
        sys.stdout.flush()

    # ==================== Model training ==================== #
    # Do 3-stage training.
    train_batches = int(train_generator.nb_sample / train_bs) + 1
    if isinstance(validation_set, tuple):
        val_samples = len(validation_set[0])
    else:
        val_samples = validation_set.nb_sample
    validation_steps = int(val_samples / batch_size)
    #### DEBUG ####
    # val_samples = 100
    #### DEBUG ####
    # import pdb; pdb.set_trace()
    model, loss_hist, acc_hist = do_3stage_training(
        model, org_model, train_generator, validation_set, validation_steps,
        best_model, train_batches, top_layer_nb, net,
        nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs,
        all_layer_epochs=all_layer_epochs,
        use_pretrained=use_pretrained, optim=optim, init_lr=init_lr,
        top_layer_multiplier=top_layer_multiplier,
        all_layer_multiplier=all_layer_multiplier,
        es_patience=es_patience, lr_patience=lr_patience,
        auto_batch_balance=auto_batch_balance, nb_class=len(class_list),
        pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight,
        nb_worker=nb_worker, weight_decay2=weight_decay2,
        hidden_dropout2=hidden_dropout2)

    # Training report.
    if len(loss_hist) > 0:
        min_loss_locs, = np.where(loss_hist == min(loss_hist))
        best_val_loss = loss_hist[min_loss_locs[0]]
        best_val_accuracy = acc_hist[min_loss_locs[0]]
        print("\n==== Training summary ====")
        print("Minimum val loss achieved at epoch:", min_loss_locs[0] + 1)
        print("Best val loss:", best_val_loss)
        print("Best val accuracy:", best_val_accuracy)
    if final_model != "NOSAVE":
        model.save(final_model)

    # ==== Predict on test set ==== #
    print("\n==== Predicting on test set ====")
    test_generator = test_imgen.flow_from_directory(
        test_dir, target_size=img_size, target_scale=img_scale,
        rescale_factor=rescale_factor,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    if test_generator.nb_sample:
        print("Test samples =", test_generator.nb_sample)
        print("Load saved best model:", best_model + '.')
        sys.stdout.flush()
        org_model.load_weights(best_model)
        print("Done.")
        test_steps = int(test_generator.nb_sample / batch_size)
        #### DEBUG ####
        # test_samples = 10
        #### DEBUG ####
        test_res = model.evaluate_generator(
            test_generator, test_steps, nb_worker=nb_worker,
            pickle_safe=True if nb_worker > 1 else False)
        print("Evaluation result on test set:", test_res)
    else:
        print("Skip testing because no test sample is found.")
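
# --- Illustrative sketch (not from this repository) ---------------------------
# When featurewise_center=True, the generators above subtract a fixed,
# dataset-wide pixel mean (featurewise_mean, e.g. 59.6) from every image and the
# pretrained model's preprocess_input is disabled. A tiny numpy restatement of
# that centering step; DMImageDataGenerator.standardize handles this internally,
# so the helper name below is hypothetical.
import numpy as np

def featurewise_center_batch(batch, mean=59.6):
    """Subtract a scalar dataset mean from a batch of images."""
    return batch - mean

toy_batch = np.full((2, 4, 4, 1), 70.0)  # two 4x4 single-channel patches
print(featurewise_center_batch(toy_batch).mean())  # ~10.4 (up to float rounding)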
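
# --- Illustrative sketch (not from this repository) ---------------------------
# do_3stage_training, used by both the patch-classifier training above and the
# finetuning script below, trains in three stages: (1) only the newly added
# classifier head at init_lr, (2) the top portion of the pretrained backbone at
# init_lr * top_layer_multiplier, (3) all layers at init_lr * all_layer_multiplier,
# with early stopping (es_patience) and LR reduction on plateau (lr_patience)
# handled internally. The function below only restates the implied learning-rate
# schedule; its name and dict keys are made up for illustration.
def three_stage_lrs(init_lr=.01, top_layer_multiplier=.1, all_layer_multiplier=.01):
    return {
        'stage1_new_top_layers': init_lr,
        'stage2_top_backbone_layers': init_lr * top_layer_multiplier,
        'stage3_all_layers': init_lr * all_layer_multiplier,
    }

print(three_stage_lrs())  # 0.01 -> 0.001 -> 0.0001 (up to float rounding)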
def run(img_folder, dl_state, best_model, img_extension='dcm',
        img_height=1024, img_scale=255., equalize_hist=False,
        featurewise_center=False, featurewise_mean=91.6,
        neg_vs_pos_ratio=1., val_size=.1, test_size=.15,
        net='vgg19', batch_size=128, train_bs_multiplier=.5,
        patch_size=256, stride=8,
        roi_cutoff=.9, bkg_cutoff=[.5, 1.], sample_bkg=True,
        train_out='./scratch/train', val_out='./scratch/val',
        test_out='./scratch/test', out_img_ext='png',
        neg_name='benign', pos_name='malignant', bkg_name='background',
        augmentation=True, load_train_ram=False, load_val_ram=False,
        top_layer_nb=None, nb_epoch=10, top_layer_epochs=0, all_layer_epochs=0,
        optim='sgd', init_lr=.01,
        top_layer_multiplier=.01, all_layer_multiplier=.0001,
        es_patience=5, lr_patience=2,
        weight_decay2=.01, bias_multiplier=.1, hidden_dropout2=.0,
        exam_tsv='./metadata/exams_metadata.tsv',
        img_tsv='./metadata/images_crosswalk.tsv',
        out='./modelState/subj_lists.pkl'):
    '''Finetune a trained DL model on a different dataset
    '''
    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    rng = RandomState(random_seed)  # an rng used across board.
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # Load and split image and label lists.
    meta_man = DMMetaManager(exam_tsv=exam_tsv, img_tsv=img_tsv,
                             img_folder=img_folder, img_extension=img_extension)
    subj_list, subj_labs = meta_man.get_subj_labs()
    subj_labs = np.array(subj_labs)
    print("Found %d subjects" % (len(subj_list)))
    print("cancer patients=%d, normal patients=%d"
          % ((subj_labs == 1).sum(), (subj_labs == 0).sum()))
    if neg_vs_pos_ratio is not None:
        subj_list, subj_labs = DMMetaManager.subset_subj_list(
            subj_list, subj_labs, neg_vs_pos_ratio, random_seed)
        subj_labs = np.array(subj_labs)
        print("After subsetting, there are %d subjects" % (len(subj_list)))
        print("cancer patients=%d, normal patients=%d"
              % ((subj_labs == 1).sum(), (subj_labs == 0).sum()))
    subj_train, subj_test, labs_train, labs_test = train_test_split(
        subj_list, subj_labs, test_size=test_size, stratify=subj_labs,
        random_state=random_seed)
    subj_train, subj_val, labs_train, labs_val = train_test_split(
        subj_train, labs_train, test_size=val_size, stratify=labs_train,
        random_state=random_seed)

    # Get image lists.
    # >>>> Debug <<<< #
    # subj_train = subj_train[:5]
    # subj_val = subj_val[:5]
    # subj_test = subj_test[:5]
    # >>>> Debug <<<< #
    print("Get flattened image lists")
    img_train, ilab_train = meta_man.get_flatten_img_list(subj_train)
    img_val, ilab_val = meta_man.get_flatten_img_list(subj_val)
    img_test, ilab_test = meta_man.get_flatten_img_list(subj_test)
    ilab_train = np.array(ilab_train)
    ilab_val = np.array(ilab_val)
    ilab_test = np.array(ilab_test)
    print("On train set, positive img=%d, negative img=%d"
          % ((ilab_train == 1).sum(), (ilab_train == 0).sum()))
    print("On val set, positive img=%d, negative img=%d"
          % ((ilab_val == 1).sum(), (ilab_val == 0).sum()))
    print("On test set, positive img=%d, negative img=%d"
          % ((ilab_test == 1).sum(), (ilab_test == 0).sum()))
    sys.stdout.flush()

    # Save the subj lists.
    print("Saving subject lists to external files.")
    sys.stdout.flush()
    pickle.dump((subj_train, subj_val, subj_test), open(out, 'wb'))  # binary mode for pickle.
    print("Done.")

    # Load DL model, preprocess function.
    print("Load patch classifier:", dl_state)
    sys.stdout.flush()
    dl_model, preprocess_input, top_layer_nb = get_dl_model(
        net, use_pretrained=True, resume_from=dl_state,
        top_layer_nb=top_layer_nb)
    if featurewise_center:
        preprocess_input = None
    if gpu_count > 1:
        print("Make the model parallel on %d GPUs" % (gpu_count))
        sys.stdout.flush()
        dl_model, org_model = make_parallel(dl_model, gpu_count)
        parallelized = True
    else:
        org_model = dl_model
        parallelized = False

    # Sweep the whole images and classify patches.
    print("Score image patches and write them to:", train_out)
    sys.stdout.flush()
    nb_roi_train, nb_bkg_train = score_write_patches(
        img_train, ilab_train, img_height, img_scale,
        patch_size, stride, dl_model, batch_size,
        neg_out=os.path.join(train_out, neg_name),
        pos_out=os.path.join(train_out, pos_name),
        bkg_out=os.path.join(train_out, bkg_name),
        preprocess=preprocess_input, equalize_hist=equalize_hist,
        featurewise_center=featurewise_center, featurewise_mean=featurewise_mean,
        roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg,
        img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized)
    print("Wrote %d ROI and %d bkg patches" % (nb_roi_train, nb_bkg_train))
    ####
    print("Score image patches and write them to:", val_out)
    sys.stdout.flush()
    nb_roi_val, nb_bkg_val = score_write_patches(
        img_val, ilab_val, img_height, img_scale,
        patch_size, stride, dl_model, batch_size,
        neg_out=os.path.join(val_out, neg_name),
        pos_out=os.path.join(val_out, pos_name),
        bkg_out=os.path.join(val_out, bkg_name),
        preprocess=preprocess_input, equalize_hist=equalize_hist,
        featurewise_center=featurewise_center, featurewise_mean=featurewise_mean,
        roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg,
        img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized)
    print("Wrote %d ROI and %d bkg patches" % (nb_roi_val, nb_bkg_val))
    ####
    print("Score image patches and write them to:", test_out)
    sys.stdout.flush()
    nb_roi_test, nb_bkg_test = score_write_patches(
        img_test, ilab_test, img_height, img_scale,
        patch_size, stride, dl_model, batch_size,
        neg_out=os.path.join(test_out, neg_name),
        pos_out=os.path.join(test_out, pos_name),
        bkg_out=os.path.join(test_out, bkg_name),
        preprocess=preprocess_input, equalize_hist=equalize_hist,
        featurewise_center=featurewise_center, featurewise_mean=featurewise_mean,
        roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg,
        img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized)
    print("Wrote %d ROI and %d bkg patches" % (nb_roi_test, nb_bkg_test))
    sys.stdout.flush()

    # ==== Image generators ==== #
    if featurewise_center:
        train_imgen = DMImageDataGenerator(featurewise_center=True)
        val_imgen = DMImageDataGenerator(featurewise_center=True)
        test_imgen = DMImageDataGenerator(featurewise_center=True)
        train_imgen.mean = featurewise_mean
        val_imgen.mean = featurewise_mean
        test_imgen.mean = featurewise_mean
    else:
        train_imgen = DMImageDataGenerator()
        val_imgen = DMImageDataGenerator()
        test_imgen = DMImageDataGenerator()
    if augmentation:
        train_imgen.horizontal_flip = True
        train_imgen.vertical_flip = True
        train_imgen.rotation_range = 45.
        train_imgen.shear_range = np.pi / 8.

    # ==== Train & val set ==== #
    # Note: the images are histogram equalized before they were written to
    # external folders.
    train_bs = int(batch_size * train_bs_multiplier)
    if load_train_ram:
        raw_imgen = DMImageDataGenerator()
        print("Create generator for raw train set")
        raw_generator = raw_imgen.flow_from_directory(
            train_out, target_size=(patch_size, patch_size),
            target_scale=img_scale, equalize_hist=False, dup_3_channels=True,
            classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
            batch_size=train_bs, shuffle=False)
        print("Loading raw train set into RAM.")
        sys.stdout.flush()
        raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample)
        print("Done.")
        sys.stdout.flush()
        print("Create generator for train set")
        train_generator = train_imgen.flow(
            raw_set[0], raw_set[1], batch_size=train_bs,
            auto_batch_balance=True, preprocess=preprocess_input,
            shuffle=True, seed=random_seed)
    else:
        print("Create generator for train set")
        train_generator = train_imgen.flow_from_directory(
            train_out, target_size=(patch_size, patch_size),
            target_scale=img_scale, equalize_hist=False, dup_3_channels=True,
            classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
            auto_batch_balance=True, batch_size=train_bs,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)

    print("Create generator for val set")
    sys.stdout.flush()
    validation_set = val_imgen.flow_from_directory(
        val_out, target_size=(patch_size, patch_size), target_scale=img_scale,
        equalize_hist=False, dup_3_channels=True,
        classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    val_samples = validation_set.nb_sample
    if parallelized and val_samples % batch_size != 0:
        val_samples -= val_samples % batch_size
    print("Validation samples =", val_samples)
    sys.stdout.flush()
    if load_val_ram:
        print("Loading validation set into RAM.")
        sys.stdout.flush()
        validation_set = load_dat_ram(validation_set, val_samples)
        print("Done.")
        print("Loaded %d val samples" % (len(validation_set[0])))
        sys.stdout.flush()

    # ==== Model finetuning ==== #
    train_batches = int(train_generator.nb_sample / train_bs) + 1
    samples_per_epoch = train_bs * train_batches
    # import pdb; pdb.set_trace()
    dl_model, loss_hist, acc_hist = do_3stage_training(
        dl_model, org_model, train_generator, validation_set, val_samples,
        best_model, samples_per_epoch, top_layer_nb, net,
        nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs,
        all_layer_epochs=all_layer_epochs,
        use_pretrained=True, optim=optim, init_lr=init_lr,
        top_layer_multiplier=top_layer_multiplier,
        all_layer_multiplier=all_layer_multiplier,
        es_patience=es_patience, lr_patience=lr_patience,
        auto_batch_balance=True, nb_worker=nb_worker,
        weight_decay2=weight_decay2, bias_multiplier=bias_multiplier,
        hidden_dropout2=hidden_dropout2)

    # Training report.
    min_loss_locs, = np.where(loss_hist == min(loss_hist))
    best_val_loss = loss_hist[min_loss_locs[0]]
    best_val_accuracy = acc_hist[min_loss_locs[0]]
    print("\n==== Training summary ====")
    print("Minimum val loss achieved at epoch:", min_loss_locs[0] + 1)
    print("Best val loss:", best_val_loss)
    print("Best val accuracy:", best_val_accuracy)

    # ==== Predict on test set ==== #
    print("\n==== Predicting on test set ====")
    print("Create generator for test set")
    test_generator = test_imgen.flow_from_directory(
        test_out, target_size=(patch_size, patch_size), target_scale=img_scale,
        equalize_hist=False, dup_3_channels=True,
        classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    test_samples = test_generator.nb_sample
    if parallelized and test_samples % batch_size != 0:
        test_samples -= test_samples % batch_size
    print("Test samples =", test_samples)
    print("Load saved best model:", best_model + '.')
    sys.stdout.flush()
    org_model.load_weights(best_model)
    print("Done.")
    test_res = dl_model.evaluate_generator(
        test_generator, test_samples, nb_worker=nb_worker,
        pickle_safe=True if nb_worker > 1 else False)
    print("Evaluation result on test set:", test_res)
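
# --- Illustrative sketch (not from this repository) ---------------------------
# score_write_patches uses the patch classifier's output to decide which patches
# are written out as ROI samples and which as background samples, controlled by
# roi_cutoff and bkg_cutoff. The selection rule below is a minimal, hypothetical
# restatement; the column layout of the score array (col 0 = background prob,
# col 1 = ROI prob) is an assumption, not the repository's actual convention.
import numpy as np

def split_patches_by_score(scores, roi_cutoff=.9, bkg_cutoff=(.5, 1.)):
    """Return indices of patches kept as ROI and as background."""
    roi_idx = np.where(scores[:, 1] >= roi_cutoff)[0]
    bkg_idx = np.where((scores[:, 0] >= bkg_cutoff[0]) &
                       (scores[:, 0] <= bkg_cutoff[1]))[0]
    return roi_idx, bkg_idx

toy_rng = np.random.RandomState(12345)
toy_scores = toy_rng.dirichlet([1., 1., 1.], size=20)  # 20 patches, 3-class probs
roi_idx, bkg_idx = split_patches_by_score(toy_scores)
print(len(roi_idx), len(bkg_idx))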
def run(train_dir, val_dir, test_dir, img_size=[256, 256], img_scale=255.,
        featurewise_center=True, featurewise_mean=59.6,
        equalize_hist=True, augmentation=False,
        class_list=['background', 'malignant', 'benign'],
        batch_size=64, train_bs_multiplier=.5,
        nb_epoch=5, top_layer_epochs=10, all_layer_epochs=20,
        load_val_ram=False, load_train_ram=False,
        net='resnet50', use_pretrained=True,
        nb_init_filter=32, init_filter_size=5, init_conv_stride=2,
        pool_size=2, pool_stride=2,
        weight_decay=.0001, weight_decay2=.0001, bias_multiplier=.1,
        alpha=.0001, l1_ratio=.0,
        inp_dropout=.0, hidden_dropout=.0, hidden_dropout2=.0,
        optim='sgd', init_lr=.01, lr_patience=10, es_patience=25,
        resume_from=None, auto_batch_balance=False,
        pos_cls_weight=1.0, neg_cls_weight=1.0,
        top_layer_nb=None, top_layer_multiplier=.1, all_layer_multiplier=.01,
        best_model='./modelState/patch_clf.h5',
        final_model="NOSAVE"):
    '''Train a deep learning model for patch classifications
    '''
    # ======= Environmental variables ======== #
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # ========= Image generator ============== #
    # if use_pretrained:  # use pretrained model's preprocessing.
    #     train_imgen = DMImageDataGenerator()
    #     val_imgen = DMImageDataGenerator()
    if featurewise_center:
        # fitgen = DMImageDataGenerator()
        # # Calculate pixel-level mean and std.
        # print "Create generator for mean and std fitting"
        # fit_patch_generator = fitgen.flow_from_directory(
        #     train_dir, target_size=img_size, target_scale=img_scale,
        #     classes=class_list, class_mode=None, batch_size=batch_size,
        #     shuffle=True, seed=random_seed)
        # sys.stdout.flush()
        # fit_X_lst = []
        # patches_seen = 0
        # while patches_seen < fit_size:
        #     X = fit_patch_generator.next()
        #     fit_X_lst.append(X)
        #     patches_seen += len(X)
        # fit_X_arr = np.concatenate(fit_X_lst)
        train_imgen = DMImageDataGenerator(featurewise_center=True)
        # featurewise_std_normalization=True)
        val_imgen = DMImageDataGenerator(featurewise_center=True)
        test_imgen = DMImageDataGenerator(featurewise_center=True)
        # featurewise_std_normalization=True)
        # train_imgen.fit(fit_X_arr)
        # print "Found mean=%.2f, std=%.2f" % (train_imgen.mean, train_imgen.std)
        # sys.stdout.flush()
        train_imgen.mean = featurewise_mean
        val_imgen.mean = featurewise_mean
        test_imgen.mean = featurewise_mean
        # del fit_X_arr, fit_X_lst
    else:
        train_imgen = DMImageDataGenerator()
        val_imgen = DMImageDataGenerator()
        test_imgen = DMImageDataGenerator()
        # train_imgen = DMImageDataGenerator(
        #     samplewise_center=True,
        #     samplewise_std_normalization=True)
        # val_imgen = DMImageDataGenerator(
        #     samplewise_center=True,
        #     samplewise_std_normalization=True)

    # Add augmentation options.
    if augmentation:
        train_imgen.horizontal_flip = True
        train_imgen.vertical_flip = True
        train_imgen.rotation_range = 45.
        train_imgen.shear_range = np.pi / 8.

    # ================= Model creation ============== #
    model, preprocess_input, top_layer_nb = get_dl_model(
        net, nb_class=len(class_list), use_pretrained=use_pretrained,
        resume_from=resume_from, img_size=img_size, top_layer_nb=top_layer_nb,
        weight_decay=weight_decay, bias_multiplier=bias_multiplier,
        hidden_dropout=hidden_dropout,
        nb_init_filter=nb_init_filter, init_filter_size=init_filter_size,
        init_conv_stride=init_conv_stride, pool_size=pool_size,
        pool_stride=pool_stride, alpha=alpha, l1_ratio=l1_ratio,
        inp_dropout=inp_dropout)
    if featurewise_center:
        preprocess_input = None
    if gpu_count > 1:
        model, org_model = make_parallel(model, gpu_count)
    else:
        org_model = model

    # ============ Train & validation set =============== #
    train_bs = int(batch_size * train_bs_multiplier)
    if use_pretrained:
        dup_3_channels = True
    else:
        dup_3_channels = False
    if load_train_ram:
        raw_imgen = DMImageDataGenerator()
        print("Create generator for raw train set")
        raw_generator = raw_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            batch_size=train_bs, shuffle=False)
        print("Loading raw train set into RAM.")
        sys.stdout.flush()
        raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample)
        print("Done.")
        sys.stdout.flush()
        print("Create generator for train set")
        train_generator = train_imgen.flow(
            raw_set[0], raw_set[1], batch_size=train_bs,
            auto_batch_balance=auto_batch_balance,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)
    else:
        print("Create generator for train set")
        train_generator = train_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            auto_batch_balance=auto_batch_balance, batch_size=train_bs,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)

    # import pdb; pdb.set_trace()
    print("Create generator for val set")
    validation_set = val_imgen.flow_from_directory(
        val_dir, target_size=img_size, target_scale=img_scale,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    sys.stdout.flush()
    if load_val_ram:
        print("Loading validation set into RAM.")
        sys.stdout.flush()
        validation_set = load_dat_ram(validation_set, validation_set.nb_sample)
        print("Done.")
        sys.stdout.flush()

    # ==================== Model training ==================== #
    # Callbacks and class weight.
    early_stopping = EarlyStopping(monitor='val_loss', patience=es_patience,
                                   verbose=1)
    checkpointer = ModelCheckpoint(best_model, monitor='val_acc', verbose=1,
                                   save_best_only=True)
    stdout_flush = DMFlush()
    callbacks = [early_stopping, checkpointer, stdout_flush]
    if optim == 'sgd':
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                      patience=lr_patience, verbose=1)
        callbacks.append(reduce_lr)
    if auto_batch_balance:
        class_weight = None
    elif len(class_list) == 2:
        class_weight = {0: 1.0, 1: pos_cls_weight}
    elif len(class_list) == 3:
        class_weight = {0: 1.0, 1: pos_cls_weight, 2: neg_cls_weight}
    else:
        class_weight = None

    # Do 3-stage training.
    train_batches = int(train_generator.nb_sample / train_bs) + 1
    samples_per_epoch = train_bs * train_batches
    #### DEBUG ####
    # samples_per_epoch = train_bs*10
    #### DEBUG ####
    if isinstance(validation_set, tuple):
        val_samples = len(validation_set[0])
    else:
        val_samples = validation_set.nb_sample
    #### DEBUG ####
    # val_samples = 100
    #### DEBUG ####
    model, loss_hist, acc_hist = do_3stage_training(
        model, org_model, train_generator, validation_set, val_samples,
        best_model, samples_per_epoch, top_layer_nb, net,
        nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs,
        all_layer_epochs=all_layer_epochs,
        use_pretrained=use_pretrained, optim=optim, init_lr=init_lr,
        top_layer_multiplier=top_layer_multiplier,
        all_layer_multiplier=all_layer_multiplier,
        es_patience=es_patience, lr_patience=lr_patience,
        auto_batch_balance=auto_batch_balance,
        pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight,
        nb_worker=nb_worker, weight_decay2=weight_decay2,
        bias_multiplier=bias_multiplier, hidden_dropout2=hidden_dropout2)

    # Training report.
    min_loss_locs, = np.where(loss_hist == min(loss_hist))
    best_val_loss = loss_hist[min_loss_locs[0]]
    best_val_accuracy = acc_hist[min_loss_locs[0]]
    print("\n==== Training summary ====")
    print("Minimum val loss achieved at epoch:", min_loss_locs[0] + 1)
    print("Best val loss:", best_val_loss)
    print("Best val accuracy:", best_val_accuracy)
    if final_model != "NOSAVE":
        model.save(final_model)

    # ==== Predict on test set ==== #
    print("\n==== Predicting on test set ====")
    test_generator = test_imgen.flow_from_directory(
        test_dir, target_size=img_size, target_scale=img_scale,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    print("Test samples =", test_generator.nb_sample)
    print("Load saved best model:", best_model + '.')
    sys.stdout.flush()
    org_model.load_weights(best_model)
    print("Done.")
    test_samples = test_generator.nb_sample
    #### DEBUG ####
    # test_samples = 10
    #### DEBUG ####
    test_res = model.evaluate_generator(
        test_generator, test_samples, nb_worker=nb_worker,
        pickle_safe=True if nb_worker > 1 else False)
    print("Evaluation result on test set:", test_res)
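
# --- Illustrative restatement (mirrors the class-weight rule above) ------------
# The class_weight construction in the script above maps auto_batch_balance and
# the per-class weights onto a Keras-style class_weight dict: balanced batches
# need no weighting; otherwise the positive (and, for 3 classes, the negative)
# class gets its own weight while class 0 stays at 1.0.
def make_class_weight(class_list, auto_batch_balance,
                      pos_cls_weight=1.0, neg_cls_weight=1.0):
    if auto_batch_balance:
        return None
    if len(class_list) == 2:
        return {0: 1.0, 1: pos_cls_weight}
    if len(class_list) == 3:
        return {0: 1.0, 1: pos_cls_weight, 2: neg_cls_weight}
    return None

print(make_class_weight(['background', 'malignant', 'benign'],
                        auto_batch_balance=False))  # {0: 1.0, 1: 1.0, 2: 1.0}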