def run(train_dir, val_dir, test_dir, img_size=[256, 256], img_scale=None, rescale_factor=None, featurewise_center=True, featurewise_mean=59.6, equalize_hist=True, augmentation=False, class_list=['background', 'malignant', 'benign'], batch_size=64, train_bs_multiplier=.5, nb_epoch=5, top_layer_epochs=10, all_layer_epochs=20, load_val_ram=False, load_train_ram=False, net='resnet50', use_pretrained=True, nb_init_filter=32, init_filter_size=5, init_conv_stride=2, pool_size=2, pool_stride=2, weight_decay=.0001, weight_decay2=.0001, alpha=.0001, l1_ratio=.0, inp_dropout=.0, hidden_dropout=.0, hidden_dropout2=.0, optim='sgd', init_lr=.01, lr_patience=10, es_patience=25, resume_from=None, auto_batch_balance=False, pos_cls_weight=1.0, neg_cls_weight=1.0, top_layer_nb=None, top_layer_multiplier=.1, all_layer_multiplier=.01, best_model='./modelState/patch_clf.h5', final_model="NOSAVE"): '''Train a deep learning model for patch classifications ''' best_model_dir = os.path.dirname(best_model) if not os.path.exists(best_model_dir): os.makedirs(best_model_dir) if final_model != "NOSAVE": final_model_dir = os.path.dirname(final_model) if not os.path.exists(final_model_dir): os.makedirs(final_model_dir) # ======= Environmental variables ======== # random_seed = int(os.getenv('RANDOM_SEED', 12345)) nb_worker = int(os.getenv('NUM_CPU_CORES', 4)) gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1)) # ========= Image generator ============== # if featurewise_center: print "Using feature-wise centering, mean:", featurewise_mean train_imgen = DMImageDataGenerator(featurewise_center=True) val_imgen = DMImageDataGenerator(featurewise_center=True) test_imgen = DMImageDataGenerator(featurewise_center=True) train_imgen.mean = featurewise_mean val_imgen.mean = featurewise_mean test_imgen.mean = featurewise_mean else: train_imgen = DMImageDataGenerator() val_imgen = DMImageDataGenerator() test_imgen = DMImageDataGenerator() # Add augmentation options. if augmentation: train_imgen.horizontal_flip = True train_imgen.vertical_flip = True train_imgen.rotation_range = 25. # in degree. train_imgen.shear_range = .2 # in radians. train_imgen.zoom_range = [.8, 1.2] # in proportion. train_imgen.channel_shift_range = 20. # in pixel intensity values. 
# ================= Model creation ============== # model, preprocess_input, top_layer_nb = get_dl_model( net, nb_class=len(class_list), use_pretrained=use_pretrained, resume_from=resume_from, img_size=img_size, top_layer_nb=top_layer_nb, weight_decay=weight_decay, hidden_dropout=hidden_dropout, nb_init_filter=nb_init_filter, init_filter_size=init_filter_size, init_conv_stride=init_conv_stride, pool_size=pool_size, pool_stride=pool_stride, alpha=alpha, l1_ratio=l1_ratio, inp_dropout=inp_dropout) if featurewise_center: preprocess_input = None if gpu_count > 1: model, org_model = make_parallel(model, gpu_count) else: org_model = model # ============ Train & validation set =============== # train_bs = int(batch_size * train_bs_multiplier) if net != 'yaroslav': dup_3_channels = True else: dup_3_channels = False if load_train_ram: raw_imgen = DMImageDataGenerator() print "Create generator for raw train set" raw_generator = raw_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=train_bs, shuffle=False) print "Loading raw train set into RAM.", sys.stdout.flush() raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample) print "Done." sys.stdout.flush() print "Create generator for train set" train_generator = train_imgen.flow( raw_set[0], raw_set[1], batch_size=train_bs, auto_batch_balance=auto_batch_balance, preprocess=preprocess_input, shuffle=True, seed=random_seed) else: print "Create generator for train set" train_generator = train_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', auto_batch_balance=auto_batch_balance, batch_size=train_bs, preprocess=preprocess_input, shuffle=True, seed=random_seed) print "Create generator for val set" validation_set = val_imgen.flow_from_directory( val_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, preprocess=preprocess_input, shuffle=False) sys.stdout.flush() if load_val_ram: print "Loading validation set into RAM.", sys.stdout.flush() validation_set = load_dat_ram(validation_set, validation_set.nb_sample) print "Done." sys.stdout.flush() # ==================== Model training ==================== # # Do 3-stage training. 
train_batches = int(train_generator.nb_sample / train_bs) + 1 if isinstance(validation_set, tuple): val_samples = len(validation_set[0]) else: val_samples = validation_set.nb_sample validation_steps = int(val_samples / batch_size) #### DEBUG #### # val_samples = 100 #### DEBUG #### # import pdb; pdb.set_trace() model, loss_hist, acc_hist = do_3stage_training( model, org_model, train_generator, validation_set, validation_steps, best_model, train_batches, top_layer_nb, net, nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs, all_layer_epochs=all_layer_epochs, use_pretrained=use_pretrained, optim=optim, init_lr=init_lr, top_layer_multiplier=top_layer_multiplier, all_layer_multiplier=all_layer_multiplier, es_patience=es_patience, lr_patience=lr_patience, auto_batch_balance=auto_batch_balance, nb_class=len(class_list), pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight, nb_worker=nb_worker, weight_decay2=weight_decay2, hidden_dropout2=hidden_dropout2) # Training report. if len(loss_hist) > 0: min_loss_locs, = np.where(loss_hist == min(loss_hist)) best_val_loss = loss_hist[min_loss_locs[0]] best_val_accuracy = acc_hist[min_loss_locs[0]] print "\n==== Training summary ====" print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1 print "Best val loss:", best_val_loss print "Best val accuracy:", best_val_accuracy if final_model != "NOSAVE": model.save(final_model) # ==== Predict on test set ==== # print "\n==== Predicting on test set ====" test_generator = test_imgen.flow_from_directory( test_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, preprocess=preprocess_input, shuffle=False) if test_generator.nb_sample: print "Test samples =", test_generator.nb_sample print "Load saved best model:", best_model + '.', sys.stdout.flush() org_model.load_weights(best_model) print "Done." test_steps = int(test_generator.nb_sample / batch_size) #### DEBUG #### # test_samples = 10 #### DEBUG #### test_res = model.evaluate_generator( test_generator, test_steps, nb_worker=nb_worker, pickle_safe=True if nb_worker > 1 else False) print "Evaluation result on test set:", test_res else: print "Skip testing because no test sample is found."
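# --- Illustrative usage sketch (not part of the original script) ---
# The directory layout and parameter values below are assumptions chosen only
# to show how this patch-classifier trainer might be invoked; adjust them to
# the actual dataset before use.
def _example_patch_clf_run():
    """Hedged example: train a ResNet50 patch classifier on assumed folders."""
    run('./patches/train', './patches/val', './patches/test',
        img_size=[256, 256], net='resnet50', use_pretrained=True,
        batch_size=64, nb_epoch=5, top_layer_epochs=10, all_layer_epochs=20,
        best_model='./modelState/patch_clf.h5', final_model='NOSAVE')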
def run(train_dir, val_dir, test_dir,
        img_size=[256, 256], img_scale=None, rescale_factor=None,
        featurewise_center=True, featurewise_mean=59.6,
        equalize_hist=True, augmentation=False,
        class_list=['background', 'malignant', 'benign'],
        batch_size=64, train_bs_multiplier=.5,
        nb_epoch=5, top_layer_epochs=10, all_layer_epochs=20,
        load_val_ram=False, load_train_ram=False,
        net='resnet50', use_pretrained=True,
        nb_init_filter=32, init_filter_size=5, init_conv_stride=2,
        pool_size=2, pool_stride=2,
        weight_decay=.0001, weight_decay2=.0001,
        alpha=.0001, l1_ratio=.0,
        inp_dropout=.0, hidden_dropout=.0, hidden_dropout2=.0,
        optim='sgd', init_lr=.01, lr_patience=10, es_patience=25,
        resume_from=None, auto_batch_balance=False,
        pos_cls_weight=1.0, neg_cls_weight=1.0,
        top_layer_nb=None, top_layer_multiplier=.1, all_layer_multiplier=.01,
        best_model='./modelState/patch_clf.h5',
        final_model="NOSAVE"):
    '''Train a deep learning model for patch classifications
    '''
    # ======= Environmental variables ======== #
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # ========= Image generator ============== #
    if featurewise_center:  # center the data feature-wise.
        train_imgen = DMImageDataGenerator(featurewise_center=True)
        val_imgen = DMImageDataGenerator(featurewise_center=True)
        test_imgen = DMImageDataGenerator(featurewise_center=True)
        train_imgen.mean = featurewise_mean
        val_imgen.mean = featurewise_mean
        test_imgen.mean = featurewise_mean
    else:
        train_imgen = DMImageDataGenerator()
        val_imgen = DMImageDataGenerator()
        test_imgen = DMImageDataGenerator()

    # Add augmentation options.
    if augmentation:
        train_imgen.horizontal_flip = True  # random horizontal flips.
        train_imgen.vertical_flip = True  # random vertical flips.
        train_imgen.rotation_range = 25.  # in degrees; range for random rotations.
        train_imgen.shear_range = .2  # in radians; counter-clockwise shear angle.
        # Float or a [lower, upper] list giving the random zoom range; a single
        # float is equivalent to [lower, upper] = [1 - zoom_range, 1 + zoom_range].
        train_imgen.zoom_range = [.8, 1.2]  # in proportion.
        # Range for random channel shifts, in pixel intensity values; shifting
        # the channels changes the overall color of the image.
        train_imgen.channel_shift_range = 20.

    # ================= Model creation ============== #
    # weight_decay is the coefficient on the regularization term of the loss;
    #     it controls how strongly model complexity is penalized (a large value
    #     makes complex models incur a large loss) and helps prevent overfitting.
    # hidden_dropout / inp_dropout: dropout on hidden layers / the input, also
    #     used to reduce overfitting.
    # init_conv_stride: stride of the initial convolution.
    # pool_size / pool_stride: pooling window size and stride (max or average pooling).
    # alpha and l1_ratio: elastic-net style regularization; l1_ratio controls
    #     the convex combination of the L1 and L2 penalties.
    model, preprocess_input, top_layer_nb = get_dl_model(
        net, nb_class=len(class_list), use_pretrained=use_pretrained,
        resume_from=resume_from, img_size=img_size, top_layer_nb=top_layer_nb,
        weight_decay=weight_decay, hidden_dropout=hidden_dropout,
        nb_init_filter=nb_init_filter, init_filter_size=init_filter_size,
        init_conv_stride=init_conv_stride, pool_size=pool_size,
        pool_stride=pool_stride, alpha=alpha, l1_ratio=l1_ratio,
        inp_dropout=inp_dropout)
    if featurewise_center:
        preprocess_input = None
    if gpu_count > 1:
        model, org_model = make_parallel(model, gpu_count)  # data-parallel across GPUs.
    else:
        org_model = model

    # ============ Train & validation set =============== #
    train_bs = int(batch_size*train_bs_multiplier)  # batch size times the train multiplier.
    if net != 'yaroslav':  # see dm_keras_ext.py.
        dup_3_channels = True
    else:
        dup_3_channels = False
    if load_train_ram:
        raw_imgen = DMImageDataGenerator()
        print("Create generator for raw train set")
        # Takes a folder path and yields augmented/normalized batches in an
        # endless loop. equalize_hist: histogram equalization; shuffle: randomly
        # shuffle the data.
        raw_generator = raw_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            rescale_factor=rescale_factor,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            batch_size=train_bs, shuffle=False)
        # Load the raw train set into RAM.
        print("Loading raw train set into RAM."); sys.stdout.flush()
        raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample)
        print("Done."); sys.stdout.flush()
        print("Create generator for train set")
        # Takes numpy arrays and labels and yields augmented/normalized batches
        # in an endless loop.
        train_generator = train_imgen.flow(
            raw_set[0], raw_set[1], batch_size=train_bs,
            auto_batch_balance=auto_batch_balance,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)
    else:
        print("Create generator for train set")
        train_generator = train_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            rescale_factor=rescale_factor,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            auto_batch_balance=auto_batch_balance, batch_size=train_bs,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)

    # Create the generator for the validation set.
    print("Create generator for val set")
    validation_set = val_imgen.flow_from_directory(
        val_dir, target_size=img_size, target_scale=img_scale,
        rescale_factor=rescale_factor,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    sys.stdout.flush()
    # Optionally load the validation set into RAM.
    if load_val_ram:
        print("Loading validation set into RAM."); sys.stdout.flush()
        validation_set = load_dat_ram(validation_set, validation_set.nb_sample)
        print("Done."); sys.stdout.flush()

    # ==================== Model training ==================== #
    # Do 3-stage training.
    train_batches = int(train_generator.nb_sample/train_bs) + 1
    # A tuple means the validation set was loaded into RAM.
    if isinstance(validation_set, tuple):
        val_samples = len(validation_set[0])
    else:
        val_samples = validation_set.nb_sample
    validation_steps = int(val_samples/batch_size)
    #### DEBUG ####
    # val_samples = 100
    #### DEBUG ####
    # import pdb; pdb.set_trace()
    # The 3-stage training returns the model plus the loss and accuracy histories.
    model, loss_hist, acc_hist = do_3stage_training(
        model, org_model, train_generator, validation_set, validation_steps,
        best_model, train_batches, top_layer_nb, net,
        nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs,
        all_layer_epochs=all_layer_epochs,
        use_pretrained=use_pretrained, optim=optim, init_lr=init_lr,
        top_layer_multiplier=top_layer_multiplier,
        all_layer_multiplier=all_layer_multiplier,
        es_patience=es_patience, lr_patience=lr_patience,
        auto_batch_balance=auto_batch_balance, nb_class=len(class_list),
        pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight,
        nb_worker=nb_worker, weight_decay2=weight_decay2,
        hidden_dropout2=hidden_dropout2)

    # Training report.
    if len(loss_hist) > 0:
        min_loss_locs, = np.where(loss_hist == min(loss_hist))
        best_val_loss = loss_hist[min_loss_locs[0]]
        best_val_accuracy = acc_hist[min_loss_locs[0]]
        print("\n==== Training summary ====")
        print("Minimum val loss achieved at epoch:", min_loss_locs[0] + 1)
        print("Best val loss:", best_val_loss)
        print("Best val accuracy:", best_val_accuracy)
    # Save the final model.
    if final_model != "NOSAVE":
        model.save(final_model)

    # ==== Predict on test set ==== #
    print("\n==== Predicting on test set ====")
    test_generator = test_imgen.flow_from_directory(
        test_dir, target_size=img_size, target_scale=img_scale,
        rescale_factor=rescale_factor,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    print("Test samples =", test_generator.nb_sample)
    # Load the best checkpoint into the original (non-parallel) model.
    print("Load saved best model:", best_model + '.'); sys.stdout.flush()
    org_model.load_weights(best_model)
    print("Done.")
    test_steps = int(test_generator.nb_sample/batch_size)
    #### DEBUG ####
    # test_samples = 10
    #### DEBUG ####
    test_res = model.evaluate_generator(
        test_generator, test_steps, nb_worker=nb_worker,
        pickle_safe=True if nb_worker > 1 else False)
    print("Evaluation result on test set:", test_res)
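# --- Minimal sketch of feature-wise centering (an assumption: this mirrors what
# DMImageDataGenerator appears to do when featurewise_center=True and .mean is
# set; the generator class itself is defined elsewhere in this repo). ---
def _center_batch(batch, featurewise_mean=59.6):
    """Subtract a precomputed dataset mean from a batch of image patches."""
    return batch.astype('float32') - featurewise_mean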
def run(train_dir, val_dir, test_dir, patch_model_state=None, resume_from=None, img_size=[1152, 896], img_scale=None, rescale_factor=None, featurewise_center=True, featurewise_mean=52.16, equalize_hist=False, augmentation=True, class_list=['neg', 'pos'], patch_net='resnet50', block_type='resnet', top_depths=[512, 512], top_repetitions=[3, 3], bottleneck_enlarge_factor=4, add_heatmap=False, avg_pool_size=[7, 7], add_conv=True, add_shortcut=False, hm_strides=(1, 1), hm_pool_size=(5, 5), fc_init_units=64, fc_layers=2, top_layer_nb=None, batch_size=64, train_bs_multiplier=.5, nb_epoch=5, all_layer_epochs=20, load_val_ram=False, load_train_ram=False, weight_decay=.0001, hidden_dropout=.0, weight_decay2=.0001, hidden_dropout2=.0, optim='sgd', init_lr=.01, lr_patience=10, es_patience=25, auto_batch_balance=False, pos_cls_weight=1.0, neg_cls_weight=1.0, all_layer_multiplier=.1, best_model='./modelState/image_clf.h5', final_model="NOSAVE"): '''Train a deep learning model for image classifications ''' # ======= Environmental variables ======== # random_seed = int(os.getenv('RANDOM_SEED', 12345)) nb_worker = int(os.getenv('NUM_CPU_CORES', 4)) gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1)) # ========= Image generator ============== # if featurewise_center: train_imgen = DMImageDataGenerator(featurewise_center=True) val_imgen = DMImageDataGenerator(featurewise_center=True) test_imgen = DMImageDataGenerator(featurewise_center=True) train_imgen.mean = featurewise_mean val_imgen.mean = featurewise_mean test_imgen.mean = featurewise_mean else: train_imgen = DMImageDataGenerator() val_imgen = DMImageDataGenerator() test_imgen = DMImageDataGenerator() # Add augmentation options. if augmentation: train_imgen.horizontal_flip = True train_imgen.vertical_flip = True train_imgen.rotation_range = 25. # in degree. train_imgen.shear_range = .2 # in radians. train_imgen.zoom_range = [.8, 1.2] # in proportion. train_imgen.channel_shift_range = 20. # in pixel intensity values. # ================= Model creation ============== # if resume_from is not None: image_model = load_model(resume_from, compile=False) else: patch_model = load_model(patch_model_state, compile=False) image_model, top_layer_nb = add_top_layers( patch_model, img_size, patch_net, block_type, top_depths, top_repetitions, bottleneck_org, nb_class=len(class_list), shortcut_with_bn=True, bottleneck_enlarge_factor=bottleneck_enlarge_factor, dropout=hidden_dropout, weight_decay=weight_decay, add_heatmap=add_heatmap, avg_pool_size=avg_pool_size, add_conv=add_conv, add_shortcut=add_shortcut, hm_strides=hm_strides, hm_pool_size=hm_pool_size, fc_init_units=fc_init_units, fc_layers=fc_layers) if gpu_count > 1: image_model, org_model = make_parallel(image_model, gpu_count) else: org_model = image_model # ============ Train & validation set =============== # train_bs = int(batch_size * train_bs_multiplier) dup_3_channels = True if load_train_ram: raw_imgen = DMImageDataGenerator() print "Create generator for raw train set" raw_generator = raw_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=train_bs, shuffle=False) print "Loading raw train set into RAM.", sys.stdout.flush() raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample) print "Done." 
sys.stdout.flush() print "Create generator for train set" train_generator = train_imgen.flow( raw_set[0], raw_set[1], batch_size=train_bs, auto_batch_balance=auto_batch_balance, shuffle=True, seed=random_seed) else: print "Create generator for train set" train_generator = train_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', auto_batch_balance=auto_batch_balance, batch_size=train_bs, shuffle=True, seed=random_seed) print "Create generator for val set" validation_set = val_imgen.flow_from_directory( val_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, shuffle=False) sys.stdout.flush() if load_val_ram: print "Loading validation set into RAM.", sys.stdout.flush() validation_set = load_dat_ram(validation_set, validation_set.nb_sample) print "Done." sys.stdout.flush() # ==================== Model training ==================== # # Do 2-stage training. train_batches = int(train_generator.nb_sample / train_bs) + 1 if isinstance(validation_set, tuple): val_samples = len(validation_set[0]) else: val_samples = validation_set.nb_sample validation_steps = int(val_samples / batch_size) #### DEBUG #### # train_batches = 1 # val_samples = batch_size*5 # validation_steps = 5 #### DEBUG #### if load_val_ram: auc_checkpointer = DMAucModelCheckpoint(best_model, validation_set, batch_size=batch_size) else: auc_checkpointer = DMAucModelCheckpoint(best_model, validation_set, test_samples=val_samples) # import pdb; pdb.set_trace() image_model, loss_hist, acc_hist = do_2stage_training( image_model, org_model, train_generator, validation_set, validation_steps, best_model, train_batches, top_layer_nb, nb_epoch=nb_epoch, all_layer_epochs=all_layer_epochs, optim=optim, init_lr=init_lr, all_layer_multiplier=all_layer_multiplier, es_patience=es_patience, lr_patience=lr_patience, auto_batch_balance=auto_batch_balance, pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight, nb_worker=nb_worker, auc_checkpointer=auc_checkpointer, weight_decay=weight_decay, hidden_dropout=hidden_dropout, weight_decay2=weight_decay2, hidden_dropout2=hidden_dropout2, ) # Training report. if len(loss_hist) > 0: min_loss_locs, = np.where(loss_hist == min(loss_hist)) best_val_loss = loss_hist[min_loss_locs[0]] best_val_accuracy = acc_hist[min_loss_locs[0]] print "\n==== Training summary ====" print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1 print "Best val loss:", best_val_loss print "Best val accuracy:", best_val_accuracy if final_model != "NOSAVE": image_model.save(final_model) # ==== Predict on test set ==== # print "\n==== Predicting on test set ====" test_generator = test_imgen.flow_from_directory( test_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, shuffle=False) test_samples = test_generator.nb_sample #### DEBUG #### # test_samples = 5 #### DEBUG #### print "Test samples =", test_samples print "Load saved best model:", best_model + '.', sys.stdout.flush() org_model.load_weights(best_model) print "Done." 
    # test_steps = int(test_generator.nb_sample/batch_size)
    # test_res = image_model.evaluate_generator(
    #     test_generator, test_steps, nb_worker=nb_worker,
    #     pickle_safe=True if nb_worker > 1 else False)
    test_auc = DMAucModelCheckpoint.calc_test_auc(
        test_generator, image_model, test_samples=test_samples)
    print "AUROC on test set:", test_auc
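# --- Hedged sketch of the test-set AUROC reported above ---
# DMAucModelCheckpoint.calc_test_auc is defined elsewhere in this repo; the
# helper below only illustrates the underlying metric with scikit-learn and is
# NOT the repo's implementation.
from sklearn.metrics import roc_auc_score
import numpy as np

def _auroc_from_probs(y_true, y_prob_pos):
    """AUROC for binary labels given predicted probabilities of the 'pos' class."""
    return roc_auc_score(np.asarray(y_true), np.asarray(y_prob_pos))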
def run(train_dir, val_dir, test_dir, patch_model_state=None, resume_from=None, img_size=[1152, 896], img_scale=None, rescale_factor=None, featurewise_center=True, featurewise_mean=52.16, equalize_hist=False, augmentation=True, class_list=['neg', 'pos'], patch_net='resnet50', block_type='resnet', top_depths=[512, 512], top_repetitions=[3, 3], bottleneck_enlarge_factor=4, add_heatmap=False, avg_pool_size=[7, 7], add_conv=True, add_shortcut=False, hm_strides=(1,1), hm_pool_size=(5,5), fc_init_units=64, fc_layers=2, top_layer_nb=None, batch_size=64, train_bs_multiplier=.5, nb_epoch=5, all_layer_epochs=20, load_val_ram=False, load_train_ram=False, weight_decay=.0001, hidden_dropout=.0, weight_decay2=.0001, hidden_dropout2=.0, optim='sgd', init_lr=.01, lr_patience=10, es_patience=25, auto_batch_balance=False, pos_cls_weight=1.0, neg_cls_weight=1.0, all_layer_multiplier=.1, best_model='./modelState/image_clf.h5', final_model="NOSAVE"): '''Train a deep learning model for image classifications ''' # ======= Environmental variables ======== # random_seed = int(os.getenv('RANDOM_SEED', 12345)) nb_worker = int(os.getenv('NUM_CPU_CORES', 4)) gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1)) # ========= Image generator ============== # if featurewise_center: train_imgen = DMImageDataGenerator(featurewise_center=True) val_imgen = DMImageDataGenerator(featurewise_center=True) test_imgen = DMImageDataGenerator(featurewise_center=True) train_imgen.mean = featurewise_mean val_imgen.mean = featurewise_mean test_imgen.mean = featurewise_mean else: train_imgen = DMImageDataGenerator() val_imgen = DMImageDataGenerator() test_imgen = DMImageDataGenerator() # Add augmentation options. if augmentation: train_imgen.horizontal_flip = True train_imgen.vertical_flip = True train_imgen.rotation_range = 25. # in degree. train_imgen.shear_range = .2 # in radians. train_imgen.zoom_range = [.8, 1.2] # in proportion. train_imgen.channel_shift_range = 20. # in pixel intensity values. 
# ================= Model creation ============== # if resume_from is not None: image_model = load_model(resume_from, compile=False) else: patch_model = load_model(patch_model_state, compile=False) image_model, top_layer_nb = add_top_layers( patch_model, img_size, patch_net, block_type, top_depths, top_repetitions, bottleneck_org, nb_class=len(class_list), shortcut_with_bn=True, bottleneck_enlarge_factor=bottleneck_enlarge_factor, dropout=hidden_dropout, weight_decay=weight_decay, add_heatmap=add_heatmap, avg_pool_size=avg_pool_size, add_conv=add_conv, add_shortcut=add_shortcut, hm_strides=hm_strides, hm_pool_size=hm_pool_size, fc_init_units=fc_init_units, fc_layers=fc_layers) if gpu_count > 1: image_model, org_model = make_parallel(image_model, gpu_count) else: org_model = image_model # ============ Train & validation set =============== # train_bs = int(batch_size*train_bs_multiplier) if patch_net != 'yaroslav': dup_3_channels = True else: dup_3_channels = False if load_train_ram: raw_imgen = DMImageDataGenerator() print "Create generator for raw train set" raw_generator = raw_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=train_bs, shuffle=False) print "Loading raw train set into RAM.", sys.stdout.flush() raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample) print "Done."; sys.stdout.flush() print "Create generator for train set" train_generator = train_imgen.flow( raw_set[0], raw_set[1], batch_size=train_bs, auto_batch_balance=auto_batch_balance, shuffle=True, seed=random_seed) else: print "Create generator for train set" train_generator = train_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', auto_batch_balance=auto_batch_balance, batch_size=train_bs, shuffle=True, seed=random_seed) print "Create generator for val set" validation_set = val_imgen.flow_from_directory( val_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, shuffle=False) sys.stdout.flush() if load_val_ram: print "Loading validation set into RAM.", sys.stdout.flush() validation_set = load_dat_ram(validation_set, validation_set.nb_sample) print "Done."; sys.stdout.flush() # ==================== Model training ==================== # # Do 2-stage training. 
train_batches = int(train_generator.nb_sample/train_bs) + 1 if isinstance(validation_set, tuple): val_samples = len(validation_set[0]) else: val_samples = validation_set.nb_sample validation_steps = int(val_samples/batch_size) #### DEBUG #### # train_batches = 1 # val_samples = batch_size*5 # validation_steps = 5 #### DEBUG #### if load_val_ram: auc_checkpointer = DMAucModelCheckpoint( best_model, validation_set, batch_size=batch_size) else: auc_checkpointer = DMAucModelCheckpoint( best_model, validation_set, test_samples=val_samples) # import pdb; pdb.set_trace() image_model, loss_hist, acc_hist = do_2stage_training( image_model, org_model, train_generator, validation_set, validation_steps, best_model, train_batches, top_layer_nb, nb_epoch=nb_epoch, all_layer_epochs=all_layer_epochs, optim=optim, init_lr=init_lr, all_layer_multiplier=all_layer_multiplier, es_patience=es_patience, lr_patience=lr_patience, auto_batch_balance=auto_batch_balance, pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight, nb_worker=nb_worker, auc_checkpointer=auc_checkpointer, weight_decay=weight_decay, hidden_dropout=hidden_dropout, weight_decay2=weight_decay2, hidden_dropout2=hidden_dropout2,) # Training report. if len(loss_hist) > 0: min_loss_locs, = np.where(loss_hist == min(loss_hist)) best_val_loss = loss_hist[min_loss_locs[0]] best_val_accuracy = acc_hist[min_loss_locs[0]] print "\n==== Training summary ====" print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1 print "Best val loss:", best_val_loss print "Best val accuracy:", best_val_accuracy if final_model != "NOSAVE": image_model.save(final_model) # ==== Predict on test set ==== # print "\n==== Predicting on test set ====" test_generator = test_imgen.flow_from_directory( test_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, shuffle=False) test_samples = test_generator.nb_sample #### DEBUG #### # test_samples = 5 #### DEBUG #### print "Test samples =", test_samples print "Load saved best model:", best_model + '.', sys.stdout.flush() org_model.load_weights(best_model) print "Done." # test_steps = int(test_generator.nb_sample/batch_size) # test_res = image_model.evaluate_generator( # test_generator, test_steps, nb_worker=nb_worker, # pickle_safe=True if nb_worker > 1 else False) test_auc = DMAucModelCheckpoint.calc_test_auc( test_generator, image_model, test_samples=test_samples) print "AUROC on test set:", test_auc
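# --- Illustrative usage sketch (assumed paths and values; not from the original repo) ---
def _example_image_clf_run():
    """Hedged example: convert a saved patch classifier into a whole-image
    classifier and finetune it in two stages."""
    run('./images/train', './images/val', './images/test',
        patch_model_state='./modelState/patch_clf.h5',
        img_size=[1152, 896], patch_net='resnet50', block_type='resnet',
        batch_size=8, nb_epoch=5, all_layer_epochs=20,
        best_model='./modelState/image_clf.h5')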
def run(img_folder, img_extension='dcm', img_height=1024, img_scale=4095, do_featurewise_norm=True, norm_fit_size=10, img_per_batch=2, roi_per_img=32, roi_size=(256, 256), one_patch_mode=False, low_int_threshold=.05, blob_min_area=3, blob_min_int=.5, blob_max_int=.85, blob_th_step=10, data_augmentation=False, roi_state=None, clf_bs=32, cutpoint=.5, amp_factor=1., return_sample_weight=True, auto_batch_balance=True, patches_per_epoch=12800, nb_epoch=20, neg_vs_pos_ratio=None, all_neg_skip=0., nb_init_filter=32, init_filter_size=5, init_conv_stride=2, pool_size=2, pool_stride=2, weight_decay=.0001, alpha=.0001, l1_ratio=.0, inp_dropout=.0, hidden_dropout=.0, init_lr=.01, test_size=.2, val_size=.0, lr_patience=3, es_patience=10, resume_from=None, net='resnet50', load_val_ram=False, load_train_ram=False, no_pos_skip=0., balance_classes=0., pred_img_per_batch=1, pred_roi_per_img=32, exam_tsv='./metadata/exams_metadata.tsv', img_tsv='./metadata/images_crosswalk.tsv', best_model='./modelState/dm_candidROI_best_model.h5', final_model="NOSAVE", pred_trainval=False, pred_out="dl_pred_out.pkl"): '''Run ResNet training on candidate ROIs from mammograms Args: norm_fit_size ([int]): the number of patients used to calculate feature-wise mean and std. ''' # Read some env variables. random_seed = int(os.getenv('RANDOM_SEED', 12345)) # Use of multiple CPU cores is not working! # When nb_worker>1 and pickle_safe=True, this error is encountered: # "failed to enqueue async memcpy from host to device: CUDA_ERROR_NOT_INITIALIZED" # To avoid the error, only this combination worked: # nb_worker=1 and pickle_safe=False. nb_worker = int(os.getenv('NUM_CPU_CORES', 4)) gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1)) # Setup training and validation data. # Load image or exam lists and split them into train and val sets. meta_man = DMMetaManager(exam_tsv=exam_tsv, img_tsv=img_tsv, img_folder=img_folder, img_extension=img_extension) # Split data based on subjects. subj_list, subj_labs = meta_man.get_subj_labs() subj_train, subj_test, slab_train, slab_test = train_test_split( subj_list, subj_labs, test_size=test_size, random_state=random_seed, stratify=subj_labs) if val_size > 0: # train/val split. subj_train, subj_val, slab_train, slab_val = train_test_split( subj_train, slab_train, test_size=val_size, random_state=random_seed, stratify=slab_train) else: # use test as val. make a copy of the test list. subj_val = list(subj_test) slab_val = list(slab_test) # import pdb; pdb.set_trace() # Subset subject lists to desired ratio. if neg_vs_pos_ratio is not None: subj_train, slab_train = DMMetaManager.subset_subj_list( subj_train, slab_train, neg_vs_pos_ratio, random_seed) subj_val, slab_val = DMMetaManager.subset_subj_list( subj_val, slab_val, neg_vs_pos_ratio, random_seed) print "After sampling, Nb of subjects for train=%d, val=%d, test=%d" \ % (len(subj_train), len(subj_val), len(subj_test)) # Get image and label lists. img_train, lab_train = meta_man.get_flatten_img_list(subj_train) img_val, lab_val = meta_man.get_flatten_img_list(subj_val) # Create image generators for train, fit and val. imgen_trainval = DMImageDataGenerator() if data_augmentation: imgen_trainval.horizontal_flip=True imgen_trainval.vertical_flip=True imgen_trainval.rotation_range = 45. imgen_trainval.shear_range = np.pi/8. 
# imgen_trainval.width_shift_range = .05 # imgen_trainval.height_shift_range = .05 # imgen_trainval.zoom_range = [.95, 1.05] if do_featurewise_norm: imgen_trainval.featurewise_center = True imgen_trainval.featurewise_std_normalization = True # Fit feature-wise mean and std. img_fit,_ = meta_man.get_flatten_img_list( subj_train[:norm_fit_size]) # fit on a subset. print ">>> Fit image generator <<<"; sys.stdout.flush() fit_generator = imgen_trainval.flow_from_candid_roi( img_fit, target_height=img_height, target_scale=img_scale, class_mode=None, validation_mode=True, img_per_batch=len(img_fit), roi_per_img=roi_per_img, roi_size=roi_size, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, roi_clf=None, return_sample_weight=False, seed=random_seed) imgen_trainval.fit(fit_generator.next()) print "Estimates from %d images: mean=%.1f, std=%.1f." % \ (len(img_fit), imgen_trainval.mean, imgen_trainval.std) sys.stdout.flush() else: imgen_trainval.samplewise_center = True imgen_trainval.samplewise_std_normalization = True # Load ROI classifier. if roi_state is not None: roi_clf = load_model( roi_state, custom_objects={ 'sensitivity': DMMetrics.sensitivity, 'specificity': DMMetrics.specificity } ) graph = tf.get_default_graph() else: roi_clf = None graph = None # Set some DL training related parameters. if one_patch_mode: class_mode = 'binary' loss = 'binary_crossentropy' metrics = [DMMetrics.sensitivity, DMMetrics.specificity] else: class_mode = 'categorical' loss = 'categorical_crossentropy' metrics = ['accuracy', 'precision', 'recall'] if load_train_ram: validation_mode = True return_raw_img = True else: validation_mode = False return_raw_img = False # Create train and val generators. print ">>> Train image generator <<<"; sys.stdout.flush() train_generator = imgen_trainval.flow_from_candid_roi( img_train, lab_train, target_height=img_height, target_scale=img_scale, class_mode=class_mode, validation_mode=validation_mode, img_per_batch=img_per_batch, roi_per_img=roi_per_img, roi_size=roi_size, one_patch_mode=one_patch_mode, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, tf_graph=graph, roi_clf=roi_clf, clf_bs=clf_bs, cutpoint=cutpoint, amp_factor=amp_factor, return_sample_weight=return_sample_weight, auto_batch_balance=auto_batch_balance, all_neg_skip=all_neg_skip, shuffle=True, seed=random_seed, return_raw_img=return_raw_img) print ">>> Validation image generator <<<"; sys.stdout.flush() val_generator = imgen_trainval.flow_from_candid_roi( img_val, lab_val, target_height=img_height, target_scale=img_scale, class_mode=class_mode, validation_mode=True, img_per_batch=img_per_batch, roi_per_img=roi_per_img, roi_size=roi_size, one_patch_mode=one_patch_mode, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, tf_graph=graph, roi_clf=roi_clf, clf_bs=clf_bs, cutpoint=cutpoint, amp_factor=amp_factor, return_sample_weight=False, auto_batch_balance=False, seed=random_seed) # Load train and validation set into RAM. 
if one_patch_mode: nb_train_samples = len(img_train) nb_val_samples = len(img_val) else: nb_train_samples = len(img_train)*roi_per_img nb_val_samples = len(img_val)*roi_per_img if load_val_ram: print "Loading validation data into RAM.", sys.stdout.flush() validation_set = load_dat_ram(val_generator, nb_val_samples) print "Done."; sys.stdout.flush() sparse_y = to_sparse(validation_set[1]) for uy in np.unique(sparse_y): print "Nb of samples for class:%d = %d" % \ (uy, (sparse_y==uy).sum()) sys.stdout.flush() if load_train_ram: print "Loading train data into RAM.", sys.stdout.flush() train_set = load_dat_ram(train_generator, nb_train_samples) print "Done."; sys.stdout.flush() sparse_y = to_sparse(train_set[1]) for uy in np.unique(sparse_y): print "Nb of samples for class:%d = %d" % \ (uy, (sparse_y==uy).sum()) sys.stdout.flush() train_generator = imgen_trainval.flow( train_set[0], train_set[1], batch_size=clf_bs, auto_batch_balance=auto_batch_balance, no_pos_skip=no_pos_skip, balance_classes=balance_classes, shuffle=True, seed=random_seed) # Load or create model. if resume_from is not None: model = load_model( resume_from, custom_objects={ 'sensitivity': DMMetrics.sensitivity, 'specificity': DMMetrics.specificity } ) else: builder = ResNetBuilder if net == 'resnet18': model = builder.build_resnet_18( (1, roi_size[0], roi_size[1]), 3, nb_init_filter, init_filter_size, init_conv_stride, pool_size, pool_stride, weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout) elif net == 'resnet34': model = builder.build_resnet_34( (1, roi_size[0], roi_size[1]), 3, nb_init_filter, init_filter_size, init_conv_stride, pool_size, pool_stride, weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout) elif net == 'resnet50': model = builder.build_resnet_50( (1, roi_size[0], roi_size[1]), 3, nb_init_filter, init_filter_size, init_conv_stride, pool_size, pool_stride, weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout) elif net == 'resnet101': model = builder.build_resnet_101( (1, roi_size[0], roi_size[1]), 3, nb_init_filter, init_filter_size, init_conv_stride, pool_size, pool_stride, weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout) elif net == 'resnet152': model = builder.build_resnet_152( (1, roi_size[0], roi_size[1]), 3, nb_init_filter, init_filter_size, init_conv_stride, pool_size, pool_stride, weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout) if gpu_count > 1: model = make_parallel(model, gpu_count) # Model training. sgd = SGD(lr=init_lr, momentum=0.9, decay=0.0, nesterov=True) model.compile(optimizer=sgd, loss=loss, metrics=metrics) reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=lr_patience, verbose=1) early_stopping = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=1) if load_val_ram: auc_checkpointer = DMAucModelCheckpoint( best_model, validation_set, batch_size=clf_bs) else: auc_checkpointer = DMAucModelCheckpoint( best_model, val_generator, nb_test_samples=nb_val_samples) hist = model.fit_generator( train_generator, samples_per_epoch=patches_per_epoch, nb_epoch=nb_epoch, validation_data=validation_set if load_val_ram else val_generator, nb_val_samples=nb_val_samples, callbacks=[reduce_lr, early_stopping, auc_checkpointer], # nb_worker=1, pickle_safe=False, nb_worker=nb_worker if load_train_ram else 1, pickle_safe=True if load_train_ram else False, verbose=2) if final_model != "NOSAVE": print "Saving final model to:", final_model; sys.stdout.flush() model.save(final_model) # Training report. 
min_loss_locs, = np.where(hist.history['val_loss'] == min(hist.history['val_loss'])) best_val_loss = hist.history['val_loss'][min_loss_locs[0]] if one_patch_mode: best_val_sensitivity = hist.history['val_sensitivity'][min_loss_locs[0]] best_val_specificity = hist.history['val_specificity'][min_loss_locs[0]] else: best_val_precision = hist.history['val_precision'][min_loss_locs[0]] best_val_recall = hist.history['val_recall'][min_loss_locs[0]] best_val_accuracy = hist.history['val_acc'][min_loss_locs[0]] print "\n==== Training summary ====" print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1 print "Best val loss:", best_val_loss if one_patch_mode: print "Best val sensitivity:", best_val_sensitivity print "Best val specificity:", best_val_specificity else: print "Best val precision:", best_val_precision print "Best val recall:", best_val_recall print "Best val accuracy:", best_val_accuracy # Make predictions on train, val, test exam lists. if best_model != 'NOSAVE': print "\n==== Making predictions ====" print "Load best model for prediction:", best_model sys.stdout.flush() pred_model = load_model(best_model) if gpu_count > 1: pred_model = make_parallel(pred_model, gpu_count) if pred_trainval: print "Load exam lists for train, val sets"; sys.stdout.flush() exam_train = meta_man.get_flatten_exam_list( subj_train, flatten_img_list=True) print "Train exam list length=", len(exam_train); sys.stdout.flush() exam_val = meta_man.get_flatten_exam_list( subj_val, flatten_img_list=True) print "Val exam list length=", len(exam_val); sys.stdout.flush() print "Load exam list for test set"; sys.stdout.flush() exam_test = meta_man.get_flatten_exam_list( subj_test, flatten_img_list=True) print "Test exam list length=", len(exam_test); sys.stdout.flush() if do_featurewise_norm: imgen_pred = DMImageDataGenerator() imgen_pred.featurewise_center = True imgen_pred.featurewise_std_normalization = True imgen_pred.mean = imgen_trainval.mean imgen_pred.std = imgen_trainval.std else: imgen_pred.samplewise_center = True imgen_pred.samplewise_std_normalization = True if pred_trainval: print "Make predictions on train exam list"; sys.stdout.flush() meta_prob_train = get_exam_pred( exam_train, pred_roi_per_img, imgen_pred, target_height=img_height, target_scale=img_scale, img_per_batch=pred_img_per_batch, roi_size=roi_size, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, seed=random_seed, dl_model=pred_model) print "Train prediction list length=", len(meta_prob_train) print "Make predictions on val exam list"; sys.stdout.flush() meta_prob_val = get_exam_pred( exam_val, pred_roi_per_img, imgen_pred, target_height=img_height, target_scale=img_scale, img_per_batch=pred_img_per_batch, roi_size=roi_size, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, seed=random_seed, dl_model=pred_model) print "Val prediction list length=", len(meta_prob_val) print "Make predictions on test exam list"; sys.stdout.flush() meta_prob_test = get_exam_pred( exam_test, pred_roi_per_img, imgen_pred, target_height=img_height, target_scale=img_scale, img_per_batch=pred_img_per_batch, roi_size=roi_size, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, seed=random_seed, dl_model=pred_model) print "Test prediction list length=", len(meta_prob_test) 
    if pred_trainval:
        pickle.dump((meta_prob_train, meta_prob_val, meta_prob_test),
                    open(pred_out, 'w'))
    else:
        pickle.dump(meta_prob_test, open(pred_out, 'w'))

    return hist
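# --- Hedged sketch: reading back the prediction pickle written above ---
# The structure (a tuple of three lists when pred_trainval=True, else a single
# list) follows from the pickle.dump calls above; the helper itself is only an
# illustration and not part of the original pipeline.
import pickle

def _load_predictions(pred_out='dl_pred_out.pkl', pred_trainval=False):
    """Load the exam-level predictions saved by run()."""
    with open(pred_out, 'rb') as f:
        obj = pickle.load(f)
    if pred_trainval:
        meta_prob_train, meta_prob_val, meta_prob_test = obj
        return meta_prob_train, meta_prob_val, meta_prob_test
    return obj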
def run(train_dir, val_dir, test_dir, img_size=[256, 256], img_scale=None, rescale_factor=None, featurewise_center=True, featurewise_mean=59.6, equalize_hist=True, augmentation=False, class_list=['background', 'malignant', 'benign'], batch_size=64, train_bs_multiplier=.5, nb_epoch=5, top_layer_epochs=10, all_layer_epochs=20, load_val_ram=False, load_train_ram=False, net='resnet50', use_pretrained=True, nb_init_filter=32, init_filter_size=5, init_conv_stride=2, pool_size=2, pool_stride=2, weight_decay=.0001, weight_decay2=.0001, alpha=.0001, l1_ratio=.0, inp_dropout=.0, hidden_dropout=.0, hidden_dropout2=.0, optim='sgd', init_lr=.01, lr_patience=10, es_patience=25, resume_from=None, auto_batch_balance=False, pos_cls_weight=1.0, neg_cls_weight=1.0, top_layer_nb=None, top_layer_multiplier=.1, all_layer_multiplier=.01, best_model='./modelState/patch_clf.h5', final_model="NOSAVE"): '''Train a deep learning model for patch classifications ''' # ======= Environmental variables ======== # random_seed = int(os.getenv('RANDOM_SEED', 12345)) nb_worker = int(os.getenv('NUM_CPU_CORES', 4)) gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1)) # ========= Image generator ============== # if featurewise_center: train_imgen = DMImageDataGenerator(featurewise_center=True) val_imgen = DMImageDataGenerator(featurewise_center=True) test_imgen = DMImageDataGenerator(featurewise_center=True) train_imgen.mean = featurewise_mean val_imgen.mean = featurewise_mean test_imgen.mean = featurewise_mean else: train_imgen = DMImageDataGenerator() val_imgen = DMImageDataGenerator() test_imgen = DMImageDataGenerator() # Add augmentation options. if augmentation: train_imgen.horizontal_flip = True train_imgen.vertical_flip = True train_imgen.rotation_range = 25. # in degree. train_imgen.shear_range = .2 # in radians. train_imgen.zoom_range = [.8, 1.2] # in proportion. train_imgen.channel_shift_range = 20. # in pixel intensity values. 
# ================= Model creation ============== # model, preprocess_input, top_layer_nb = get_dl_model( net, nb_class=len(class_list), use_pretrained=use_pretrained, resume_from=resume_from, img_size=img_size, top_layer_nb=top_layer_nb, weight_decay=weight_decay, hidden_dropout=hidden_dropout, nb_init_filter=nb_init_filter, init_filter_size=init_filter_size, init_conv_stride=init_conv_stride, pool_size=pool_size, pool_stride=pool_stride, alpha=alpha, l1_ratio=l1_ratio, inp_dropout=inp_dropout) if featurewise_center: preprocess_input = None if gpu_count > 1: model, org_model = make_parallel(model, gpu_count) else: org_model = model # ============ Train & validation set =============== # train_bs = int(batch_size*train_bs_multiplier) if net != 'yaroslav': dup_3_channels = True else: dup_3_channels = False if load_train_ram: raw_imgen = DMImageDataGenerator() print "Create generator for raw train set" raw_generator = raw_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=train_bs, shuffle=False) print "Loading raw train set into RAM.", sys.stdout.flush() raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample) print "Done."; sys.stdout.flush() print "Create generator for train set" train_generator = train_imgen.flow( raw_set[0], raw_set[1], batch_size=train_bs, auto_batch_balance=auto_batch_balance, preprocess=preprocess_input, shuffle=True, seed=random_seed) else: print "Create generator for train set" train_generator = train_imgen.flow_from_directory( train_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', auto_batch_balance=auto_batch_balance, batch_size=train_bs, preprocess=preprocess_input, shuffle=True, seed=random_seed) print "Create generator for val set" validation_set = val_imgen.flow_from_directory( val_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, preprocess=preprocess_input, shuffle=False) sys.stdout.flush() if load_val_ram: print "Loading validation set into RAM.", sys.stdout.flush() validation_set = load_dat_ram(validation_set, validation_set.nb_sample) print "Done."; sys.stdout.flush() # ==================== Model training ==================== # # Do 3-stage training. 
train_batches = int(train_generator.nb_sample/train_bs) + 1 if isinstance(validation_set, tuple): val_samples = len(validation_set[0]) else: val_samples = validation_set.nb_sample validation_steps = int(val_samples/batch_size) #### DEBUG #### # val_samples = 100 #### DEBUG #### # import pdb; pdb.set_trace() model, loss_hist, acc_hist = do_3stage_training( model, org_model, train_generator, validation_set, validation_steps, best_model, train_batches, top_layer_nb, net, nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs, all_layer_epochs=all_layer_epochs, use_pretrained=use_pretrained, optim=optim, init_lr=init_lr, top_layer_multiplier=top_layer_multiplier, all_layer_multiplier=all_layer_multiplier, es_patience=es_patience, lr_patience=lr_patience, auto_batch_balance=auto_batch_balance, nb_class=len(class_list), pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight, nb_worker=nb_worker, weight_decay2=weight_decay2, hidden_dropout2=hidden_dropout2) # Training report. if len(loss_hist) > 0: min_loss_locs, = np.where(loss_hist == min(loss_hist)) best_val_loss = loss_hist[min_loss_locs[0]] best_val_accuracy = acc_hist[min_loss_locs[0]] print "\n==== Training summary ====" print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1 print "Best val loss:", best_val_loss print "Best val accuracy:", best_val_accuracy if final_model != "NOSAVE": model.save(final_model) # ==== Predict on test set ==== # print "\n==== Predicting on test set ====" test_generator = test_imgen.flow_from_directory( test_dir, target_size=img_size, target_scale=img_scale, rescale_factor=rescale_factor, equalize_hist=equalize_hist, dup_3_channels=dup_3_channels, classes=class_list, class_mode='categorical', batch_size=batch_size, preprocess=preprocess_input, shuffle=False) print "Test samples =", test_generator.nb_sample print "Load saved best model:", best_model + '.', sys.stdout.flush() org_model.load_weights(best_model) print "Done." test_steps = int(test_generator.nb_sample/batch_size) #### DEBUG #### # test_samples = 10 #### DEBUG #### test_res = model.evaluate_generator( test_generator, test_steps, nb_worker=nb_worker, pickle_safe=True if nb_worker > 1 else False) print "Evaluation result on test set:", test_res
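# --- Hedged sketch: environment variables read by the run() functions above ---
# Each trainer reads RANDOM_SEED, NUM_CPU_CORES and NUM_GPU_DEVICES via
# os.getenv with the defaults shown in the code; setting them in-process as
# below is only one way to configure a run (normally they would be exported in
# the shell before launching the script).
import os

def _configure_env(seed=12345, cpu_cores=4, gpu_devices=1):
    """Set the environment variables consumed by the training entry points."""
    os.environ['RANDOM_SEED'] = str(seed)
    os.environ['NUM_CPU_CORES'] = str(cpu_cores)
    os.environ['NUM_GPU_DEVICES'] = str(gpu_devices)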
def run(img_folder, dl_state, best_model, img_extension='dcm', img_height=1024, img_scale=255., equalize_hist=False, featurewise_center=False, featurewise_mean=91.6, neg_vs_pos_ratio=1., val_size=.1, test_size=.15, net='vgg19', batch_size=128, train_bs_multiplier=.5, patch_size=256, stride=8, roi_cutoff=.9, bkg_cutoff=[.5, 1.], sample_bkg=True, train_out='./scratch/train', val_out='./scratch/val', test_out='./scratch/test', out_img_ext='png', neg_name='benign', pos_name='malignant', bkg_name='background', augmentation=True, load_train_ram=False, load_val_ram=False, top_layer_nb=None, nb_epoch=10, top_layer_epochs=0, all_layer_epochs=0, optim='sgd', init_lr=.01, top_layer_multiplier=.01, all_layer_multiplier=.0001, es_patience=5, lr_patience=2, weight_decay2=.01, bias_multiplier=.1, hidden_dropout2=.0, exam_tsv='./metadata/exams_metadata.tsv', img_tsv='./metadata/images_crosswalk.tsv', out='./modelState/subj_lists.pkl'): '''Finetune a trained DL model on a different dataset ''' # Read some env variables. random_seed = int(os.getenv('RANDOM_SEED', 12345)) rng = RandomState(random_seed) # an rng used across board. nb_worker = int(os.getenv('NUM_CPU_CORES', 4)) gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1)) # Load and split image and label lists. meta_man = DMMetaManager(exam_tsv=exam_tsv, img_tsv=img_tsv, img_folder=img_folder, img_extension=img_extension) subj_list, subj_labs = meta_man.get_subj_labs() subj_labs = np.array(subj_labs) print "Found %d subjests" % (len(subj_list)) print "cancer patients=%d, normal patients=%d" \ % ((subj_labs==1).sum(), (subj_labs==0).sum()) if neg_vs_pos_ratio is not None: subj_list, subj_labs = DMMetaManager.subset_subj_list( subj_list, subj_labs, neg_vs_pos_ratio, random_seed) subj_labs = np.array(subj_labs) print "After subsetting, there are %d subjects" % (len(subj_list)) print "cancer patients=%d, normal patients=%d" \ % ((subj_labs==1).sum(), (subj_labs==0).sum()) subj_train, subj_test, labs_train, labs_test = train_test_split( subj_list, subj_labs, test_size=test_size, stratify=subj_labs, random_state=random_seed) subj_train, subj_val, labs_train, labs_val = train_test_split( subj_train, labs_train, test_size=val_size, stratify=labs_train, random_state=random_seed) # Get image lists. # >>>> Debug <<<< # # subj_train = subj_train[:5] # subj_val = subj_val[:5] # subj_test = subj_test[:5] # >>>> Debug <<<< # print "Get flattened image lists" img_train, ilab_train = meta_man.get_flatten_img_list(subj_train) img_val, ilab_val = meta_man.get_flatten_img_list(subj_val) img_test, ilab_test = meta_man.get_flatten_img_list(subj_test) ilab_train = np.array(ilab_train) ilab_val = np.array(ilab_val) ilab_test = np.array(ilab_test) print "On train set, positive img=%d, negative img=%d" \ % ((ilab_train==1).sum(), (ilab_train==0).sum()) print "On val set, positive img=%d, negative img=%d" \ % ((ilab_val==1).sum(), (ilab_val==0).sum()) print "On test set, positive img=%d, negative img=%d" \ % ((ilab_test==1).sum(), (ilab_test==0).sum()) sys.stdout.flush() # Save the subj lists. print "Saving subject lists to external files.", sys.stdout.flush() pickle.dump((subj_train, subj_val, subj_test), open(out, 'w')) print "Done." # Load DL model, preprocess function. 
print "Load patch classifier:", dl_state sys.stdout.flush() dl_model, preprocess_input, top_layer_nb = get_dl_model( net, use_pretrained=True, resume_from=dl_state, top_layer_nb=top_layer_nb) if featurewise_center: preprocess_input = None if gpu_count > 1: print "Make the model parallel on %d GPUs" % (gpu_count) sys.stdout.flush() dl_model, org_model = make_parallel(dl_model, gpu_count) parallelized = True else: org_model = dl_model parallelized = False # Sweep the whole images and classify patches. print "Score image patches and write them to:", train_out sys.stdout.flush() nb_roi_train, nb_bkg_train = score_write_patches( img_train, ilab_train, img_height, img_scale, patch_size, stride, dl_model, batch_size, neg_out=os.path.join(train_out, neg_name), pos_out=os.path.join(train_out, pos_name), bkg_out=os.path.join(train_out, bkg_name), preprocess=preprocess_input, equalize_hist=equalize_hist, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg, img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized) print "Wrote %d ROI and %d bkg patches" % (nb_roi_train, nb_bkg_train) #### print "Score image patches and write them to:", val_out sys.stdout.flush() nb_roi_val, nb_bkg_val = score_write_patches( img_val, ilab_val, img_height, img_scale, patch_size, stride, dl_model, batch_size, neg_out=os.path.join(val_out, neg_name), pos_out=os.path.join(val_out, pos_name), bkg_out=os.path.join(val_out, bkg_name), preprocess=preprocess_input, equalize_hist=equalize_hist, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg, img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized) print "Wrote %d ROI and %d bkg patches" % (nb_roi_val, nb_bkg_val) #### print "Score image patches and write them to:", test_out sys.stdout.flush() nb_roi_test, nb_bkg_test = score_write_patches( img_test, ilab_test, img_height, img_scale, patch_size, stride, dl_model, batch_size, neg_out=os.path.join(test_out, neg_name), pos_out=os.path.join(test_out, pos_name), bkg_out=os.path.join(test_out, bkg_name), preprocess=preprocess_input, equalize_hist=equalize_hist, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg, img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized) print "Wrote %d ROI and %d bkg patches" % (nb_roi_test, nb_bkg_test) sys.stdout.flush() # ==== Image generators ==== # if featurewise_center: train_imgen = DMImageDataGenerator(featurewise_center=True) val_imgen = DMImageDataGenerator(featurewise_center=True) test_imgen = DMImageDataGenerator(featurewise_center=True) train_imgen.mean = featurewise_mean val_imgen.mean = featurewise_mean test_imgen.mean = featurewise_mean else: train_imgen = DMImageDataGenerator() val_imgen = DMImageDataGenerator() test_imgen = DMImageDataGenerator() if augmentation: train_imgen.horizontal_flip = True train_imgen.vertical_flip = True train_imgen.rotation_range = 45. train_imgen.shear_range = np.pi / 8. # ==== Train & val set ==== # # Note: the images are histogram equalized before they were written to # external folders. 
train_bs = int(batch_size * train_bs_multiplier) if load_train_ram: raw_imgen = DMImageDataGenerator() print "Create generator for raw train set" raw_generator = raw_imgen.flow_from_directory( train_out, target_size=(patch_size, patch_size), target_scale=img_scale, equalize_hist=False, dup_3_channels=True, classes=[bkg_name, pos_name, neg_name], class_mode='categorical', batch_size=train_bs, shuffle=False) print "Loading raw train set into RAM.", sys.stdout.flush() raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample) print "Done." sys.stdout.flush() print "Create generator for train set" train_generator = train_imgen.flow(raw_set[0], raw_set[1], batch_size=train_bs, auto_batch_balance=True, preprocess=preprocess_input, shuffle=True, seed=random_seed) else: print "Create generator for train set" train_generator = train_imgen.flow_from_directory( train_out, target_size=(patch_size, patch_size), target_scale=img_scale, equalize_hist=False, dup_3_channels=True, classes=[bkg_name, pos_name, neg_name], class_mode='categorical', auto_batch_balance=True, batch_size=train_bs, preprocess=preprocess_input, shuffle=True, seed=random_seed) print "Create generator for val set" sys.stdout.flush() validation_set = val_imgen.flow_from_directory( val_out, target_size=(patch_size, patch_size), target_scale=img_scale, equalize_hist=False, dup_3_channels=True, classes=[bkg_name, pos_name, neg_name], class_mode='categorical', batch_size=batch_size, preprocess=preprocess_input, shuffle=False) val_samples = validation_set.nb_sample if parallelized and val_samples % batch_size != 0: val_samples -= val_samples % batch_size print "Validation samples =", val_samples sys.stdout.flush() if load_val_ram: print "Loading validation set into RAM.", sys.stdout.flush() validation_set = load_dat_ram(validation_set, val_samples) print "Done." print "Loaded %d val samples" % (len(validation_set[0])) sys.stdout.flush() # ==== Model finetuning ==== # train_batches = int(train_generator.nb_sample / train_bs) + 1 samples_per_epoch = train_bs * train_batches # import pdb; pdb.set_trace() dl_model, loss_hist, acc_hist = do_3stage_training( dl_model, org_model, train_generator, validation_set, val_samples, best_model, samples_per_epoch, top_layer_nb, net, nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs, all_layer_epochs=all_layer_epochs, use_pretrained=True, optim=optim, init_lr=init_lr, top_layer_multiplier=top_layer_multiplier, all_layer_multiplier=all_layer_multiplier, es_patience=es_patience, lr_patience=lr_patience, auto_batch_balance=True, nb_worker=nb_worker, weight_decay2=weight_decay2, bias_multiplier=bias_multiplier, hidden_dropout2=hidden_dropout2) # Training report. 
    # Training report.
    min_loss_locs, = np.where(loss_hist == min(loss_hist))
    best_val_loss = loss_hist[min_loss_locs[0]]
    best_val_accuracy = acc_hist[min_loss_locs[0]]
    print "\n==== Training summary ===="
    print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1
    print "Best val loss:", best_val_loss
    print "Best val accuracy:", best_val_accuracy

    # ==== Predict on test set ==== #
    print "\n==== Predicting on test set ===="
    print "Create generator for test set"
    test_generator = test_imgen.flow_from_directory(
        test_out, target_size=(patch_size, patch_size),
        target_scale=img_scale, equalize_hist=False, dup_3_channels=True,
        classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    test_samples = test_generator.nb_sample
    if parallelized and test_samples % batch_size != 0:
        test_samples -= test_samples % batch_size
    print "Test samples =", test_samples
    print "Load saved best model:", best_model + '.',
    sys.stdout.flush()
    org_model.load_weights(best_model)
    print "Done."
    test_res = dl_model.evaluate_generator(
        test_generator, test_samples, nb_worker=nb_worker,
        pickle_safe=True if nb_worker > 1 else False)
    print "Evaluation result on test set:", test_res
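    # Added sketch (not part of the original script): per-class behavior on
    # the test set could be inspected with predict_generator plus
    # scikit-learn, roughly as follows; this assumes shuffle=False above so
    # that test_generator.classes stays aligned with the predictions:
    #
    #   from sklearn.metrics import confusion_matrix
    #   pred = dl_model.predict_generator(
    #       test_generator, test_samples, nb_worker=nb_worker,
    #       pickle_safe=True if nb_worker > 1 else False)
    #   print confusion_matrix(test_generator.classes[:test_samples],
    #                          np.argmax(pred, axis=1))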
def run(train_dir, val_dir, test_dir, img_size=[256, 256], img_scale=255.,
        featurewise_center=True, featurewise_mean=59.6, equalize_hist=True,
        augmentation=False,
        class_list=['background', 'malignant', 'benign'],
        batch_size=64, train_bs_multiplier=.5,
        nb_epoch=5, top_layer_epochs=10, all_layer_epochs=20,
        load_val_ram=False, load_train_ram=False,
        net='resnet50', use_pretrained=True,
        nb_init_filter=32, init_filter_size=5, init_conv_stride=2,
        pool_size=2, pool_stride=2,
        weight_decay=.0001, weight_decay2=.0001, bias_multiplier=.1,
        alpha=.0001, l1_ratio=.0, inp_dropout=.0,
        hidden_dropout=.0, hidden_dropout2=.0,
        optim='sgd', init_lr=.01, lr_patience=10, es_patience=25,
        resume_from=None, auto_batch_balance=False,
        pos_cls_weight=1.0, neg_cls_weight=1.0,
        top_layer_nb=None, top_layer_multiplier=.1, all_layer_multiplier=.01,
        best_model='./modelState/patch_clf.h5',
        final_model="NOSAVE"):
    '''Train a deep learning model for patch classifications
    '''

    # ======= Environmental variables ======== #
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # ========= Image generator ============== #
    # if use_pretrained:  # use pretrained model's preprocessing.
    #     train_imgen = DMImageDataGenerator()
    #     val_imgen = DMImageDataGenerator()
    if featurewise_center:
        # fitgen = DMImageDataGenerator()
        # # Calculate pixel-level mean and std.
        # print "Create generator for mean and std fitting"
        # fit_patch_generator = fitgen.flow_from_directory(
        #     train_dir, target_size=img_size, target_scale=img_scale,
        #     classes=class_list, class_mode=None, batch_size=batch_size,
        #     shuffle=True, seed=random_seed)
        # sys.stdout.flush()
        # fit_X_lst = []
        # patches_seen = 0
        # while patches_seen < fit_size:
        #     X = fit_patch_generator.next()
        #     fit_X_lst.append(X)
        #     patches_seen += len(X)
        # fit_X_arr = np.concatenate(fit_X_lst)
        train_imgen = DMImageDataGenerator(featurewise_center=True)
        # featurewise_std_normalization=True)
        val_imgen = DMImageDataGenerator(featurewise_center=True)
        test_imgen = DMImageDataGenerator(featurewise_center=True)
        # featurewise_std_normalization=True)
        # train_imgen.fit(fit_X_arr)
        # print "Found mean=%.2f, std=%.2f" % (train_imgen.mean, train_imgen.std)
        # sys.stdout.flush()
        train_imgen.mean = featurewise_mean
        val_imgen.mean = featurewise_mean
        test_imgen.mean = featurewise_mean
        # del fit_X_arr, fit_X_lst
    else:
        train_imgen = DMImageDataGenerator()
        val_imgen = DMImageDataGenerator()
        test_imgen = DMImageDataGenerator()
        # train_imgen = DMImageDataGenerator(
        #     samplewise_center=True,
        #     samplewise_std_normalization=True)
        # val_imgen = DMImageDataGenerator(
        #     samplewise_center=True,
        #     samplewise_std_normalization=True)

    # Add augmentation options.
    if augmentation:
        train_imgen.horizontal_flip = True
        train_imgen.vertical_flip = True
        train_imgen.rotation_range = 45.
        train_imgen.shear_range = np.pi / 8.
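    # Added note: rotation_range is given in degrees while shear_range is in
    # radians (np.pi / 8 is roughly a 22.5 degree shear); no zoom or channel
    # shift augmentation is applied in this configuration. With
    # featurewise_center=True, the fixed featurewise_mean (59.6 by default)
    # is subtracted from every pixel instead of fitting the mean on the
    # training patches as in the commented-out code above.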
    # ================= Model creation ============== #
    model, preprocess_input, top_layer_nb = get_dl_model(
        net, nb_class=len(class_list), use_pretrained=use_pretrained,
        resume_from=resume_from, img_size=img_size, top_layer_nb=top_layer_nb,
        weight_decay=weight_decay, bias_multiplier=bias_multiplier,
        hidden_dropout=hidden_dropout, nb_init_filter=nb_init_filter,
        init_filter_size=init_filter_size, init_conv_stride=init_conv_stride,
        pool_size=pool_size, pool_stride=pool_stride,
        alpha=alpha, l1_ratio=l1_ratio, inp_dropout=inp_dropout)
    if featurewise_center:
        preprocess_input = None
    if gpu_count > 1:
        model, org_model = make_parallel(model, gpu_count)
    else:
        org_model = model

    # ============ Train & validation set =============== #
    train_bs = int(batch_size * train_bs_multiplier)
    if use_pretrained:
        dup_3_channels = True
    else:
        dup_3_channels = False
    if load_train_ram:
        raw_imgen = DMImageDataGenerator()
        print "Create generator for raw train set"
        raw_generator = raw_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            batch_size=train_bs, shuffle=False)
        print "Loading raw train set into RAM.",
        sys.stdout.flush()
        raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample)
        print "Done."
        sys.stdout.flush()
        print "Create generator for train set"
        train_generator = train_imgen.flow(
            raw_set[0], raw_set[1], batch_size=train_bs,
            auto_batch_balance=auto_batch_balance,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)
    else:
        print "Create generator for train set"
        train_generator = train_imgen.flow_from_directory(
            train_dir, target_size=img_size, target_scale=img_scale,
            equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
            classes=class_list, class_mode='categorical',
            auto_batch_balance=auto_batch_balance, batch_size=train_bs,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)

    # import pdb; pdb.set_trace()
    print "Create generator for val set"
    validation_set = val_imgen.flow_from_directory(
        val_dir, target_size=img_size, target_scale=img_scale,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    sys.stdout.flush()
    if load_val_ram:
        print "Loading validation set into RAM.",
        sys.stdout.flush()
        validation_set = load_dat_ram(validation_set, validation_set.nb_sample)
        print "Done."
        sys.stdout.flush()

    # ==================== Model training ==================== #
    # Callbacks and class weight.
    early_stopping = EarlyStopping(monitor='val_loss', patience=es_patience,
                                   verbose=1)
    checkpointer = ModelCheckpoint(best_model, monitor='val_acc', verbose=1,
                                   save_best_only=True)
    stdout_flush = DMFlush()
    callbacks = [early_stopping, checkpointer, stdout_flush]
    if optim == 'sgd':
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                      patience=lr_patience, verbose=1)
        callbacks.append(reduce_lr)
    if auto_batch_balance:
        class_weight = None
    elif len(class_list) == 2:
        class_weight = {0: 1.0, 1: pos_cls_weight}
    elif len(class_list) == 3:
        class_weight = {0: 1.0, 1: pos_cls_weight, 2: neg_cls_weight}
    else:
        class_weight = None
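    # Added note: when auto_batch_balance is enabled the generators are
    # expected to rebalance every batch across classes themselves, so no
    # explicit class_weight is passed to Keras; otherwise the weights are
    # keyed by class index in the order of class_list (with the default
    # list: 0 = background, 1 = malignant weighted by pos_cls_weight,
    # 2 = benign weighted by neg_cls_weight).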
    # Do 3-stage training.
    train_batches = int(train_generator.nb_sample / train_bs) + 1
    samples_per_epoch = train_bs * train_batches
    #### DEBUG ####
    # samples_per_epoch = train_bs*10
    #### DEBUG ####
    if isinstance(validation_set, tuple):
        val_samples = len(validation_set[0])
    else:
        val_samples = validation_set.nb_sample
    #### DEBUG ####
    # val_samples = 100
    #### DEBUG ####
    model, loss_hist, acc_hist = do_3stage_training(
        model, org_model, train_generator, validation_set, val_samples,
        best_model, samples_per_epoch, top_layer_nb, net,
        nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs,
        all_layer_epochs=all_layer_epochs,
        use_pretrained=use_pretrained, optim=optim, init_lr=init_lr,
        top_layer_multiplier=top_layer_multiplier,
        all_layer_multiplier=all_layer_multiplier,
        es_patience=es_patience, lr_patience=lr_patience,
        auto_batch_balance=auto_batch_balance,
        pos_cls_weight=pos_cls_weight, neg_cls_weight=neg_cls_weight,
        nb_worker=nb_worker, weight_decay2=weight_decay2,
        bias_multiplier=bias_multiplier, hidden_dropout2=hidden_dropout2)

    # Training report.
    min_loss_locs, = np.where(loss_hist == min(loss_hist))
    best_val_loss = loss_hist[min_loss_locs[0]]
    best_val_accuracy = acc_hist[min_loss_locs[0]]
    print "\n==== Training summary ===="
    print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1
    print "Best val loss:", best_val_loss
    print "Best val accuracy:", best_val_accuracy

    if final_model != "NOSAVE":
        model.save(final_model)

    # ==== Predict on test set ==== #
    print "\n==== Predicting on test set ===="
    test_generator = test_imgen.flow_from_directory(
        test_dir, target_size=img_size, target_scale=img_scale,
        equalize_hist=equalize_hist, dup_3_channels=dup_3_channels,
        classes=class_list, class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    print "Test samples =", test_generator.nb_sample
    print "Load saved best model:", best_model + '.',
    sys.stdout.flush()
    org_model.load_weights(best_model)
    print "Done."
    test_samples = test_generator.nb_sample
    #### DEBUG ####
    # test_samples = 10
    #### DEBUG ####
    test_res = model.evaluate_generator(
        test_generator, test_samples, nb_worker=nb_worker,
        pickle_safe=True if nb_worker > 1 else False)
    print "Evaluation result on test set:", test_res
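# Example invocation (illustrative only; the paths and hyper-parameter values
# below are made up, and the real entry point with argument parsing lives
# elsewhere in the repository):
#
#   run('./data/patches/train', './data/patches/val', './data/patches/test',
#       net='resnet50', batch_size=32, train_bs_multiplier=.5,
#       nb_epoch=3, top_layer_epochs=10, all_layer_epochs=20,
#       augmentation=True, load_val_ram=True,
#       best_model='./modelState/patch_clf.h5',
#       final_model='./modelState/patch_clf_final.h5')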