def run(img_folder, img_height=1024, img_scale=4095, roi_per_img=32,
        roi_size=(256, 256), low_int_threshold=.05, blob_min_area=3,
        blob_min_int=.5, blob_max_int=.85, blob_th_step=10,
        roi_state=None, roi_bs=32,
        do_featurewise_norm=True, featurewise_mean=884.7,
        featurewise_std=745.3,
        img_tsv='./metadata/images_crosswalk_prediction.tsv',
        exam_tsv=None,
        dl_state=None, dl_bs=32, nb_top_avg=1,
        validation_mode=False, val_size=None, img_voting=False,
        out_pred='./output/predictions.tsv'):
    '''Run SC1 inference using the candidate ROI approach

    Notes:
        "mean=884.7, std=745.3" are estimated from 20 subjects on the
        training data.
    '''

    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    # nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # Setup data generator for inference.
    meta_man = DMMetaManager(img_tsv=img_tsv, exam_tsv=exam_tsv,
                             img_folder=img_folder, img_extension='dcm')
    if val_size is not None:  # Use a subset for validation.
        subj_list, subj_labs = meta_man.get_subj_labs()
        _, subj_test = train_test_split(
            subj_list, test_size=val_size, random_state=random_seed,
            stratify=subj_labs)
    else:
        subj_test = None
    if validation_mode:
        exam_list = meta_man.get_flatten_exam_list(
            subj_list=subj_test, flatten_img_list=True)
    else:
        exam_list = meta_man.get_last_exam_list(
            subj_list=subj_test, flatten_img_list=True)
    if do_featurewise_norm:
        img_gen = DMImageDataGenerator(featurewise_center=True,
                                       featurewise_std_normalization=True)
        img_gen.mean = featurewise_mean
        img_gen.std = featurewise_std
    else:
        img_gen = DMImageDataGenerator(samplewise_center=True,
                                       samplewise_std_normalization=True)
    if validation_mode:
        class_mode = 'categorical'
    else:
        class_mode = None

    # Load ROI classifier.
    if roi_state is not None:
        roi_clf = load_model(
            roi_state,
            custom_objects={
                'sensitivity': DMMetrics.sensitivity,
                'specificity': DMMetrics.specificity
            })
        if gpu_count > 1:
            roi_clf = make_parallel(roi_clf, gpu_count)
    else:
        roi_clf = None

    # Load model.
    if dl_state is not None:
        model = load_model(dl_state)
    else:
        raise Exception('At least one model state must be specified.')
    if gpu_count > 1:
        model = make_parallel(model, gpu_count)

    # A function to make predictions on image patches from an image list.
    def pred_img_list(img_list):
        roi_generator = img_gen.flow_from_candid_roi(
            img_list, target_height=img_height, target_scale=img_scale,
            class_mode=class_mode, validation_mode=True,
            img_per_batch=len(img_list), roi_per_img=roi_per_img,
            roi_size=roi_size,
            low_int_threshold=low_int_threshold, blob_min_area=blob_min_area,
            blob_min_int=blob_min_int, blob_max_int=blob_max_int,
            blob_th_step=blob_th_step,
            roi_clf=roi_clf, clf_bs=roi_bs, return_sample_weight=True,
            seed=random_seed)
        roi_dat, roi_w = roi_generator.next()
        # import pdb; pdb.set_trace()
        pred = model.predict(roi_dat, batch_size=dl_bs)
        pred = pred[:, 1]  # cancer class predictions.
        if roi_clf is not None:
            # return np.average(pred, weights=roi_w)
            # import pdb; pdb.set_trace()
            return pred[np.argsort(roi_w)[-nb_top_avg:]].mean()
        elif img_voting:
            pred = pred.reshape((-1, roi_per_img))
            img_preds = [np.sort(row)[-nb_top_avg:].mean() for row in pred]
            return np.mean(img_preds)
        else:
            return np.sort(pred)[-nb_top_avg:].mean()

    # Print header.
    fout = open(out_pred, 'w')
    if validation_mode:
        fout.write(dminfer.INFER_HEADER_VAL)
    else:
        fout.write(dminfer.INFER_HEADER)
    for subj, exidx, exam in exam_list:
        try:
            predL = pred_img_list(exam['L']['img'])
        except KeyError:
            predL = .0
        try:
            predR = pred_img_list(exam['R']['img'])
        except KeyError:
            predR = .0
        try:
            cancerL = int(exam['L']['cancer'])
        except ValueError:
            cancerL = 0
        try:
            cancerR = int(exam['R']['cancer'])
        except ValueError:
            cancerR = 0
        if validation_mode:
            fout.write("%s\t%s\tL\t%f\t%d\n" % \
                       (str(subj), str(exidx), predL, cancerL))
            fout.write("%s\t%s\tR\t%f\t%d\n" % \
                       (str(subj), str(exidx), predR, cancerR))
        else:
            fout.write("%s\tL\t%f\n" % (str(subj), predL))
            fout.write("%s\tR\t%f\n" % (str(subj), predR))
    fout.close()
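
# Minimal usage sketch for the SC1 inference entry point above. All paths and
# model files here are illustrative assumptions; in practice run() is invoked
# through a CLI wrapper that supplies these arguments.
#
# run('./inferenceData',
#     dl_state='./modelState/dm_candidROI_best_model.h5',
#     roi_state=None,
#     validation_mode=False,
#     out_pred='./output/predictions.tsv')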
def run(img_folder, img_extension='dcm', img_height=1024, img_scale=4095,
        do_featurewise_norm=True, norm_fit_size=10,
        img_per_batch=2, roi_per_img=32, roi_size=(256, 256),
        one_patch_mode=False,
        low_int_threshold=.05, blob_min_area=3,
        blob_min_int=.5, blob_max_int=.85, blob_th_step=10,
        data_augmentation=False, roi_state=None, clf_bs=32, cutpoint=.5,
        amp_factor=1., return_sample_weight=True, auto_batch_balance=True,
        patches_per_epoch=12800, nb_epoch=20,
        neg_vs_pos_ratio=None, all_neg_skip=0.,
        nb_init_filter=32, init_filter_size=5, init_conv_stride=2,
        pool_size=2, pool_stride=2,
        weight_decay=.0001, alpha=.0001, l1_ratio=.0,
        inp_dropout=.0, hidden_dropout=.0, init_lr=.01,
        test_size=.2, val_size=.0,
        lr_patience=3, es_patience=10,
        resume_from=None, net='resnet50', load_val_ram=False,
        load_train_ram=False, no_pos_skip=0., balance_classes=0.,
        pred_img_per_batch=1, pred_roi_per_img=32,
        exam_tsv='./metadata/exams_metadata.tsv',
        img_tsv='./metadata/images_crosswalk.tsv',
        best_model='./modelState/dm_candidROI_best_model.h5',
        final_model="NOSAVE",
        pred_trainval=False, pred_out="dl_pred_out.pkl"):
    '''Run ResNet training on candidate ROIs from mammograms

    Args:
        norm_fit_size ([int]): the number of patients used to calculate
            feature-wise mean and std.
    '''

    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    # Use of multiple CPU cores is not working!
    # When nb_worker>1 and pickle_safe=True, this error is encountered:
    # "failed to enqueue async memcpy from host to device:
    #  CUDA_ERROR_NOT_INITIALIZED"
    # To avoid the error, only this combination worked:
    # nb_worker=1 and pickle_safe=False.
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # Setup training and validation data.
    # Load image or exam lists and split them into train and val sets.
    meta_man = DMMetaManager(exam_tsv=exam_tsv,
                             img_tsv=img_tsv,
                             img_folder=img_folder,
                             img_extension=img_extension)
    # Split data based on subjects.
    subj_list, subj_labs = meta_man.get_subj_labs()
    subj_train, subj_test, slab_train, slab_test = train_test_split(
        subj_list, subj_labs, test_size=test_size,
        random_state=random_seed, stratify=subj_labs)
    if val_size > 0:  # train/val split.
        subj_train, subj_val, slab_train, slab_val = train_test_split(
            subj_train, slab_train, test_size=val_size,
            random_state=random_seed, stratify=slab_train)
    else:  # use test as val. make a copy of the test list.
        subj_val = list(subj_test)
        slab_val = list(slab_test)
    # import pdb; pdb.set_trace()

    # Subset subject lists to desired ratio.
    if neg_vs_pos_ratio is not None:
        subj_train, slab_train = DMMetaManager.subset_subj_list(
            subj_train, slab_train, neg_vs_pos_ratio, random_seed)
        subj_val, slab_val = DMMetaManager.subset_subj_list(
            subj_val, slab_val, neg_vs_pos_ratio, random_seed)
    print "After sampling, Nb of subjects for train=%d, val=%d, test=%d" \
        % (len(subj_train), len(subj_val), len(subj_test))

    # Get image and label lists.
    img_train, lab_train = meta_man.get_flatten_img_list(subj_train)
    img_val, lab_val = meta_man.get_flatten_img_list(subj_val)

    # Create image generators for train, fit and val.
    imgen_trainval = DMImageDataGenerator()
    if data_augmentation:
        imgen_trainval.horizontal_flip = True
        imgen_trainval.vertical_flip = True
        imgen_trainval.rotation_range = 45.
        imgen_trainval.shear_range = np.pi/8.
        # imgen_trainval.width_shift_range = .05
        # imgen_trainval.height_shift_range = .05
        # imgen_trainval.zoom_range = [.95, 1.05]

    if do_featurewise_norm:
        imgen_trainval.featurewise_center = True
        imgen_trainval.featurewise_std_normalization = True
        # Fit feature-wise mean and std.
        img_fit, _ = meta_man.get_flatten_img_list(
            subj_train[:norm_fit_size])  # fit on a subset.
        print ">>> Fit image generator <<<"; sys.stdout.flush()
        fit_generator = imgen_trainval.flow_from_candid_roi(
            img_fit,
            target_height=img_height, target_scale=img_scale,
            class_mode=None, validation_mode=True,
            img_per_batch=len(img_fit), roi_per_img=roi_per_img,
            roi_size=roi_size,
            low_int_threshold=low_int_threshold, blob_min_area=blob_min_area,
            blob_min_int=blob_min_int, blob_max_int=blob_max_int,
            blob_th_step=blob_th_step,
            roi_clf=None, return_sample_weight=False, seed=random_seed)
        imgen_trainval.fit(fit_generator.next())
        print "Estimates from %d images: mean=%.1f, std=%.1f." % \
            (len(img_fit), imgen_trainval.mean, imgen_trainval.std)
        sys.stdout.flush()
    else:
        imgen_trainval.samplewise_center = True
        imgen_trainval.samplewise_std_normalization = True

    # Load ROI classifier.
    if roi_state is not None:
        roi_clf = load_model(
            roi_state,
            custom_objects={
                'sensitivity': DMMetrics.sensitivity,
                'specificity': DMMetrics.specificity
            }
        )
        graph = tf.get_default_graph()
    else:
        roi_clf = None
        graph = None

    # Set some DL training related parameters.
    if one_patch_mode:
        class_mode = 'binary'
        loss = 'binary_crossentropy'
        metrics = [DMMetrics.sensitivity, DMMetrics.specificity]
    else:
        class_mode = 'categorical'
        loss = 'categorical_crossentropy'
        metrics = ['accuracy', 'precision', 'recall']
    if load_train_ram:
        validation_mode = True
        return_raw_img = True
    else:
        validation_mode = False
        return_raw_img = False

    # Create train and val generators.
    print ">>> Train image generator <<<"; sys.stdout.flush()
    train_generator = imgen_trainval.flow_from_candid_roi(
        img_train, lab_train,
        target_height=img_height, target_scale=img_scale,
        class_mode=class_mode, validation_mode=validation_mode,
        img_per_batch=img_per_batch, roi_per_img=roi_per_img,
        roi_size=roi_size, one_patch_mode=one_patch_mode,
        low_int_threshold=low_int_threshold, blob_min_area=blob_min_area,
        blob_min_int=blob_min_int, blob_max_int=blob_max_int,
        blob_th_step=blob_th_step,
        tf_graph=graph, roi_clf=roi_clf, clf_bs=clf_bs, cutpoint=cutpoint,
        amp_factor=amp_factor, return_sample_weight=return_sample_weight,
        auto_batch_balance=auto_batch_balance,
        all_neg_skip=all_neg_skip, shuffle=True, seed=random_seed,
        return_raw_img=return_raw_img)

    print ">>> Validation image generator <<<"; sys.stdout.flush()
    val_generator = imgen_trainval.flow_from_candid_roi(
        img_val, lab_val,
        target_height=img_height, target_scale=img_scale,
        class_mode=class_mode, validation_mode=True,
        img_per_batch=img_per_batch, roi_per_img=roi_per_img,
        roi_size=roi_size, one_patch_mode=one_patch_mode,
        low_int_threshold=low_int_threshold, blob_min_area=blob_min_area,
        blob_min_int=blob_min_int, blob_max_int=blob_max_int,
        blob_th_step=blob_th_step,
        tf_graph=graph, roi_clf=roi_clf, clf_bs=clf_bs, cutpoint=cutpoint,
        amp_factor=amp_factor, return_sample_weight=False,
        auto_batch_balance=False, seed=random_seed)

    # Load train and validation set into RAM.
    if one_patch_mode:
        nb_train_samples = len(img_train)
        nb_val_samples = len(img_val)
    else:
        nb_train_samples = len(img_train)*roi_per_img
        nb_val_samples = len(img_val)*roi_per_img
    if load_val_ram:
        print "Loading validation data into RAM.",
        sys.stdout.flush()
        validation_set = load_dat_ram(val_generator, nb_val_samples)
        print "Done."; sys.stdout.flush()
        sparse_y = to_sparse(validation_set[1])
        for uy in np.unique(sparse_y):
            print "Nb of samples for class:%d = %d" % \
                (uy, (sparse_y==uy).sum())
        sys.stdout.flush()
    if load_train_ram:
        print "Loading train data into RAM.",
        sys.stdout.flush()
        train_set = load_dat_ram(train_generator, nb_train_samples)
        print "Done."; sys.stdout.flush()
        sparse_y = to_sparse(train_set[1])
        for uy in np.unique(sparse_y):
            print "Nb of samples for class:%d = %d" % \
                (uy, (sparse_y==uy).sum())
        sys.stdout.flush()
        train_generator = imgen_trainval.flow(
            train_set[0], train_set[1], batch_size=clf_bs,
            auto_batch_balance=auto_batch_balance, no_pos_skip=no_pos_skip,
            balance_classes=balance_classes,
            shuffle=True, seed=random_seed)

    # Load or create model.
    if resume_from is not None:
        model = load_model(
            resume_from,
            custom_objects={
                'sensitivity': DMMetrics.sensitivity,
                'specificity': DMMetrics.specificity
            }
        )
    else:
        builder = ResNetBuilder
        if net == 'resnet18':
            model = builder.build_resnet_18(
                (1, roi_size[0], roi_size[1]), 3, nb_init_filter,
                init_filter_size, init_conv_stride, pool_size, pool_stride,
                weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout)
        elif net == 'resnet34':
            model = builder.build_resnet_34(
                (1, roi_size[0], roi_size[1]), 3, nb_init_filter,
                init_filter_size, init_conv_stride, pool_size, pool_stride,
                weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout)
        elif net == 'resnet50':
            model = builder.build_resnet_50(
                (1, roi_size[0], roi_size[1]), 3, nb_init_filter,
                init_filter_size, init_conv_stride, pool_size, pool_stride,
                weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout)
        elif net == 'resnet101':
            model = builder.build_resnet_101(
                (1, roi_size[0], roi_size[1]), 3, nb_init_filter,
                init_filter_size, init_conv_stride, pool_size, pool_stride,
                weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout)
        elif net == 'resnet152':
            model = builder.build_resnet_152(
                (1, roi_size[0], roi_size[1]), 3, nb_init_filter,
                init_filter_size, init_conv_stride, pool_size, pool_stride,
                weight_decay, alpha, l1_ratio, inp_dropout, hidden_dropout)
    if gpu_count > 1:
        model = make_parallel(model, gpu_count)

    # Model training.
    sgd = SGD(lr=init_lr, momentum=0.9, decay=0.0, nesterov=True)
    model.compile(optimizer=sgd, loss=loss, metrics=metrics)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                  patience=lr_patience, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', patience=es_patience,
                                   verbose=1)
    if load_val_ram:
        auc_checkpointer = DMAucModelCheckpoint(
            best_model, validation_set, batch_size=clf_bs)
    else:
        auc_checkpointer = DMAucModelCheckpoint(
            best_model, val_generator, nb_test_samples=nb_val_samples)
    hist = model.fit_generator(
        train_generator,
        samples_per_epoch=patches_per_epoch,
        nb_epoch=nb_epoch,
        validation_data=validation_set if load_val_ram else val_generator,
        nb_val_samples=nb_val_samples,
        callbacks=[reduce_lr, early_stopping, auc_checkpointer],
        # nb_worker=1, pickle_safe=False,
        nb_worker=nb_worker if load_train_ram else 1,
        pickle_safe=True if load_train_ram else False,
        verbose=2)

    if final_model != "NOSAVE":
        print "Saving final model to:", final_model; sys.stdout.flush()
        model.save(final_model)

    # Training report.
    min_loss_locs, = np.where(
        hist.history['val_loss'] == min(hist.history['val_loss']))
    best_val_loss = hist.history['val_loss'][min_loss_locs[0]]
    if one_patch_mode:
        best_val_sensitivity = hist.history['val_sensitivity'][min_loss_locs[0]]
        best_val_specificity = hist.history['val_specificity'][min_loss_locs[0]]
    else:
        best_val_precision = hist.history['val_precision'][min_loss_locs[0]]
        best_val_recall = hist.history['val_recall'][min_loss_locs[0]]
        best_val_accuracy = hist.history['val_acc'][min_loss_locs[0]]
    print "\n==== Training summary ===="
    print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1
    print "Best val loss:", best_val_loss
    if one_patch_mode:
        print "Best val sensitivity:", best_val_sensitivity
        print "Best val specificity:", best_val_specificity
    else:
        print "Best val precision:", best_val_precision
        print "Best val recall:", best_val_recall
        print "Best val accuracy:", best_val_accuracy

    # Make predictions on train, val, test exam lists.
    if best_model != 'NOSAVE':
        print "\n==== Making predictions ===="
        print "Load best model for prediction:", best_model
        sys.stdout.flush()
        pred_model = load_model(best_model)
        if gpu_count > 1:
            pred_model = make_parallel(pred_model, gpu_count)

        if pred_trainval:
            print "Load exam lists for train, val sets"; sys.stdout.flush()
            exam_train = meta_man.get_flatten_exam_list(
                subj_train, flatten_img_list=True)
            print "Train exam list length=", len(exam_train)
            sys.stdout.flush()
            exam_val = meta_man.get_flatten_exam_list(
                subj_val, flatten_img_list=True)
            print "Val exam list length=", len(exam_val)
            sys.stdout.flush()
        print "Load exam list for test set"; sys.stdout.flush()
        exam_test = meta_man.get_flatten_exam_list(
            subj_test, flatten_img_list=True)
        print "Test exam list length=", len(exam_test)
        sys.stdout.flush()

        # Create the prediction image generator before branching so that the
        # samplewise branch does not reference an undefined name.
        imgen_pred = DMImageDataGenerator()
        if do_featurewise_norm:
            imgen_pred.featurewise_center = True
            imgen_pred.featurewise_std_normalization = True
            imgen_pred.mean = imgen_trainval.mean
            imgen_pred.std = imgen_trainval.std
        else:
            imgen_pred.samplewise_center = True
            imgen_pred.samplewise_std_normalization = True

        if pred_trainval:
            print "Make predictions on train exam list"; sys.stdout.flush()
            meta_prob_train = get_exam_pred(
                exam_train, pred_roi_per_img, imgen_pred,
                target_height=img_height, target_scale=img_scale,
                img_per_batch=pred_img_per_batch, roi_size=roi_size,
                low_int_threshold=low_int_threshold,
                blob_min_area=blob_min_area,
                blob_min_int=blob_min_int, blob_max_int=blob_max_int,
                blob_th_step=blob_th_step, seed=random_seed,
                dl_model=pred_model)
            print "Train prediction list length=", len(meta_prob_train)

            print "Make predictions on val exam list"; sys.stdout.flush()
            meta_prob_val = get_exam_pred(
                exam_val, pred_roi_per_img, imgen_pred,
                target_height=img_height, target_scale=img_scale,
                img_per_batch=pred_img_per_batch, roi_size=roi_size,
                low_int_threshold=low_int_threshold,
                blob_min_area=blob_min_area,
                blob_min_int=blob_min_int, blob_max_int=blob_max_int,
                blob_th_step=blob_th_step, seed=random_seed,
                dl_model=pred_model)
            print "Val prediction list length=", len(meta_prob_val)

        print "Make predictions on test exam list"; sys.stdout.flush()
        meta_prob_test = get_exam_pred(
            exam_test, pred_roi_per_img, imgen_pred,
            target_height=img_height, target_scale=img_scale,
            img_per_batch=pred_img_per_batch, roi_size=roi_size,
            low_int_threshold=low_int_threshold,
            blob_min_area=blob_min_area,
            blob_min_int=blob_min_int, blob_max_int=blob_max_int,
            blob_th_step=blob_th_step, seed=random_seed,
            dl_model=pred_model)
        print "Test prediction list length=", len(meta_prob_test)
        if pred_trainval:
            pickle.dump((meta_prob_train, meta_prob_val, meta_prob_test),
                        open(pred_out, 'w'))
        else:
            pickle.dump(meta_prob_test, open(pred_out, 'w'))

    return hist
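
# Minimal usage sketch for the candidate-ROI training entry point above. The
# folder and output paths are illustrative assumptions beyond the defaults
# declared in the signature.
#
# hist = run('./trainingData',
#            net='resnet50', nb_epoch=20, patches_per_epoch=12800,
#            best_model='./modelState/dm_candidROI_best_model.h5',
#            pred_out='./modelState/dl_pred_out.pkl')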
def run(img_folder, dl_state, best_model, img_extension='dcm',
        img_height=1024, img_scale=255., equalize_hist=False,
        featurewise_center=False, featurewise_mean=91.6,
        neg_vs_pos_ratio=1., val_size=.1, test_size=.15,
        net='vgg19', batch_size=128, train_bs_multiplier=.5,
        patch_size=256, stride=8, roi_cutoff=.9, bkg_cutoff=[.5, 1.],
        sample_bkg=True,
        train_out='./scratch/train', val_out='./scratch/val',
        test_out='./scratch/test', out_img_ext='png',
        neg_name='benign', pos_name='malignant', bkg_name='background',
        augmentation=True,
        load_train_ram=False, load_val_ram=False,
        top_layer_nb=None, nb_epoch=10,
        top_layer_epochs=0, all_layer_epochs=0,
        optim='sgd', init_lr=.01,
        top_layer_multiplier=.01, all_layer_multiplier=.0001,
        es_patience=5, lr_patience=2,
        weight_decay2=.01, bias_multiplier=.1, hidden_dropout2=.0,
        exam_tsv='./metadata/exams_metadata.tsv',
        img_tsv='./metadata/images_crosswalk.tsv',
        out='./modelState/subj_lists.pkl'):
    '''Finetune a trained DL model on a different dataset
    '''

    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    rng = RandomState(random_seed)  # an rng used across board.
    nb_worker = int(os.getenv('NUM_CPU_CORES', 4))
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # Load and split image and label lists.
    meta_man = DMMetaManager(exam_tsv=exam_tsv,
                             img_tsv=img_tsv,
                             img_folder=img_folder,
                             img_extension=img_extension)
    subj_list, subj_labs = meta_man.get_subj_labs()
    subj_labs = np.array(subj_labs)
    print "Found %d subjects" % (len(subj_list))
    print "cancer patients=%d, normal patients=%d" \
        % ((subj_labs==1).sum(), (subj_labs==0).sum())
    if neg_vs_pos_ratio is not None:
        subj_list, subj_labs = DMMetaManager.subset_subj_list(
            subj_list, subj_labs, neg_vs_pos_ratio, random_seed)
        subj_labs = np.array(subj_labs)
        print "After subsetting, there are %d subjects" % (len(subj_list))
        print "cancer patients=%d, normal patients=%d" \
            % ((subj_labs==1).sum(), (subj_labs==0).sum())
    subj_train, subj_test, labs_train, labs_test = train_test_split(
        subj_list, subj_labs, test_size=test_size, stratify=subj_labs,
        random_state=random_seed)
    subj_train, subj_val, labs_train, labs_val = train_test_split(
        subj_train, labs_train, test_size=val_size, stratify=labs_train,
        random_state=random_seed)

    # Get image lists.
    # >>>> Debug <<<< #
    # subj_train = subj_train[:5]
    # subj_val = subj_val[:5]
    # subj_test = subj_test[:5]
    # >>>> Debug <<<< #
    print "Get flattened image lists"
    img_train, ilab_train = meta_man.get_flatten_img_list(subj_train)
    img_val, ilab_val = meta_man.get_flatten_img_list(subj_val)
    img_test, ilab_test = meta_man.get_flatten_img_list(subj_test)
    ilab_train = np.array(ilab_train)
    ilab_val = np.array(ilab_val)
    ilab_test = np.array(ilab_test)
    print "On train set, positive img=%d, negative img=%d" \
        % ((ilab_train==1).sum(), (ilab_train==0).sum())
    print "On val set, positive img=%d, negative img=%d" \
        % ((ilab_val==1).sum(), (ilab_val==0).sum())
    print "On test set, positive img=%d, negative img=%d" \
        % ((ilab_test==1).sum(), (ilab_test==0).sum())
    sys.stdout.flush()

    # Save the subj lists.
    print "Saving subject lists to external files.",
    sys.stdout.flush()
    pickle.dump((subj_train, subj_val, subj_test), open(out, 'w'))
    print "Done."

    # Load DL model, preprocess function.
print "Load patch classifier:", dl_state sys.stdout.flush() dl_model, preprocess_input, top_layer_nb = get_dl_model( net, use_pretrained=True, resume_from=dl_state, top_layer_nb=top_layer_nb) if featurewise_center: preprocess_input = None if gpu_count > 1: print "Make the model parallel on %d GPUs" % (gpu_count) sys.stdout.flush() dl_model, org_model = make_parallel(dl_model, gpu_count) parallelized = True else: org_model = dl_model parallelized = False # Sweep the whole images and classify patches. print "Score image patches and write them to:", train_out sys.stdout.flush() nb_roi_train, nb_bkg_train = score_write_patches( img_train, ilab_train, img_height, img_scale, patch_size, stride, dl_model, batch_size, neg_out=os.path.join(train_out, neg_name), pos_out=os.path.join(train_out, pos_name), bkg_out=os.path.join(train_out, bkg_name), preprocess=preprocess_input, equalize_hist=equalize_hist, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg, img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized) print "Wrote %d ROI and %d bkg patches" % (nb_roi_train, nb_bkg_train) #### print "Score image patches and write them to:", val_out sys.stdout.flush() nb_roi_val, nb_bkg_val = score_write_patches( img_val, ilab_val, img_height, img_scale, patch_size, stride, dl_model, batch_size, neg_out=os.path.join(val_out, neg_name), pos_out=os.path.join(val_out, pos_name), bkg_out=os.path.join(val_out, bkg_name), preprocess=preprocess_input, equalize_hist=equalize_hist, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg, img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized) print "Wrote %d ROI and %d bkg patches" % (nb_roi_val, nb_bkg_val) #### print "Score image patches and write them to:", test_out sys.stdout.flush() nb_roi_test, nb_bkg_test = score_write_patches( img_test, ilab_test, img_height, img_scale, patch_size, stride, dl_model, batch_size, neg_out=os.path.join(test_out, neg_name), pos_out=os.path.join(test_out, pos_name), bkg_out=os.path.join(test_out, bkg_name), preprocess=preprocess_input, equalize_hist=equalize_hist, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, roi_cutoff=roi_cutoff, bkg_cutoff=bkg_cutoff, sample_bkg=sample_bkg, img_ext=out_img_ext, random_seed=random_seed, parallelized=parallelized) print "Wrote %d ROI and %d bkg patches" % (nb_roi_test, nb_bkg_test) sys.stdout.flush() # ==== Image generators ==== # if featurewise_center: train_imgen = DMImageDataGenerator(featurewise_center=True) val_imgen = DMImageDataGenerator(featurewise_center=True) test_imgen = DMImageDataGenerator(featurewise_center=True) train_imgen.mean = featurewise_mean val_imgen.mean = featurewise_mean test_imgen.mean = featurewise_mean else: train_imgen = DMImageDataGenerator() val_imgen = DMImageDataGenerator() test_imgen = DMImageDataGenerator() if augmentation: train_imgen.horizontal_flip = True train_imgen.vertical_flip = True train_imgen.rotation_range = 45. train_imgen.shear_range = np.pi / 8. # ==== Train & val set ==== # # Note: the images are histogram equalized before they were written to # external folders. 
    train_bs = int(batch_size * train_bs_multiplier)
    if load_train_ram:
        raw_imgen = DMImageDataGenerator()
        print "Create generator for raw train set"
        raw_generator = raw_imgen.flow_from_directory(
            train_out, target_size=(patch_size, patch_size),
            target_scale=img_scale, equalize_hist=False, dup_3_channels=True,
            classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
            batch_size=train_bs, shuffle=False)
        print "Loading raw train set into RAM.",
        sys.stdout.flush()
        raw_set = load_dat_ram(raw_generator, raw_generator.nb_sample)
        print "Done."
        sys.stdout.flush()
        print "Create generator for train set"
        train_generator = train_imgen.flow(
            raw_set[0], raw_set[1], batch_size=train_bs,
            auto_batch_balance=True, preprocess=preprocess_input,
            shuffle=True, seed=random_seed)
    else:
        print "Create generator for train set"
        train_generator = train_imgen.flow_from_directory(
            train_out, target_size=(patch_size, patch_size),
            target_scale=img_scale, equalize_hist=False, dup_3_channels=True,
            classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
            auto_batch_balance=True, batch_size=train_bs,
            preprocess=preprocess_input, shuffle=True, seed=random_seed)

    print "Create generator for val set"
    sys.stdout.flush()
    validation_set = val_imgen.flow_from_directory(
        val_out, target_size=(patch_size, patch_size),
        target_scale=img_scale, equalize_hist=False, dup_3_channels=True,
        classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    val_samples = validation_set.nb_sample
    if parallelized and val_samples % batch_size != 0:
        val_samples -= val_samples % batch_size
    print "Validation samples =", val_samples
    sys.stdout.flush()
    if load_val_ram:
        print "Loading validation set into RAM.",
        sys.stdout.flush()
        validation_set = load_dat_ram(validation_set, val_samples)
        print "Done."
        print "Loaded %d val samples" % (len(validation_set[0]))
        sys.stdout.flush()

    # ==== Model finetuning ==== #
    train_batches = int(train_generator.nb_sample / train_bs) + 1
    samples_per_epoch = train_bs * train_batches
    # import pdb; pdb.set_trace()
    dl_model, loss_hist, acc_hist = do_3stage_training(
        dl_model, org_model, train_generator, validation_set, val_samples,
        best_model, samples_per_epoch, top_layer_nb, net,
        nb_epoch=nb_epoch, top_layer_epochs=top_layer_epochs,
        all_layer_epochs=all_layer_epochs,
        use_pretrained=True, optim=optim, init_lr=init_lr,
        top_layer_multiplier=top_layer_multiplier,
        all_layer_multiplier=all_layer_multiplier,
        es_patience=es_patience, lr_patience=lr_patience,
        auto_batch_balance=True, nb_worker=nb_worker,
        weight_decay2=weight_decay2, bias_multiplier=bias_multiplier,
        hidden_dropout2=hidden_dropout2)

    # Training report.
    min_loss_locs, = np.where(loss_hist == min(loss_hist))
    best_val_loss = loss_hist[min_loss_locs[0]]
    best_val_accuracy = acc_hist[min_loss_locs[0]]
    print "\n==== Training summary ===="
    print "Minimum val loss achieved at epoch:", min_loss_locs[0] + 1
    print "Best val loss:", best_val_loss
    print "Best val accuracy:", best_val_accuracy

    # ==== Predict on test set ==== #
    print "\n==== Predicting on test set ===="
    print "Create generator for test set"
    test_generator = test_imgen.flow_from_directory(
        test_out, target_size=(patch_size, patch_size),
        target_scale=img_scale, equalize_hist=False, dup_3_channels=True,
        classes=[bkg_name, pos_name, neg_name], class_mode='categorical',
        batch_size=batch_size, preprocess=preprocess_input, shuffle=False)
    test_samples = test_generator.nb_sample
    if parallelized and test_samples % batch_size != 0:
        test_samples -= test_samples % batch_size
    print "Test samples =", test_samples
    print "Load saved best model:", best_model + '.',
    sys.stdout.flush()
    org_model.load_weights(best_model)
    print "Done."
    test_res = dl_model.evaluate_generator(
        test_generator, test_samples, nb_worker=nb_worker,
        pickle_safe=True if nb_worker > 1 else False)
    print "Evaluation result on test set:", test_res
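
# Minimal usage sketch for the patch-classifier finetuning entry point above.
# The DL state file and scratch folders are illustrative assumptions.
#
# run('./trainingData',
#     dl_state='./modelState/patch_clf.h5',
#     best_model='./modelState/patch_clf_finetuned.h5',
#     net='vgg19', nb_epoch=10,
#     train_out='./scratch/train', val_out='./scratch/val',
#     test_out='./scratch/test')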
def run(img_folder, dl_state, img_extension='dcm',
        img_height=1024, img_scale=4095, val_size=.2,
        neg_vs_pos_ratio=10., do_featurewise_norm=True,
        featurewise_mean=873.6, featurewise_std=739.3,
        img_per_batch=2, roi_per_img=32, roi_size=(256, 256),
        low_int_threshold=.05, blob_min_area=3,
        blob_min_int=.5, blob_max_int=.85, blob_th_step=10,
        exam_tsv='./metadata/exams_metadata.tsv',
        img_tsv='./metadata/images_crosswalk.tsv',
        train_out='./modelState/meta_prob_train.pkl',
        test_out='./modelState/meta_prob_test.pkl'):
    '''Make DL predictions on candidate ROIs for all exams
    '''

    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    rng = RandomState(random_seed)  # an rng used across board.
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # Load and split image and label lists.
    meta_man = DMMetaManager(exam_tsv=exam_tsv,
                             img_tsv=img_tsv,
                             img_folder=img_folder,
                             img_extension=img_extension)
    subj_list, subj_labs = meta_man.get_subj_labs()
    subj_train, subj_test, labs_train, labs_test = train_test_split(
        subj_list, subj_labs, test_size=val_size, stratify=subj_labs,
        random_state=random_seed)
    if neg_vs_pos_ratio is not None:
        def subset_subj(subj, labs):
            subj = np.array(subj)
            labs = np.array(labs)
            pos_idx = np.where(labs==1)[0]
            neg_idx = np.where(labs==0)[0]
            nb_neg_desired = int(len(pos_idx)*neg_vs_pos_ratio)
            if nb_neg_desired >= len(neg_idx):
                return subj.tolist()
            else:
                neg_chosen = rng.choice(neg_idx, nb_neg_desired,
                                        replace=False)
                subset_idx = np.concatenate([pos_idx, neg_chosen])
                return subj[subset_idx].tolist()

        subj_train = subset_subj(subj_train, labs_train)
        subj_test = subset_subj(subj_test, labs_test)

    # Create image generator for ROIs for representation extraction.
    print "Create an image generator for ROIs"; sys.stdout.flush()
    if do_featurewise_norm:
        imgen = DMImageDataGenerator(
            featurewise_center=True,
            featurewise_std_normalization=True)
        imgen.mean = featurewise_mean
        imgen.std = featurewise_std
    else:
        imgen = DMImageDataGenerator(
            samplewise_center=True,
            samplewise_std_normalization=True)

    # Load DL model.
    print "Load DL classification model:", dl_state; sys.stdout.flush()
    dl_model = load_model(
        dl_state,
        custom_objects={
            'sensitivity': dmm.sensitivity,
            'specificity': dmm.specificity
        }
    )
    if gpu_count > 1:
        print "Make the model parallel on %d GPUs" % (gpu_count)
        sys.stdout.flush()
        dl_model = make_parallel(dl_model, gpu_count)

    # Read exam lists.
    exam_train = meta_man.get_flatten_exam_list(
        subj_train, flatten_img_list=True)
    exam_test = meta_man.get_flatten_exam_list(
        subj_test, flatten_img_list=True)
    exam_labs_train = np.array(meta_man.exam_labs(exam_train))
    exam_labs_test = np.array(meta_man.exam_labs(exam_test))
    nb_pos_exams_train = (exam_labs_train==1).sum()
    nb_neg_exams_train = (exam_labs_train==0).sum()
    nb_pos_exams_test = (exam_labs_test==1).sum()
    nb_neg_exams_test = (exam_labs_test==0).sum()
    print "Train set - Nb of pos exams: %d, Nb of neg exams: %d" % \
        (nb_pos_exams_train, nb_neg_exams_train)
    print "Test set - Nb of pos exams: %d, Nb of neg exams: %d" % \
        (nb_pos_exams_test, nb_neg_exams_test)

    # Make predictions for exam lists.
print "Predicting for train exam list"; sys.stdout.flush() meta_prob_train = get_exam_pred( exam_train, roi_per_img, imgen, target_height=img_height, target_scale=img_scale, img_per_batch=img_per_batch, roi_size=roi_size, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, seed=random_seed, dl_model=dl_model) print "Length of train prediction list:", len(meta_prob_train) sys.stdout.flush() print "Predicting for test exam list"; sys.stdout.flush() meta_prob_test = get_exam_pred( exam_test, roi_per_img, imgen, target_height=img_height, target_scale=img_scale, img_per_batch=img_per_batch, roi_size=roi_size, low_int_threshold=low_int_threshold, blob_min_area=blob_min_area, blob_min_int=blob_min_int, blob_max_int=blob_max_int, blob_th_step=blob_th_step, seed=random_seed, dl_model=dl_model) print "Length of test prediction list:", len(meta_prob_test) sys.stdout.flush() pickle.dump(meta_prob_train, open(train_out, 'w')) pickle.dump(meta_prob_test, open(test_out, 'w')) print "Done."
def run(img_folder, dl_state, img_extension='dcm',
        img_height=1024, img_scale=255., equalize_hist=False,
        featurewise_center=False, featurewise_mean=91.6,
        neg_vs_pos_ratio=1., net='vgg19', batch_size=128,
        patch_size=256, stride=8,
        exam_tsv='./metadata/exams_metadata.tsv',
        img_tsv='./metadata/images_crosswalk.tsv',
        out='./modelState/prob_heatmap.pkl',
        predicted_subj_file=None, add_subjs=500):
    '''Sweep mammograms with a trained DL model to create prob heatmaps
    '''

    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    rng = RandomState(random_seed)  # an rng used across board.
    gpu_count = int(os.getenv('NUM_GPU_DEVICES', 1))

    # Load and split image and label lists.
    meta_man = DMMetaManager(exam_tsv=exam_tsv,
                             img_tsv=img_tsv,
                             img_folder=img_folder,
                             img_extension=img_extension)
    subj_list, subj_labs = meta_man.get_subj_labs()
    subj_labs = np.array(subj_labs)
    print "Found %d subjects" % (len(subj_list))
    print "cancer patients=%d, normal patients=%d" \
        % ((subj_labs==1).sum(), (subj_labs==0).sum())
    if predicted_subj_file is not None:
        predicted_subjs = np.load(predicted_subj_file)
        subj_list = np.setdiff1d(subj_list, predicted_subjs)
        subj_list = subj_list[:add_subjs]
        print "Will predict additional %d subjects" % (len(subj_list))
    elif neg_vs_pos_ratio is not None:
        subj_list, subj_labs = DMMetaManager.subset_subj_list(
            subj_list, subj_labs, neg_vs_pos_ratio, random_seed)
        subj_labs = np.array(subj_labs)
        print "After subsetting, there are %d subjects" % (len(subj_list))
        print "cancer patients=%d, normal patients=%d" \
            % ((subj_labs==1).sum(), (subj_labs==0).sum())

    # Get exam lists.
    # >>>> Debug <<<< #
    # subj_list = subj_list[:2]
    # >>>> Debug <<<< #
    print "Get flattened exam list"
    exam_list = meta_man.get_flatten_exam_list(subj_list, cc_mlo_only=True)
    exam_labs = meta_man.exam_labs(exam_list)
    exam_labs = np.array(exam_labs)
    print "positive exams=%d, negative exams=%d" \
        % ((exam_labs==1).sum(), (exam_labs==0).sum())
    sys.stdout.flush()

    # Load DL model.
    print "Load patch classifier:", dl_state
    sys.stdout.flush()
    dl_model = load_model(
        dl_state,
        custom_objects={
            'sensitivity': dmm.sensitivity,
            'specificity': dmm.specificity
        })
    if gpu_count > 1:
        print "Make the model parallel on %d GPUs" % (gpu_count)
        sys.stdout.flush()
        dl_model, _ = make_parallel(dl_model, gpu_count)
        parallelized = True
    else:
        parallelized = False

    # Load preprocess function.
    if featurewise_center:
        preprocess_input = None
    else:
        print "Load preprocess function for net:", net
        if net == 'resnet50':
            from keras.applications.resnet50 import preprocess_input
        elif net == 'vgg16':
            from keras.applications.vgg16 import preprocess_input
        elif net == 'vgg19':
            from keras.applications.vgg19 import preprocess_input
        elif net == 'xception':
            from keras.applications.xception import preprocess_input
        elif net == 'inception':
            from keras.applications.inception_v3 import preprocess_input
        else:
            raise Exception("Pretrained model is not available: " + net)

    # Sweep the whole images and classify patches.
print "Generate prob heatmaps for exam list" sys.stdout.flush() heatmap_dat_list = [] for i, e in enumerate(exam_list): dat = (e[0], e[1], { 'L': { 'cancer': e[2]['L']['cancer'] }, 'R': { 'cancer': e[2]['R']['cancer'] } }) dat[2]['L']['CC'] = get_prob_heatmap( e[2]['L']['CC'], img_height, img_scale, patch_size, stride, dl_model, batch_size, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, preprocess=preprocess_input, parallelized=parallelized, equalize_hist=equalize_hist) dat[2]['L']['MLO'] = get_prob_heatmap( e[2]['L']['MLO'], img_height, img_scale, patch_size, stride, dl_model, batch_size, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, preprocess=preprocess_input, parallelized=parallelized, equalize_hist=equalize_hist) dat[2]['R']['CC'] = get_prob_heatmap( e[2]['R']['CC'], img_height, img_scale, patch_size, stride, dl_model, batch_size, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, preprocess=preprocess_input, parallelized=parallelized, equalize_hist=equalize_hist) dat[2]['R']['MLO'] = get_prob_heatmap( e[2]['R']['MLO'], img_height, img_scale, patch_size, stride, dl_model, batch_size, featurewise_center=featurewise_center, featurewise_mean=featurewise_mean, preprocess=preprocess_input, parallelized=parallelized, equalize_hist=equalize_hist) heatmap_dat_list.append(dat) print "processed %d/%d exams" % (i + 1, len(exam_list)) sys.stdout.flush() ### DEBUG ### # if i >= 1: # break ### DEBUG ### print "Done." # Save the result. print "Saving result to external files.", sys.stdout.flush() pickle.dump(heatmap_dat_list, open(out, 'w')) print "Done."
def run(img_folder, dl_state, img_extension='dcm',
        img_height=1024, img_scale=4095, val_size=.2,
        neg_vs_pos_ratio=10., do_featurewise_norm=True,
        featurewise_mean=873.6, featurewise_std=739.3,
        img_per_batch=2, roi_per_img=32, roi_size=(256, 256),
        low_int_threshold=.05, blob_min_area=3,
        blob_min_int=.5, blob_max_int=.85, blob_th_step=10,
        layer_name=['flatten_1', 'dense_1'], layer_index=None,
        roi_state=None, roi_clf_bs=32,
        pc_components=.95, pc_whiten=True,
        nb_words=[512],
        km_max_iter=100, km_bs=1000, km_patience=20, km_init=10,
        exam_tsv='./metadata/exams_metadata.tsv',
        img_tsv='./metadata/images_crosswalk.tsv',
        pca_km_states='./modelState/dlrepr_pca_km_models.pkl',
        bow_train_out='./modelState/bow_dat_train.pkl',
        bow_test_out='./modelState/bow_dat_test.pkl'):
    '''Calculate bag of deep visual words count matrix for all breasts
    '''

    # Read some env variables.
    random_seed = int(os.getenv('RANDOM_SEED', 12345))
    rng = RandomState(random_seed)  # an rng used across board.

    # Load and split image and label lists.
    meta_man = DMMetaManager(exam_tsv=exam_tsv,
                             img_tsv=img_tsv,
                             img_folder=img_folder,
                             img_extension=img_extension)
    subj_list, subj_labs = meta_man.get_subj_labs()
    subj_train, subj_test, labs_train, labs_test = train_test_split(
        subj_list, subj_labs, test_size=val_size, stratify=subj_labs,
        random_state=random_seed)
    if neg_vs_pos_ratio is not None:
        def subset_subj(subj, labs):
            subj = np.array(subj)
            labs = np.array(labs)
            pos_idx = np.where(labs == 1)[0]
            neg_idx = np.where(labs == 0)[0]
            nb_neg_desired = int(len(pos_idx) * neg_vs_pos_ratio)
            if nb_neg_desired >= len(neg_idx):
                return subj.tolist()
            else:
                neg_chosen = rng.choice(neg_idx, nb_neg_desired,
                                        replace=False)
                subset_idx = np.concatenate([pos_idx, neg_chosen])
                return subj[subset_idx].tolist()

        subj_train = subset_subj(subj_train, labs_train)
        subj_test = subset_subj(subj_test, labs_test)

    img_list, lab_list = meta_man.get_flatten_img_list(subj_train)
    lab_list = np.array(lab_list)
    print "Train set - Nb of positive images: %d, Nb of negative images: %d" \
        % ((lab_list==1).sum(), (lab_list==0).sum())
    sys.stdout.flush()

    # Create image generator for ROIs for representation extraction.
    print "Create an image generator for ROIs"
    sys.stdout.flush()
    if do_featurewise_norm:
        imgen = DMImageDataGenerator(featurewise_center=True,
                                     featurewise_std_normalization=True)
        imgen.mean = featurewise_mean
        imgen.std = featurewise_std
    else:
        imgen = DMImageDataGenerator(samplewise_center=True,
                                     samplewise_std_normalization=True)

    # Load ROI classifier.
    if roi_state is not None:
        print "Load ROI classifier"
        sys.stdout.flush()
        roi_clf = load_model(
            roi_state,
            custom_objects={
                'sensitivity': dmm.sensitivity,
                'specificity': dmm.specificity
            })
        graph = tf.get_default_graph()
    else:
        roi_clf = None
        graph = None

    # Create ROI generators for pos and neg images separately.
    print "Create ROI generators for pos and neg images"
    sys.stdout.flush()
    roi_generator = imgen.flow_from_candid_roi(
        img_list,
        target_height=img_height, target_scale=img_scale,
        class_mode=None, validation_mode=True,
        img_per_batch=img_per_batch, roi_per_img=roi_per_img,
        roi_size=roi_size,
        low_int_threshold=low_int_threshold, blob_min_area=blob_min_area,
        blob_min_int=blob_min_int, blob_max_int=blob_max_int,
        blob_th_step=blob_th_step,
        tf_graph=graph, roi_clf=roi_clf, clf_bs=roi_clf_bs,
        return_sample_weight=False, seed=random_seed)

    # Generate image patches and extract their DL representations.
print "Load DL representation model" sys.stdout.flush() dlrepr_model = DLRepr(dl_state, custom_objects={ 'sensitivity': dmm.sensitivity, 'specificity': dmm.specificity }, layer_name=layer_name, layer_index=layer_index) last_output_size = dlrepr_model.get_output_shape()[-1][-1] if last_output_size != 3 and last_output_size != 1: raise Exception("The last output must be prob outputs (size=3 or 1)") nb_tot_samples = len(img_list) * roi_per_img print "Extract ROIs from pos and neg images" sys.stdout.flush() pred = dlrepr_model.predict_generator(roi_generator, val_samples=nb_tot_samples) for i, d in enumerate(pred): print "Shape of representation/output data %d:" % (i), d.shape sys.stdout.flush() # Flatten feature maps, e.g. an 8x8 feature map will become a 64-d vector. pred = [d.reshape((-1, d.shape[-1])) for d in pred] for i, d in enumerate(pred): print "Shape of flattened data %d:" % (i), d.shape sys.stdout.flush() # Split representations and prob outputs. dl_repr = pred[0] prob_out = pred[1] if prob_out.shape[1] == 3: prob_out = prob_out[:, 1] # pos class. prob_out = prob_out.reshape((len(img_list), -1)) print "Reshape prob output to:", prob_out.shape sys.stdout.flush() # Use PCA to reduce dimension of the representation data. if pc_components is not None: print "Start PCA dimension reduction on DL representation" sys.stdout.flush() pca = PCA(n_components=pc_components, whiten=pc_whiten) pca.fit(dl_repr) print "Nb of PCA components:", pca.n_components_ print "Total explained variance ratio: %.4f" % \ (pca.explained_variance_ratio_.sum()) dl_repr_pca = pca.transform(dl_repr) print "Shape of transformed representation data:", dl_repr_pca.shape sys.stdout.flush() else: pca = None # Use K-means to create a codebook for deep visual words. print "Start K-means training on DL representation" sys.stdout.flush() clf_list = [] clust_list = [] # Shuffling indices for mini-batches learning. perm_idx = rng.permutation(len(dl_repr)) for n in nb_words: print "Train K-means with %d cluster centers" % (n) sys.stdout.flush() clf = MiniBatchKMeans(n_clusters=n, init='k-means++', max_iter=km_max_iter, batch_size=km_bs, compute_labels=True, random_state=random_seed, tol=0.0, max_no_improvement=km_patience, init_size=None, n_init=km_init, reassignment_ratio=0.01, verbose=0) clf.fit(dl_repr[perm_idx]) clf_list.append(clf) clust = np.zeros_like(clf.labels_) clust[perm_idx] = clf.labels_ clust = clust.reshape((len(img_list), -1)) clust_list.append(clust) if pca is not None: print "Start K-means training on transformed representation" sys.stdout.flush() clf_list_pca = [] clust_list_pca = [] # Shuffling indices for mini-batches learning. perm_idx = rng.permutation(len(dl_repr_pca)) for n in nb_words: print "Train K-means with %d cluster centers" % (n) sys.stdout.flush() clf = MiniBatchKMeans(n_clusters=n, init='k-means++', max_iter=km_max_iter, batch_size=km_bs, compute_labels=True, random_state=random_seed, tol=0.0, max_no_improvement=km_patience, init_size=None, n_init=km_init, reassignment_ratio=0.01, verbose=0) clf.fit(dl_repr_pca[perm_idx]) clf_list_pca.append(clf) clust = np.zeros_like(clf.labels_) clust[perm_idx] = clf.labels_ clust = clust.reshape((len(img_list), -1)) clust_list_pca.append(clust) # Read exam lists. 
    exam_train = meta_man.get_flatten_exam_list(subj_train,
                                                flatten_img_list=True)
    exam_test = meta_man.get_flatten_exam_list(subj_test,
                                               flatten_img_list=True)
    exam_labs_train = np.array(meta_man.exam_labs(exam_train))
    exam_labs_test = np.array(meta_man.exam_labs(exam_test))
    nb_pos_exams_train = (exam_labs_train == 1).sum()
    nb_neg_exams_train = (exam_labs_train == 0).sum()
    nb_pos_exams_test = (exam_labs_test == 1).sum()
    nb_neg_exams_test = (exam_labs_test == 0).sum()
    print "Train set - Nb of pos exams: %d, Nb of neg exams: %d" % \
        (nb_pos_exams_train, nb_neg_exams_train)
    print "Test set - Nb of pos exams: %d, Nb of neg exams: %d" % \
        (nb_pos_exams_test, nb_neg_exams_test)

    # Do BoW counts for each breast.
    print "BoW counting for train exam list"
    sys.stdout.flush()
    bow_dat_train = get_exam_bow_dat(
        exam_train, nb_words, roi_per_img,
        img_list=img_list, prob_out=prob_out, clust_list=clust_list)
    for i, d in enumerate(bow_dat_train[1]):
        print "Shape of train BoW matrix %d:" % (i), d.shape
    sys.stdout.flush()

    print "BoW counting for test exam list"
    sys.stdout.flush()
    bow_dat_test = get_exam_bow_dat(
        exam_test, nb_words, roi_per_img,
        imgen=imgen, clf_list=clf_list, transformer=None,
        target_height=img_height, target_scale=img_scale,
        img_per_batch=img_per_batch, roi_size=roi_size,
        low_int_threshold=low_int_threshold, blob_min_area=blob_min_area,
        blob_min_int=blob_min_int, blob_max_int=blob_max_int,
        blob_th_step=blob_th_step, seed=random_seed,
        dlrepr_model=dlrepr_model)
    for i, d in enumerate(bow_dat_test[1]):
        print "Shape of test BoW matrix %d:" % (i), d.shape
    sys.stdout.flush()

    if pca is not None:
        print "== Do same BoW counting on PCA transformed data =="
        print "BoW counting for train exam list"
        sys.stdout.flush()
        bow_dat_train_pca = get_exam_bow_dat(
            exam_train, nb_words, roi_per_img,
            img_list=img_list, prob_out=prob_out, clust_list=clust_list_pca)
        for i, d in enumerate(bow_dat_train_pca[1]):
            print "Shape of train BoW matrix %d:" % (i), d.shape
        sys.stdout.flush()

        print "BoW counting for test exam list"
        sys.stdout.flush()
        bow_dat_test_pca = get_exam_bow_dat(
            exam_test, nb_words, roi_per_img,
            imgen=imgen, clf_list=clf_list_pca, transformer=pca,
            target_height=img_height, target_scale=img_scale,
            img_per_batch=img_per_batch, roi_size=roi_size,
            low_int_threshold=low_int_threshold, blob_min_area=blob_min_area,
            blob_min_int=blob_min_int, blob_max_int=blob_max_int,
            blob_th_step=blob_th_step, seed=random_seed,
            dlrepr_model=dlrepr_model)
        for i, d in enumerate(bow_dat_test_pca[1]):
            print "Shape of test BoW matrix %d:" % (i), d.shape
        sys.stdout.flush()

    # Save K-means model and BoW count data.
    if pca is None:
        pickle.dump(clf_list, open(pca_km_states, 'w'))
        pickle.dump(bow_dat_train, open(bow_train_out, 'w'))
        pickle.dump(bow_dat_test, open(bow_test_out, 'w'))
    else:
        pickle.dump((pca, clf_list), open(pca_km_states, 'w'))
        pickle.dump((bow_dat_train, bow_dat_train_pca),
                    open(bow_train_out, 'w'))
        pickle.dump((bow_dat_test, bow_dat_test_pca),
                    open(bow_test_out, 'w'))

    print "Done."
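
# Minimal usage sketch for the bag-of-visual-words entry point above; the
# model state path is an illustrative assumption, and layer_name must match
# layers actually present in the saved model.
#
# run('./trainingData',
#     dl_state='./modelState/dm_candidROI_best_model.h5',
#     layer_name=['flatten_1', 'dense_1'], nb_words=[512],
#     bow_train_out='./modelState/bow_dat_train.pkl',
#     bow_test_out='./modelState/bow_dat_test.pkl')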