def load_train_classifier(params, features, labels, feature_names, sizes,
                          nb_holdout):
    """ load a previously exported classifier from the experiment folder
    if it exists, otherwise train a new one and export it

    :param {str: any} params: parameter settings
    :param ndarray features: samples in dimension nb_samples x nb_features
    :param [int] labels: annotation per sample
    :param [str] feature_names: names of used features
    :param [int] sizes: sizes of the individual image sets
    :param int nb_holdout: number of sets held out in cross-validation
    :return: updated params, classifier and path to the exported classifier
    """
    logging.info('train classifier...')
    seg_clf.feature_scoring_selection(features, labels, feature_names,
                                      path_out=params['path_exp'])
    cv = seg_clf.CrossValidatePSetsOut(sizes, nb_hold_out=nb_holdout)
    # feature norm & train classification
    fname_classif = seg_clf.TEMPLATE_NAME_CLF.format(params['classif'])
    path_classif = os.path.join(params['path_exp'], fname_classif)
    if os.path.isfile(path_classif) and not FORCE_RETRAIN_CLASSIF:
        logging.info('loading classifier: %s', path_classif)
        params_local = params.copy()
        dict_classif = seg_clf.load_classifier(path_classif)
        classif = dict_classif['clf_pipeline']
        params = dict_classif['params']
        # restore local paths and GraphCut params from the current run
        params.update({k: params_local[k] for k in params_local
                       if k.startswith('path_') or k.startswith('gc_')})
        logging.debug('loaded PARAMETERS: %s', repr(params))
    else:
        classif, path_classif = seg_clf.create_classif_train_export(
            params['classif'], features, labels, cross_val=cv, params=params,
            feature_names=feature_names,
            nb_search_iter=params['nb_classif_search'],
            nb_jobs=params['nb_jobs'], pca_coef=params['pca_coef'],
            path_out=params['path_exp'])
        params['path_classif'] = path_classif
    # evaluate the (loaded or freshly trained) classifier by cross-validation
    cv = seg_clf.CrossValidatePSetsOut(sizes, nb_hold_out=nb_holdout)
    seg_clf.eval_classif_cross_val_scores(params['classif'], classif, features,
                                          labels, cross_val=cv,
                                          path_out=params['path_exp'])
    seg_clf.eval_classif_cross_val_roc(params['classif'], classif, features,
                                       labels, cross_val=cv,
                                       path_out=params['path_exp'])
    return params, classif, path_classif
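
# A minimal usage sketch for `load_train_classifier` (kept commented so the
# module stays import-safe). The `params` values below are hypothetical; in
# practice the dict comes from the experiment configuration and `features`,
# `labels`, `sizes` from `seg_clf.convert_set_features_labels_2_dataset`:
#
#   params = {'path_exp': '/tmp/experiment', 'classif': 'RandForest',
#             'nb_classif_search': 50, 'nb_jobs': 4, 'pca_coef': None}
#   params, classif, path_classif = load_train_classifier(
#       params, features, labels, feature_names, sizes, nb_holdout=2)
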
def main_train(params):
    """ PIPELINE for training

    0) load triplets or create triplets from paths to images, annotations
    1) load precomputed data or compute them now
    2) train classifier with hyper-parameters
    3) perform Leave-One-Out experiment

    :param {str: any} params: parameter settings
    """
    logging.info('run TRAINING...')
    params = prepare_experiment_folder(params, FOLDER_EXPERIMENT)

    tl_expt.set_experiment_logger(params['path_expt'])
    logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))
    with open(os.path.join(params['path_expt'], NAME_JSON_PARAMS), 'w') as f:
        json.dump(params, f)
    tl_expt.create_subfolders(params['path_expt'], LIST_SUBDIRS)

    df_paths, _ = load_df_paths(params)

    path_dump_data = os.path.join(params['path_expt'], NAME_DUMP_TRAIN_DATA)
    if not os.path.isfile(path_dump_data) or FORCE_RECOMP_DATA:
        (dict_imgs, dict_segms, dict_slics, dict_points, dict_centers,
         dict_features, dict_labels, feature_names) = \
            dataset_load_images_segms_compute_features(params, df_paths,
                                                       params['nb_jobs'])
        assert len(dict_imgs) > 0, 'missing images'
        save_dump_data(path_dump_data, dict_imgs, dict_segms, dict_slics,
                       dict_points, dict_centers, dict_features, dict_labels,
                       feature_names)
    else:
        (dict_imgs, dict_segms, dict_slics, dict_points, dict_centers,
         dict_features, dict_labels, feature_names) = \
            load_dump_data(path_dump_data)

    if is_drawing(params['path_expt']) and EXPORT_TRAINING_DATA:
        export_dataset_visual(params['path_expt'], dict_imgs, dict_segms,
                              dict_slics, dict_points, dict_labels,
                              params['nb_jobs'])

    # concatenate features and labels from all images into one dataset
    features, labels, sizes = seg_clf.convert_set_features_labels_2_dataset(
        dict_features, dict_labels, drop_labels=[-1],
        balance_type=params['balance'])
    # remove all invalid values (NaN, inf) from the feature space
    features[np.isnan(features)] = 0
    features[np.isinf(features)] = -1
    assert np.sum(sizes) == len(labels), \
        'not equal sizes (%d) and labels (%d)' \
        % (int(np.sum(sizes)), len(labels))

    # feature norm & train classification
    nb_holdout = int(np.ceil(len(sizes) * CROSS_VAL_LEAVE_OUT_SEARCH))
    cv = seg_clf.CrossValidatePSetsOut(sizes, nb_holdout)
    classif, params['path_classif'] = seg_clf.create_classif_train_export(
        params['classif'], features, labels, cross_val=cv, params=params,
        feature_names=feature_names,
        nb_search_iter=params['nb_classif_search'],
        pca_coef=params.get('pca_coef', None),
        nb_jobs=params['nb_jobs'], path_out=params['path_expt'])

    # evaluate the trained classifier with a larger hold-out
    nb_holdout = int(np.ceil(len(sizes) * CROSS_VAL_LEAVE_OUT_EVAL))
    cv = seg_clf.CrossValidatePSetsOut(sizes, nb_holdout)
    seg_clf.eval_classif_cross_val_scores(params['classif'], classif, features,
                                          labels, cross_val=cv,
                                          path_out=params['path_expt'])
    seg_clf.eval_classif_cross_val_roc(params['classif'], classif, features,
                                       labels, cross_val=cv,
                                       path_out=params['path_expt'])

    if RUN_LEAVE_ONE_OUT:
        experiment_loo(classif, dict_imgs, dict_segms, dict_centers,
                       dict_slics, dict_points, dict_features, feature_names)

    logging.info('DONE')
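
# A hedged sketch of driving the whole training pipeline (hypothetical paths
# and values; the real dict is typically assembled from CLI arguments plus the
# script's default parameter template, and must contain all keys the pipeline
# reads, e.g. 'balance', 'nb_classif_search', 'nb_jobs'):
#
#   params = {'path_list': './data/list_imgs_annot.csv',
#             'path_out': './results', 'classif': 'RandForest',
#             'balance': 'unique', 'nb_classif_search': 50, 'nb_jobs': 4}
#   main_train(params)
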
def train_classif_color2d_slic_features(list_images, list_annots,
                                        dict_features, sp_size=30,
                                        sp_regul=0.2, clf_name=CLASSIF_NAME,
                                        label_purity=0.9,
                                        feature_balance='unique',
                                        pca_coef=None, nb_classif_search=1,
                                        nb_hold_out=CROSS_VAL_LEAVE_OUT,
                                        nb_jobs=1):
    """ train classifier on a list of annotated images

    :param [ndarray] list_images: input images
    :param [ndarray] list_annots: annotations matching the images
    :param {str: [str]} dict_features: list of features to be extracted
    :param int sp_size: initial size of a superpixel (meaning edge length)
    :param float sp_regul: regularisation in range (0, 1) where "0" gives
        elastic and "1" nearly square segments
    :param str clf_name: select the used classifier
    :param float label_purity: set the sample-label purity for training
    :param str feature_balance: set how to balance datasets
    :param float pca_coef: select PCA coefficient or None
    :param int nb_classif_search: number of tries for hyper-parameter search
    :param int nb_hold_out: cross-validation leave-out
    :param int nb_jobs: parallelism
    :return:
    """
    logging.info('TRAIN Superpixels-Features-Classifier')
    assert len(list_images) == len(list_annots), \
        'size of images (%i) and annotations (%i) should match' \
        % (len(list_images), len(list_annots))

    list_slic, list_features, list_labels = list(), list(), list()
    _wrapper_compute = partial(wrapper_compute_color2d_slic_features_labels,
                               sp_size=sp_size, sp_regul=sp_regul,
                               dict_features=dict_features,
                               label_purity=label_purity)
    list_imgs_annot = zip(list_images, list_annots)
    iterate = tl_expt.WrapExecuteSequence(
        _wrapper_compute, list_imgs_annot,
        desc='compute SLIC & features & labels', nb_jobs=nb_jobs)
    for slic, fts, lbs in iterate:
        list_slic.append(slic)
        list_features.append(fts)
        list_labels.append(lbs)

    logging.debug('concatenate features...')
    # concatenate features and labels; drop "do not care" labels, which are -1
    features, labels, sizes = seg_clf.convert_set_features_labels_2_dataset(
        dict(zip(range(len(list_features)), list_features)),
        dict(zip(range(len(list_labels)), list_labels)),
        balance_type=feature_balance, drop_labels=[-1])
    features = np.nan_to_num(features)

    logging.debug('train classifier...')
    if len(sizes) > (nb_hold_out * 5):
        cv = seg_clf.CrossValidatePSetsOut(sizes, nb_hold_out=nb_hold_out)
    else:
        # for a small number of training images leave-P-sets-out
        # does not make sense, so fall back to plain k-fold
        cv = 10
    classif, _ = seg_clf.create_classif_train_export(
        clf_name, features, labels, pca_coef=pca_coef, cross_val=cv,
        nb_search_iter=nb_classif_search, nb_jobs=nb_jobs)

    return classif, list_slic, list_features, list_labels
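
# A minimal usage sketch on synthetic data (the `dict_features` content below
# is an illustrative assumption, not the package's canonical configuration;
# any {channel: [feature names]} mapping accepted by the feature-extraction
# backend works):
#
#   import numpy as np
#   list_images = [np.random.random((100, 100, 3)) for _ in range(5)]
#   list_annots = [np.random.randint(0, 2, (100, 100)) for _ in range(5)]
#   dict_features = {'color': ['mean', 'std', 'median']}
#   classif, list_slic, _, _ = train_classif_color2d_slic_features(
#       list_images, list_annots, dict_features, sp_size=25, sp_regul=0.3)
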