def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Build and test models based on dim reductions and provided spectra'
    )
    subparsers = parser.add_subparsers(dest='subparser_name')

    parser.add_argument(
        '--metadata_path', type=str, default='.', metavar='PATH',
        help="Metadata path to work from, if not '.'"
    )
    parser.add_argument(
        '--spectra_path', type=str, default='.', metavar='PATH',
        help="Spectra path to work from, if not '.'"
    )
    parser.add_argument(
        '--method', type=str, default='ICA', metavar='METHOD',
        help='Dim reduction method to load data for'
    )
    parser.add_argument(
        '--n_jobs', type=int, default=1, metavar='N_JOBS',
        help='Number of parallel jobs to run'
    )
    parser.add_argument(
        '--model', type=str, choices=['ET', 'RF', 'GP', 'KNN', 'SVR'], default='ET',
        help='Which model type to use: ET (Extra Trees), RF (Random Forest), '
             'GP (Gaussian Process), KNN, or SVR (Support Vector Regression)'
    )
    parser.add_argument(
        '--load_model', action='store_true',
        help='Whether or not to load the model from --model_path'
    )
    parser.add_argument(
        '--model_path', type=str, default='model.pkl', metavar='MODEL_PATH',
        help='COMPLETE path from which to load a model'
    )
    parser.add_argument(
        '--metadata_flags', type=str, default='', metavar='METADATA_FLAGS',
        help='Flags specifying observational metadata pre-processing, e.g. LUNAR_MAG, which '
             'takes the magnitude and linearizes it (ignoring that it is an area magnitude)'
    )
    parser.add_argument(
        '--compacted_path', type=str, default=None, metavar='COMPACTED_PATH',
        help='Path to find compacted/arrayized data; setting this will cause --path and '
             '--pattern to be ignored'
    )

    parser_compare = subparsers.add_parser('compare')
    parser_compare.add_argument(
        '--folds', type=int, default=3, metavar='TEST_FOLDS',
        help='Do k-fold cross validation with the specified number of folds. Defaults to 3.'
    )
    parser_compare.add_argument(
        '--iters', type=int, default=50, metavar='HYPER_FIT_ITERS',
        help='Number of iterations when fitting hyper-params'
    )
    parser_compare.add_argument(
        '--outputfbk', action='store_true',
        help="If set, outputs 'grid_scores_' data from RandomizedSearchCV"
    )
    parser_compare.add_argument(
        '--save_best', action='store_true',
        help='Whether or not to save the (last/best) model built, e.g. for --hyper_fit'
    )
    parser_compare.add_argument(
        '--scorer', type=str, default='R2',
        choices=['R2', 'MAE', 'MSE', 'LL', 'EXP_VAR', 'MAPED', 'MSEMV'],
        help='Which scoring method to use to determine the ranking of model instances.'
    )
    parser_compare.add_argument(
        '--use_spectra', action='store_true',
        help='Whether scoring is done against the DM components or the predicted spectra'
    )
    parser_compare.add_argument(
        '--ivar_cutoff', type=float, default=0.001, metavar='IVAR_CUTOFF',
        help='Data with inverse variance below the cutoff is masked as if ivar == 0'
    )
    parser_compare.add_argument(
        '--plot_final_errors', action='store_true',
        help='If set, will plot the errors for the whole dataset from the final/best model, '
             're-trained on the CV folds used for testing. Plots all errors on top of each '
             'other with low-ish alpha, to give a kind of visual density map of errors.'
    )

    args = parser.parse_args()

    obs_metadata = trim_observation_metadata(
        load_observation_metadata(args.metadata_path, flags=args.metadata_flags))
    sources, components, exposures, wavelengths = ICAize.deserialize_data(
        args.spectra_path, args.method)
    source_model, ss, model_args = ICAize.unpickle_model(args.spectra_path, args.method)

    comb_flux_arr, comb_exposure_arr, comb_wavelengths = None, None, None
    if args.use_spectra:
        comb_flux_arr, comb_exposure_arr, comb_ivar_arr, comb_masks, comb_wavelengths = \
            ICAize.load_data(args)

        # Keep only the exposures we have sources for, then sort to line up with them
        filter_arr = np.in1d(comb_exposure_arr, exposures)
        comb_flux_arr = comb_flux_arr[filter_arr]
        comb_exposure_arr = comb_exposure_arr[filter_arr]

        sorted_inds = np.argsort(comb_exposure_arr)
        comb_flux_arr = comb_flux_arr[sorted_inds]
        comb_exposure_arr = comb_exposure_arr[sorted_inds]

        del comb_ivar_arr
        del comb_masks

    reduced_obs_metadata = obs_metadata[np.in1d(obs_metadata['EXP_ID'], exposures)]
    reduced_obs_metadata.sort('EXP_ID')
    sorted_inds = np.argsort(exposures)
    reduced_obs_metadata.remove_column('EXP_ID')

    md_len = len(reduced_obs_metadata)
    var_count = len(reduced_obs_metadata.columns)
    X_arr = np.array(reduced_obs_metadata).view('f8').reshape((md_len, -1))
    Y_arr = sources[sorted_inds]

    if args.load_model:
        predictive_model = load_model(args.model_path)
    else:
        predictive_model = get_model(args.model)

    if args.subparser_name == 'compare':
        pdist = get_param_distribution_for_model(args.model, args.iters)

        scorer = None
        if args.scorer == 'R2':
            scorer = make_scorer(R2)
        elif args.scorer == 'MAE':
            if args.use_spectra:
                p_MAE_ = partial(MAE, Y_full=Y_arr, flux_arr=comb_flux_arr,
                                 source_model=source_model, ss=ss,
                                 source_model_args=model_args, method=args.method)
                scorer = make_scorer(p_MAE_, greater_is_better=False)
            else:
                scorer = make_scorer(MAE, greater_is_better=False)
        elif args.scorer == 'MSE':
            if args.use_spectra:
                p_MSE_ = partial(MSE, Y_full=Y_arr, flux_arr=comb_flux_arr,
                                 source_model=source_model, ss=ss,
                                 source_model_args=model_args, method=args.method)
                scorer = make_scorer(p_MSE_, greater_is_better=False)
            else:
                scorer = make_scorer(MSE, greater_is_better=False)
        elif args.scorer == 'MSEMV':
            if args.use_spectra:
                p_MSEMV_ = partial(MSEMV, Y_full=Y_arr, flux_arr=comb_flux_arr,
                                   source_model=source_model, ss=ss,
                                   source_model_args=model_args, method=args.method)
                scorer = make_scorer(p_MSEMV_, greater_is_better=False)
            else:
                scorer = make_scorer(MSEMV, greater_is_better=False)
        elif args.scorer == 'EXP_VAR':
            if args.use_spectra:
                p_EXP_VAR_ = partial(EXP_VAR, Y_full=Y_arr, flux_arr=comb_flux_arr,
                                     source_model=source_model, ss=ss,
                                     source_model_args=model_args, method=args.method)
                scorer = make_scorer(p_EXP_VAR_)
            else:
                scorer = make_scorer(EXP_VAR)
        elif args.scorer == 'MAPED':
            if args.use_spectra:
                p_MAPED_ = partial(MAPED, Y_full=Y_arr, flux_arr=comb_flux_arr,
                                   source_model=source_model, ss=ss,
                                   source_model_args=model_args, method=args.method)
                scorer = make_scorer(p_MAPED_, greater_is_better=False)
            else:
                scorer = make_scorer(MAPED, greater_is_better=False)
        elif args.scorer == 'LL':
            scorer = None

        folder = ShuffleSplit(exposures.shape[0], n_iter=args.folds,
                              test_size=1.0 / args.folds, random_state=12345)
        if args.model == 'GP':
            predictive_model.random_start = args.folds
            rcv = GridSearchCV(predictive_model, param_grid=pdist, error_score=0,
                               cv=3, n_jobs=args.n_jobs, scoring=scorer)
            #random_state=RANDOM_STATE, #n_iter=args.iters,
        else:
            rcv = RandomizedSearchCV(predictive_model, param_distributions=pdist,
                                     n_iter=args.iters, cv=folder,
                                     n_jobs=args.n_jobs, scoring=scorer)

        # This is going to fit X (metadata) to Y (DM'ed sources). But there are
        # really two tests here: how well hyperparams fit/predict the sources,
        # and how well they fit/predict the actual source spectra. Until I know
        # better, I'm going to need to build a way to test both.
        rcv.fit(X_arr, Y_arr)

        print(rcv.best_score_)
        print(rcv.best_params_)
        print(rcv.best_estimator_)
        if args.outputfbk:
            print("=+" * 10 + "=")
            for val in rcv.grid_scores_:
                print(val)
            print("=+" * 10 + "=")

        if args.save_best:
            save_model(rcv.best_estimator_, args.model_path)

        if args.plot_final_errors:
            for train_inds, test_inds in folder:
                rcv.best_estimator_.fit(X_arr[train_inds], Y_arr[train_inds])
                predicted = rcv.best_estimator_.predict(X_arr[test_inds])
                back_trans_flux = ICAize.inverse_transform(predicted, source_model, ss,
                                                           args.method, model_args)
                diffs = np.abs(comb_flux_arr[test_inds] - back_trans_flux)
                # Is there no 'trick' to getting matplotlib to do this without a loop?
                # (There is: plt.plot(comb_wavelengths, diffs.T, 'b-', alpha=0.01)
                # plots one line per row of diffs in a single call.)
                for i in range(diffs.shape[0]):
                    plt.plot(comb_wavelengths, diffs[i, :], 'b-', alpha=0.01)
            plt.show()
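# ---------------------------------------------------------------------------
# A minimal sketch of the shape a spectra-space loss must have to work with
# the partial(...)/make_scorer(...) pattern in the 'compare' branch above.
# This is NOT the project's actual MSE: the function name and the row-matching
# trick (looking each scored row of y up in Y_full to recover the matching
# flux_arr rows) are assumptions for illustration only.
import numpy as np
from functools import partial
from sklearn.metrics import make_scorer

def spectra_space_mse(y, y_pred, Y_full=None, flux_arr=None, source_model=None,
                      ss=None, source_model_args=None, method=None):
    # Recover which rows of Y_full (and hence of flux_arr) are being scored;
    # assumes the rows of Y_full are unique, so each match is unambiguous.
    inds = np.array([np.flatnonzero((Y_full == row).all(axis=1))[0] for row in y])
    # Map predicted components back to flux space with the same helper the
    # --plot_final_errors branch uses.
    back_trans_flux = ICAize.inverse_transform(y_pred, source_model, ss, method,
                                               source_model_args)
    return float(np.mean((flux_arr[inds] - back_trans_flux) ** 2))

# Usage would mirror the MSE branch above:
#   p_MSE_ = partial(spectra_space_mse, Y_full=Y_arr, flux_arr=comb_flux_arr, ...)
#   scorer = make_scorer(p_MSE_, greater_is_better=False)
# ---------------------------------------------------------------------------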
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Compute PCA/ICA/NMF/etc. components over a set of stacked spectra, '
                    'save those out, and pickle the model'
    )
    subparsers = parser.add_subparsers(dest='subparser_name')

    parser.add_argument(
        '--pattern', type=str, default='stacked*exp??????.*', metavar='PATTERN',
        help='File pattern for stacked sky fibers.'
    )
    parser.add_argument(
        '--path', type=str, default='.', metavar='PATH',
        help="Path to work from, if not '.'"
    )
    parser.add_argument(
        '--compacted_path', type=str, default=None, metavar='COMPACTED_PATH',
        help='Path to find compacted/arrayized data; setting this will cause --path and '
             '--pattern to be ignored'
    )
    parser.add_argument(
        '--method', type=str, default=['ICA'], metavar='METHOD',
        choices=['ICA', 'PCA', 'SPCA', 'NMF', 'ISO', 'KPCA', 'FA', 'DL'], nargs='+',
        help='Which dim. reduction method(s) to use'
    )
    parser.add_argument(
        '--scale', action='store_true',
        help='Should inputs be scaled? Will mean subtract and value scale, but does not '
             'scale variance.'
    )
    parser.add_argument(
        '--ivar_cutoff', type=float, default=0.001, metavar='IVAR_CUTOFF',
        help='Data with inverse variance below the cutoff is masked as if ivar == 0'
    )
    parser.add_argument(
        '--n_iter', type=int, default=1200, metavar='MAX_ITER',
        help='Maximum number of iterations to allow for convergence. For SDSS data, 1000 is '
             'a safe number for ICA, while SPCA requires larger values, e.g. ~2000 to ~2500.'
    )
    parser.add_argument(
        '--n_jobs', type=int, default=None, metavar='N_JOBS',
        help='Number of parallel jobs to run'
    )

    parser_compare = subparsers.add_parser('compare')
    parser_compare.add_argument(
        '--max_components', type=int, default=50, metavar='COMP_MAX',
        help='Max number of components to use/test'
    )
    parser_compare.add_argument(
        '--min_components', type=int, default=0, metavar='COMP_MIN',
        help='Min number of components to use/test'
    )
    parser_compare.add_argument(
        '--step_size', type=int, default=5, metavar='COMP_STEP',
        help='Step size from comp_min to comp_max'
    )
    parser_compare.add_argument(
        '--comparison', choices=['EXP_VAR', 'R2', 'MSE', 'MAE'], nargs='*', default=['EXP_VAR'],
        help='Comparison methods: explained variance (score), R2 (score), '
             'mean sq. error (loss), MEDIAN absolute error (loss)'
    )
    parser_compare.add_argument(
        '--mle_if_avail', action='store_true',
        help='In addition to --comparison, include MLE if the PCA or FA method is specified'
    )
    parser_compare.add_argument(
        '--plot_example_reconstruction', action='store_true',
        help='Pick a random spectrum, plot its actual and reconstructed versions'
    )

    parser_build = subparsers.add_parser('build')
    parser_build.add_argument(
        '--n_components', type=int, default=40, metavar='N_COMPONENTS',
        help='Number of ICA/PCA/etc. components'
    )
    parser_build.add_argument(
        '--n_neighbors', type=int, default=10, metavar='N_NEIGHBORS',
        help='Number of neighbors for e.g. IsoMap'
    )

    args = parser.parse_args()

    comb_flux_arr, comb_exposure_arr, comb_ivar_arr, comb_masks, comb_wavelengths = \
        iz.load_data(args)

    # DictionaryLearning requires float64 input
    if 'DL' in args.method:
        flux_arr = comb_flux_arr.astype(dtype=np.float64)
    else:
        flux_arr = comb_flux_arr

    ss = None
    if args.scale:
        ss = skpp.StandardScaler(with_std=False)
        scaled_flux_arr = ss.fit_transform(flux_arr)
    else:
        scaled_flux_arr = flux_arr

    if args.subparser_name == 'compare':
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()

        for method in args.method:
            model = iz.get_model(method, max_iter=args.n_iter,
                                 random_state=iz.random_state, n_jobs=args.n_jobs)
            scores = {}
            mles_and_covs = args.mle_if_avail and (method == 'FA' or method == 'PCA')

            n_components = np.arange(args.min_components, args.max_components + 1,
                                     args.step_size)
            for n in n_components:
                print("Cross validating for n=" + str(n) + " on method " + method)
                model.n_components = n
                # NMF cannot handle mean-subtracted (negative) inputs, so it
                # gets the unscaled flux array
                comparisons = iz.score_via_CV(
                    args.comparison, flux_arr if method == 'NMF' else scaled_flux_arr,
                    model, method, n_jobs=args.n_jobs, include_mle=mles_and_covs,
                    modeler=_iter_modeler, scorer=_iter_scorer)
                for key, val in comparisons.items():
                    if key in scores:
                        scores[key].append(val)
                    else:
                        scores[key] = [val]

            if mles_and_covs:
                #ax2.axhline(cov_mcd_score(scaled_flux_arr, args.scale), color='violet', label='MCD Cov', linestyle='--')
                ax2.axhline(cov_lw_score(scaled_flux_arr, args.scale), color='orange',
                            label='LW Cov', linestyle='--')

            for key, score_list in scores.items():
                if key != 'mle':
                    ax1.plot(n_components, score_list, label=method + ':' + key + ' scores')
                else:
                    ax2.plot(n_components, score_list, '-.', label=method + ' mle scores')

        ax1.set_xlabel('number of components')
        ax1.set_ylabel('CV scores', figure=fig)
        ax1.legend(loc='lower left')
        ax2.legend(loc='lower right')

        plt.show()
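# ---------------------------------------------------------------------------
# A minimal, standalone sketch (an assumption about, not a copy of, what
# iz.score_via_CV does for the 'EXP_VAR' comparison): fit the reduction on
# each training split, reconstruct the held-out spectra, and score the
# reconstruction. Written against plain scikit-learn PCA so it runs on its
# own; the real helper also handles ICA/NMF/etc. and the MLE score.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics import explained_variance_score

def cv_reconstruction_score(flux_arr, n_components, folds=3, seed=12345):
    rng = np.random.RandomState(seed)
    fold_chunks = np.array_split(rng.permutation(flux_arr.shape[0]), folds)
    scores = []
    for k in range(folds):
        test_idx = fold_chunks[k]
        train_idx = np.concatenate([fold_chunks[j] for j in range(folds) if j != k])
        model = PCA(n_components=n_components)
        model.fit(flux_arr[train_idx])
        # Project the held-out spectra onto the components and back again
        recon = model.inverse_transform(model.transform(flux_arr[test_idx]))
        scores.append(explained_variance_score(flux_arr[test_idx], recon))
    return float(np.mean(scores))

# e.g.: cv_reconstruction_score(scaled_flux_arr, n_components=40)
# ---------------------------------------------------------------------------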
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Compute PCA/ICA/NMF/etc. components over a set of stacked spectra, '
                    'save those out, and pickle the model'
    )
    parser.add_argument(
        '--pattern', type=str, default='stacked*exp??????.*', metavar='PATTERN',
        help='File pattern for stacked sky fibers.'
    )
    parser.add_argument(
        '--path', type=str, default='.', metavar='PATH',
        help="Path to work from, if not '.'"
    )
    parser.add_argument(
        '--compacted_path', type=str, default=None, metavar='COMPACTED_PATH',
        help='Path to find compacted/arrayized data; setting this will cause --path and '
             '--pattern to be ignored'
    )
    parser.add_argument(
        '--n_components', type=int, default=40, metavar='N_COMPONENTS',
        help='Number of ICA/PCA/etc. components'
    )
    parser.add_argument(
        '--method', type=str, default='ICA', metavar='METHOD',
        choices=['ICA', 'PCA', 'SPCA', 'NMF', 'ISO', 'KPCA', 'FA', 'DL'],
        help='Which dim. reduction method to use'
    )
    parser.add_argument(
        '--scale', action='store_true',
        help='Should input variance be scaled? Defaults to mean subtract and value scale, '
             'but without this does not scale variance.'
    )
    parser.add_argument(
        '--no_scale', action='store_true',
        help='Suppresses all scaling'
    )
    parser.add_argument(
        '--ivar_cutoff', type=float, default=0.001, metavar='IVAR_CUTOFF',
        help='Data with inverse variance below the cutoff is masked as if ivar == 0'
    )
    parser.add_argument(
        '--n_iter', type=int, default=1200, metavar='MAX_ITER',
        help='Maximum number of iterations to allow for convergence. For SDSS data, 1000 is '
             'a safe number for ICA, while SPCA requires larger values, e.g. ~2000 to ~2500.'
    )
    parser.add_argument(
        '--n_jobs', type=int, default=None, metavar='N_JOBS',
        help='Number of parallel jobs to run'
    )

    args = parser.parse_args()

    comb_flux_arr, comb_exposure_arr, comb_ivar_arr, comb_masks, comb_wavelengths = \
        iz.load_data(args)
    model = iz.get_model(args.method, n=args.n_components, n_neighbors=None,
                         max_iter=args.n_iter, random_state=iz.random_state,
                         n_jobs=args.n_jobs)

    ss = None
    if args.no_scale:
        scaled_flux_arr = comb_flux_arr
    else:
        ss = skpp.StandardScaler(with_std=False)
        if args.scale:
            ss = skpp.StandardScaler(with_std=True)
        scaled_flux_arr = ss.fit_transform(comb_flux_arr)

    # Heavily copied from J. Vanderplas/astroML bayesian_blocks.py
    N = comb_wavelengths.size
    step = args.n_components * 4

    edges = np.concatenate([comb_wavelengths[:1:step],
                            0.5 * (comb_wavelengths[1::step] + comb_wavelengths[:-1:step]),
                            comb_wavelengths[-1::step]])
    block_length = comb_wavelengths[-1::step] - edges

    # arrays to store the best configuration
    # (integer division: N / step is a float in Python 3)
    nn_vec = np.ones(N // step) * step
    best = np.zeros(N, dtype=float)
    last = np.zeros(N, dtype=int)
    for R in range(N // step):
        print("R: " + str(R))

        width = block_length[:R + 1] - block_length[R + 1]
        count_vec = np.cumsum(nn_vec[:R + 1][::-1])[::-1]
        #width = nn_vec[:R + 1] - nn_vec[R + 1]
        #count_vec = np.cumsum(nn_vec[:R + 1][::-1])[::-1]
        #print(width)
        #print(count_vec)
        #raw_input("Pausing... ")

        # Score each candidate block width via cross-validated log-likelihood
        # (list comprehension instead of map() for Python 3; count_vec holds
        # floats, so coerce to int before slicing)
        fit_vec = [iz.score_via_CV(['LL'], scaled_flux_arr[:, :int(n)], model, ss,
                                   args.method, folds=3, n_jobs=args.n_jobs)
                   for n in count_vec]
        fit_vec = np.array([d["mle"] for d in fit_vec])
        #print(fit_vec)
        fit_vec[1:] += best[:R]
        #print(fit_vec)

        i_max = np.argmax(fit_vec)
        last[R] = i_max
        best[R] = fit_vec[i_max]
        #print(best)

    # Recover the change points by iteratively peeling off the last block
    change_points = np.zeros(N // step, dtype=int)
    i_cp = N // step
    ind = N // step
    while True:
        i_cp -= 1
        change_points[i_cp] = ind
        if ind == 0:
            break
        ind = last[ind - 1]
    change_points = change_points[i_cp:]

    print(edges[change_points])
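# ---------------------------------------------------------------------------
# A toy, self-contained sketch of the dynamic program adapted from astroML's
# bayesian_blocks above: best[R] holds the best total fitness over all
# partitions of cells 0..R, last[R] holds where the final block of that
# partition starts, and the change points are recovered by walking `last`
# backwards from the end. The block-fitness function used below is
# illustrative only (it rewards blocks of near-constant value).
import numpy as np

def best_partition(fitness, n):
    """Optimal change points for cells 0..n-1 under an additive block fitness."""
    best = np.zeros(n)
    last = np.zeros(n, dtype=int)
    for r in range(n):
        # fitness of ending with a block spanning cells i..r, plus the best
        # partition of everything before cell i
        cand = np.array([fitness(i, r) + (best[i - 1] if i > 0 else 0.0)
                         for i in range(r + 1)])
        last[r] = int(np.argmax(cand))
        best[r] = cand[last[r]]
    points, ind = [], n
    while ind > 0:
        points.append(ind)
        ind = last[ind - 1]
    points.append(0)
    return points[::-1]

x = np.array([1., 1., 1., 5., 5., 5., 2., 2.])
print(best_partition(lambda i, j: -np.var(x[i:j + 1]) * (j - i + 1), len(x)))
# -> [0, 3, 6, 8]: block boundaries around the three constant runs
# ---------------------------------------------------------------------------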