        cv=cv, n_jobs=1)

# Normalize to a probability distribution
y_pred /= np.sum(y_pred, axis=1)[:, np.newaxis]

# Calculate the max-predicted bin
ybin_centers = bin_centers_from_edges(ybin_edges)
ybin_grid = linearized_bin_grid(ybin_centers)
y_predicted = ybin_grid[np.argmax(y_pred, axis=1)]

# Output
if DISPLAY_PLOTS:
    fig, axes = n_subplot_grid(y_predicted.shape[1], max_horizontal=1, figsize=(10, 8))
    for dim, ax in enumerate(axes):
        ax.plot(T_pred, y_test[:, dim])
        ax.plot(T_pred, y_predicted[:, dim])
        ax.set_title('y test (blue) vs predicted (orange) dim={}'.format(dim))
    fig.show()

if SAVE_TO_FILE is not None:
    from mlneuro.utils.io import save_array_dict
    save_array_dict(SAVE_TO_FILE,
                    {'times': T_pred,
                     'estimates': y_pred.reshape(-1, STIMULUS_BINS, STIMULUS_BINS),
                     'max_estimate': y_predicted,
                             y_pred, method='predict_proba')
    else:
        # Get unfiltered results
        T_filt = T
        y_pred_filt = y_pred

    if isinstance(filt, BinningFilter):
        T_filt = bin_centers_from_edges([T_filt])

    filtered_times.append(T_filt)
    filtered_predictions.append(y_pred_filt)

    # Calculate the max-predicted bin
    y_predicted = ybin_grid[np.argmax(y_pred_filt, axis=1)]
    filtered_max_pred.append(y_predicted)

# Output
if DISPLAY_PLOTS:
    fig, axes = n_subplot_grid(len(FILTERS), max_horizontal=1, figsize=(10, 20), hspace=1.1)
    for times, pred, ax, filt in zip(filtered_times, filtered_max_pred, axes, FILTERS):
        ax.plot(T, y[:, 0])
        ax.plot(times, pred[:, 0])
        if PLOT_X_RANGE is not None:
            ax.set_xlim(PLOT_X_RANGE)
        ax.set_title('y filtered with {}'.format(filt))
    fig.show()
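# Illustrative aside (not part of the original example): a comparable temporal
# smoothing can be applied directly to the probability estimates with a Gaussian
# kernel along the time axis; the sigma value here is an assumed, tunable choice.
from scipy.ndimage import gaussian_filter1d

y_pred_smooth = gaussian_filter1d(y_pred, sigma=3, axis=0)           # smooth each bin's probability over time
y_pred_smooth /= np.sum(y_pred_smooth, axis=1)[:, np.newaxis]        # re-normalize each row to a distribution
y_predicted_smooth = ybin_grid[np.argmax(y_pred_smooth, axis=1)]     # max-probability bin, as above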
    score_time = results[name]['score_time']
    print('[{}] Fit time {:.4} +- {:.2} | Score time {:.4} +- {:.2}'.format(
        name, np.mean(fit_time), np.std(fit_time), np.mean(score_time), np.std(score_time)))
    for scorer in SCORERS:
        test_score = results[name]['test_' + scorer]
        train_score = results[name]['train_' + scorer]
        print('[{}] {}: Test score {:.4} +- {:.2} | Train score {:.4} +- {:.2}'.format(
            name, scorer, np.mean(test_score), np.std(test_score),
            np.mean(train_score), np.std(train_score)))

# Display bar plots comparing the scores per estimator
if DISPLAY_BAR_PLOTS:
    results_keys = list(results.items())[0][1].keys()

    # Make room on the right side of the plot for a legend
    fig, axes = n_subplot_grid(len(results_keys), max_horizontal=2, right=0.25, figsize=(14, 14))

    from matplotlib.pyplot import cm
    from matplotlib.patches import Patch
    colors = cm.rainbow(np.linspace(0, 1, len(results)))

    key_axes = {}
    for key, ax in zip(results_keys, axes):
        bars = []
        ax.set_title(key)

        metric_results = []
        for est_key in results:
            metric_results.append(results[est_key][key])
        metric_results = np.array(metric_results)

        inds = np.arange(metric_results.shape[0])
        width = 0.5
X_train, X_test, T_train, T_test, y_train, y_test = train_test_split(
    X, T, y, test_size=0.15, shuffle=False)

pipeline_quadratic.fit(X_train, y_train)
pipeline_linear.fit(X_train, y_train)

if DISPLAY_TUNING_CURVES:
    # Calculate ground truth firing rates / bin
    ground_truth_tc = np.zeros((X.shape[1], pipeline_quadratic.ybin_grid.shape[0]))
    bin_positions = binned_data(y, pipeline_quadratic.ybin_edges)
    for i in range(X.shape[1]):
        for b in range(ground_truth_tc.shape[1]):
            ground_truth_tc[i, b] = np.sum(X[bin_positions == b, i])

    fig, axes = n_subplot_grid(30, max_horizontal=3, hspace=0.01, wspace=0.01)
    for i in range(0, 30, 3):
        axes[i].imshow(pipeline_quadratic.tuning_curves[i].reshape(STIMULUS_BINS, STIMULUS_BINS))
        axes[i].set_title('Quadratic')
        axes[i + 1].imshow(pipeline_linear.tuning_curves[i].reshape(STIMULUS_BINS, STIMULUS_BINS))
        axes[i + 1].set_title('Linear')
        axes[i + 2].imshow(ground_truth_tc[i].reshape(STIMULUS_BINS, STIMULUS_BINS))
        axes[i + 2].set_title('Ground truth')
    fig.show()

# Select a pipeline to use for prediction
pipeline = pipeline_quadratic
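# Illustrative aside (not part of the original example): the nested loops that build
# the ground-truth tuning curves above can be replaced by a single vectorized
# accumulation. This assumes binned_data returns a 1-D array of linearized bin
# indices, matching how bin_positions is used in the loop above.
bin_idx = binned_data(y, pipeline_quadratic.ybin_edges)
tc = np.zeros((pipeline_quadratic.ybin_grid.shape[0], X.shape[1]))   # (n_bins, n_units)
np.add.at(tc, bin_idx, X)                                            # sum each unit's activity into its occupied bin
ground_truth_tc_vectorized = tc.T                                    # back to (n_units, n_bins)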