# These plotting fragments assume the standard imports below, plus variables
# defined earlier in the script (`rows`, `cols`, `etas`, `errors`, `Yrms`,
# `n_trials`, `n_fdfs`, `n_per_show`, `f_df_labels`, ...). `filt` is a
# smoothing filter with `filt`/`filtfilt` methods (e.g. an alpha filter, as
# constructed in the next fragment).
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

for row in range(rows):  # for each eta
    for col in range(cols):  # for each learner
        print("Plotting (%d, %d)" % (row, col))
        ax = plt.subplot(rows, cols, row * cols + col + 1)
        eta = etas[row]

        # (n_trials x n_fdfs x n) matrix of errors
        error = np.array(
            [[errors[eta][itrial][ifdf, col] for ifdf in range(n_fdfs)]
             for itrial in range(n_trials)])
        error /= Yrms

        # replace non-finite errors with a large finite value so filtering works
        error[~np.isfinite(error)] = 1e6
        error = filt.filt(error, axis=-1)
        # error = filt.filtfilt(error, axis=-1)

        batch_inds = (n_per_show / 1000.) * np.arange(error.shape[-1])
        sns.tsplot(
            data=np.transpose(error, (0, 2, 1)),
            time=batch_inds,
            condition=f_df_labels,
            # err_style='unit_traces',
            legend=(row == 0 and col == 0))

        ax.set_ylim([0.09, 1.2])
        ax.set_yscale('log')
        if row + 1 == rows:
            ax.set_xlabel('thousands of examples')
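# NOTE (added sketch, not part of the original script): sns.tsplot was
# deprecated in seaborn 0.8 and removed in 0.12. On newer seaborn versions, a
# minimal stand-in that reproduces the basic behaviour (one across-trial mean
# trace per condition) is the hypothetical helper below; confidence bands and
# the `err_style` option are omitted.
def tsplot_mean(ax, data, time, condition, legend=False):
    """Plot across-trial means of `data`, shaped (n_trials, n_times, n_conds)."""
    for k, label in enumerate(condition):
        ax.plot(time, data[:, :, k].mean(axis=0), label=label)
    if legend:
        ax.legend()

# Usage matching the call above (same transposed data layout):
#   tsplot_mean(ax, np.transpose(error, (0, 2, 1)), batch_inds, f_df_labels,
#               legend=(row == 0 and col == 0))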
eta_errors = np.array(eta_errors)
batch_inds = n_per_batch * np.arange(eta_errors.shape[-1])

# --- plot results (traces=learners)
plt.figure(figsize=(6.35, 6.0))
rows, cols = 2, 2
assert len(etas) <= rows * cols

for i, eta in enumerate(etas):
    trial_errors = eta_errors[i]

    # smooth the normalized error traces with an alpha filter,
    # capping non-finite and very large errors first
    filt = Alpha(30, default_dt=n_per_batch)
    trial_errors[~np.isfinite(trial_errors)] = 1e6
    trial_errors = trial_errors.clip(None, 1e6)
    trial_errors = filt.filt(trial_errors / Yrms, axis=-1)
    # trial_errors = filt.filtfilt(trial_errors / Yrms, axis=-1)

    ax = plt.subplot(rows, cols, i + 1)
    sns.tsplot(data=np.transpose(trial_errors, (0, 2, 1)),
               time=batch_inds, condition=learner_names,
               err_style='unit_traces', legend=(i == 0))
    # ax.set(yscale='log')
    plt.ylim([1e-1, 1.2e0])
    plt.xlabel('# of examples')
    plt.ylabel('normalized RMS error')
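# NOTE (added sketch, not part of the original script): `Alpha` above is
# assumed to be nengo.synapses.Alpha, used here only to smooth the error
# traces. To run this without nengo, a rough substitute is the hypothetical
# first-order lowpass (exponential moving average) below, rather than a true
# alpha (double-exponential) filter.
def lowpass_smooth(x, tau=30.0, dt=1.0, axis=-1):
    """Smooth `x` along `axis` with a first-order lowpass of time constant `tau`."""
    a = dt / (tau + dt)  # discrete lowpass coefficient, 0 < a < 1
    x = np.moveaxis(np.asarray(x, dtype=float), axis, 0)
    y = np.empty_like(x)
    y[0] = x[0]
    for t in range(1, len(x)):
        y[t] = (1 - a) * y[t - 1] + a * x[t]
    return np.moveaxis(y, 0, axis)

# Usage matching the filtering step above:
#   trial_errors = lowpass_smooth(trial_errors / Yrms, tau=30.0,
#                                 dt=n_per_batch, axis=-1)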