# Average Te over the entire data set using robust estimators
# (median / IQR-based scale via gptools) instead of mean/std, so
# transient outliers in the time series don't skew the profile points.
robust = True
Te_TS_w, dev_Te_TS_w = gptools.compute_stats(Te_TS, robust=robust)
R_mid_w, dev_R_mid_w = gptools.compute_stats(R_mid_CTS, robust=robust)
# ETS data contain NaNs (presumably missing time slices) -- skip them:
Te_ETS_w, dev_Te_ETS_w = gptools.compute_stats(Te_ETS, robust=robust, check_nan=True)
R_mid_ETS_w, dev_R_mid_ETS_w = gptools.compute_stats(R_mid_ETS, robust=robust)
Te_FRC_w, dev_Te_FRC_w = gptools.compute_stats(Te_FRC, robust=robust)
R_mid_FRC_w, dev_R_mid_FRC_w = gptools.compute_stats(R_mid_FRC, robust=robust)

# Get rid of clearly too small points (Why do these happen?)
# Keep only FRC channels whose averaged Te is at least 0.1 (keV),
# and apply the same mask to the paired uncertainty/location arrays.
good_idxs = Te_FRC_w >= 0.1
Te_FRC_w = Te_FRC_w[good_idxs]
dev_Te_FRC_w = dev_Te_FRC_w[good_idxs]
R_mid_FRC_w = R_mid_FRC_w[good_idxs]
dev_R_mid_FRC_w = dev_R_mid_FRC_w[good_idxs]

Te_GPC2_w, dev_Te_GPC2_w = gptools.compute_stats(Te_GPC2, robust=robust, check_nan=True)
# Average ne over the entire data set using robust estimators
# (median / IQR-based scale via gptools) rather than mean/std, so a
# few bad time slices don't distort the averaged profile.
robust = True
ne_TS_w, dev_ne_TS_w = gptools.compute_stats(ne_TS, robust=robust)
R_mid_w, dev_R_mid_w = gptools.compute_stats(R_mid_CTS, robust=robust)
# ETS data contain NaNs (the legacy code used nanmean/nanstd) -- skip them:
ne_ETS_w, dev_ne_ETS_w = gptools.compute_stats(ne_ETS, robust=robust, check_nan=True)
R_mid_ETS_w, dev_R_mid_ETS_w = gptools.compute_stats(R_mid_ETS, robust=robust)
# Average Te over the entire data set using robust estimators
# (median / IQR-based scale via gptools) instead of mean/std, so
# transient outliers in the time series don't skew the profile points.
robust = True
Te_TS_w, dev_Te_TS_w = gptools.compute_stats(Te_TS, robust=robust)
R_mid_w, dev_R_mid_w = gptools.compute_stats(R_mid_CTS, robust=robust)
# ETS data contain NaNs (presumably missing time slices) -- skip them:
Te_ETS_w, dev_Te_ETS_w = gptools.compute_stats(Te_ETS, robust=robust, check_nan=True)
R_mid_ETS_w, dev_R_mid_ETS_w = gptools.compute_stats(R_mid_ETS, robust=robust)
Te_FRC_w, dev_Te_FRC_w = gptools.compute_stats(Te_FRC, robust=robust)
R_mid_FRC_w, dev_R_mid_FRC_w = gptools.compute_stats(R_mid_FRC, robust=robust)

# Get rid of clearly too small points (Why do these happen?)
# Keep only FRC channels whose averaged Te is at least 0.1 (keV),
# and apply the same mask to the paired uncertainty/location arrays.
good_idxs = (Te_FRC_w >= 0.1)
Te_FRC_w = Te_FRC_w[good_idxs]
dev_Te_FRC_w = dev_Te_FRC_w[good_idxs]
R_mid_FRC_w = R_mid_FRC_w[good_idxs]
dev_R_mid_FRC_w = dev_R_mid_FRC_w[good_idxs]
# Average ne over the entire data set using robust estimators
# (median / IQR-based scale via gptools) rather than mean/std, so a
# few bad time slices don't distort the averaged profile.
robust = True
ne_TS_w, dev_ne_TS_w = gptools.compute_stats(ne_TS, robust=robust)
R_mid_w, dev_R_mid_w = gptools.compute_stats(R_mid_CTS, robust=robust)
# ETS data contain NaNs (the legacy code used nanmean/nanstd) -- skip them:
ne_ETS_w, dev_ne_ETS_w = gptools.compute_stats(ne_ETS, robust=robust, check_nan=True)
R_mid_ETS_w, dev_R_mid_ETS_w = gptools.compute_stats(R_mid_ETS, robust=robust)