def test_array(self):
    """Vector input: within() flags exactly the values inside [2, 5]."""
    interval = verif.interval.Interval(2, 5, True, True)
    values = np.array([1, 3, 2, 0, 15])
    expected = np.array([False, True, True, False, False])
    np.testing.assert_array_equal(expected, interval.within(values))
    # A scalar inside the interval yields a 0-d True array
    np.testing.assert_array_equal(np.array(True), interval.within(3))
def _compute_abcd(self, obs, fcst, interval, f_interval=None):
    """Compute the 2x2 contingency table for the event defined by interval.

    Arguments:
       obs (np.array): observations
       fcst (np.array): forecasts, same length as obs
       interval (verif.interval.Interval): event definition for obs
       f_interval (verif.interval.Interval): event definition for fcst;
          defaults to the same interval used for obs

    Returns:
       list: [a, b, c, d] = [hits, false alarms, misses, correct
       rejections]. All np.nan when there are no forecasts.
    """
    if f_interval is None:
        f_interval = interval

    # Without any forecasts, no frequencies can be computed (the original
    # code left a/b/c/d unbound in this case, raising NameError)
    a = b = c = d = np.nan
    if len(fcst) > 0:
        if self._usingQuantiles:
            # Intervals are specified in quantile space: translate them to
            # physical thresholds using the sorted sample distributions
            f_int = self._quantile_to_threshold(np.sort(fcst), f_interval)
            o_int = self._quantile_to_threshold(np.sort(obs), interval)
        else:
            f_int = f_interval
            o_int = interval

        fcst_event = f_int.within(fcst)
        obs_event = o_int.within(obs)
        # np.ma.sum skips masked (missing) values
        a = np.ma.sum(fcst_event & obs_event)                # Hit
        b = np.ma.sum(fcst_event & (obs_event == 0))         # False alarm
        c = np.ma.sum((fcst_event == 0) & obs_event)         # Miss
        d = np.ma.sum((fcst_event == 0) & (obs_event == 0))  # Correct rejection
    return [a, b, c, d]
def compute_single(self, data, input_index, axis, axis_index, interval):
    """Fetch matched obs/fcst pairs, optionally subset them by the
    interval along the obs or fcst axis, and delegate the computation
    to compute_from_obs_fcst."""
    obs, fcst = data.get_scores([verif.field.Obs(), verif.field.Fcst()],
          input_index, axis, axis_index)
    assert(obs.shape[0] == fcst.shape[0])

    if axis == verif.axis.Obs():
        keep = np.where(interval.within(obs))
        obs, fcst = obs[keep], fcst[keep]
    elif axis == verif.axis.Fcst():
        keep = np.where(interval.within(fcst))
        obs, fcst = obs[keep], fcst[keep]

    return self.compute_from_obs_fcst(obs, fcst, interval)
def compute_single(self, data, input_index, axis, axis_index, interval):
    """ Compute probabilities based on thresholds

    Retrieves the forecast probability of the value falling within
    ``interval`` (from the CDF threshold fields) together with the
    verifying observations, then computes a Brier score within
    probability bins given by self._edges.

    NOTE(review): assumes at least one of interval.lower/interval.upper
    is finite; when both are infinite, obs is never retrieved and the
    function would fail -- confirm callers never pass an unbounded
    interval.
    """
    p0 = 0
    p1 = 1
    if(interval.lower != -np.inf and interval.upper != np.inf):
        # Bounded interval: P(within) = CDF(upper) - CDF(lower)
        var0 = verif.field.Threshold(interval.lower)
        var1 = verif.field.Threshold(interval.upper)
        [obs, p0, p1] = data.get_scores([verif.field.Obs(), var0, var1],
              input_index, axis, axis_index)
    elif(interval.lower != -np.inf):
        # Only a lower bound: P(within) = 1 - CDF(lower)
        var0 = verif.field.Threshold(interval.lower)
        [obs, p0] = data.get_scores([verif.field.Obs(), var0],
              input_index, axis, axis_index)
    elif(interval.upper != np.inf):
        # Only an upper bound: P(within) = CDF(upper)
        var1 = verif.field.Threshold(interval.upper)
        [obs, p1] = data.get_scores([verif.field.Obs(), var1],
              input_index, axis, axis_index)
    obsP = interval.within(obs)  # binary verifying outcome
    p = p1 - p0  # Prob of obs within range
    bs = np.nan * np.zeros(len(p), 'float')
    # Split into bins and compute Brier score on each bin
    for i in range(0, len(self._edges) - 1):
        I = np.where((p >= self._edges[i]) & (p < self._edges[i + 1]))[0]
        if(len(I) > 0):
            # Each member of the bin is scored against the bin-mean
            # forecast probability
            bs[I] = (np.mean(p[I]) - obsP[I]) ** 2
    # NOTE(review): bins are half-open, so p exactly equal to the final
    # edge falls in no bin and stays nan -- confirm this is intended
    return verif.util.nanmean(bs)
def test_nan(self):
    """ Test within returns np.nan when the input is np.nan.

    Interval.within returns a masked array, which is converted to a
    regular array so that it can be tested here.
    """
    interval = verif.interval.Interval(2, 5, True, True)

    data = np.array([1, 3, 2, np.nan, 15])
    result = np.ma.filled(interval.within(data), fill_value=np.nan)
    expected = np.ma.masked_array([False, True, True, np.nan, False],
          mask=[0, 0, 0, 1, 0])
    np.testing.assert_array_equal(expected, result)

    # Test scalar
    self.assertTrue(np.isnan(interval.within(np.nan)))
def get_p(data, input_index, axis, axis_index, interval):
    """ Retrieves and computes forecast probability and verifying
    observation for being inside interval

    Returns:
       obs (np.array): True when observation is inside interval
       p (np.array): True when forecast is inside interval
    """
    lower_bounded = interval.lower != -np.inf
    upper_bounded = interval.upper != np.inf

    p0 = 0
    p1 = 1
    if lower_bounded and upper_bounded:
        fields = [verif.field.Obs(),
                  verif.field.Threshold(interval.lower),
                  verif.field.Threshold(interval.upper)]
        [obs, p0, p1] = data.get_scores(fields, input_index, axis, axis_index)
    elif lower_bounded:
        fields = [verif.field.Obs(), verif.field.Threshold(interval.lower)]
        [obs, p0] = data.get_scores(fields, input_index, axis, axis_index)
    elif upper_bounded:
        fields = [verif.field.Obs(), verif.field.Threshold(interval.upper)]
        [obs, p1] = data.get_scores(fields, input_index, axis, axis_index)

    obsP = interval.within(obs)
    # Probability of the value falling inside the interval
    p = p1 - p0
    return [obsP, p]
def get_p(data, input_index, axis, axis_index, interval):
    """ Retrieves and computes forecast probability and verifying
    observation for being inside interval

    Returns:
       obs (np.array): True when observation is inside interval
       p (np.array): True when forecast is inside interval
    """
    p0 = 0
    p1 = 1
    if interval.lower != -np.inf and interval.upper != np.inf:
        # Bounded interval: P(within) = CDF(upper) - CDF(lower)
        var0 = verif.field.Threshold(interval.lower)
        var1 = verif.field.Threshold(interval.upper)
        [obs, p0, p1] = data.get_scores([verif.field.Obs(), var0, var1],
              input_index, axis, axis_index)
    elif interval.lower != -np.inf:
        # Only a lower bound: P(within) = 1 - CDF(lower)
        var0 = verif.field.Threshold(interval.lower)
        [obs, p0] = data.get_scores([verif.field.Obs(), var0],
              input_index, axis, axis_index)
    elif interval.upper != np.inf:
        # Only an upper bound: P(within) = CDF(upper)
        var1 = verif.field.Threshold(interval.upper)
        [obs, p1] = data.get_scores([verif.field.Obs(), var1],
              input_index, axis, axis_index)
    obsP = interval.within(obs)
    p = p1 - p0  # Prob of obs within range
    return [obsP, p]
def compute_single(self, data, input_index, axis, axis_index, interval):
    """Ratio of the observed event frequency to the mean forecast
    probability for the event defined by interval."""
    if np.isinf(interval.lower):
        # Open below: only the upper-threshold CDF is needed
        pvar = verif.field.Threshold(interval.upper)
        [obs, p1] = data.get_scores([verif.field.Obs(), pvar],
              input_index, axis, axis_index)
        p0 = 0 * p1
    elif np.isinf(interval.upper):
        # Open above: only the lower-threshold CDF is needed
        pvar = verif.field.Threshold(interval.lower)
        [obs, p0] = data.get_scores([verif.field.Obs(), pvar],
              input_index, axis, axis_index)
        p1 = 0 * p0 + 1
    else:
        pvar0 = verif.field.Threshold(interval.lower)
        pvar1 = verif.field.Threshold(interval.upper)
        [obs, p0, p1] = data.get_scores([verif.field.Obs(), pvar0, pvar1],
              input_index, axis, axis_index)

    obs = interval.within(obs)
    p = p1 - p0
    mean_p = np.mean(p)
    if mean_p == 0:
        # Forecast never assigns any probability; avoid dividing by zero
        return np.nan
    return np.mean(obs) / mean_p
def compute_single(self, data, input_index, axis, axis_index, interval):
    """Aggregate the values of self._field, optionally subset by the
    interval along the obs or fcst axis."""
    fields = [self._field]
    subset_pos = None
    # Add a subsetting field when slicing along obs/fcst and the main
    # field is not already that field
    if axis == verif.axis.Obs() and self._field != verif.field.Obs():
        fields.append(verif.field.Obs())
        subset_pos = len(fields) - 1
    elif axis == verif.axis.Fcst() and self._field != verif.field.Fcst():
        fields.append(verif.field.Fcst())
        subset_pos = len(fields) - 1
    if self._aux is not None:
        fields.append(self._aux)

    scores = data.get_scores(fields, input_index, axis, axis_index)
    values = scores[0]

    # Keep only entries whose subsetting field lies inside the interval
    if subset_pos is not None:
        keep = np.where(interval.within(scores[subset_pos]))[0]
        values = values[keep]
    return self.aggregator(values)
def compute_single(self, data, input_index, axis, axis_index, interval):
    """Count how many values of self._x fall inside the interval;
    returns np.nan when none do."""
    values = data.get_scores(self._x, input_index, axis, axis_index)
    count = len(np.where(interval.within(values))[0])
    if count == 0:
        return np.nan
    return count
def compute_from_obs_fcst(self, obs, fcst, interval):
    """Apply self._func to the observations inside the interval;
    returns np.nan when no observation qualifies."""
    selected = obs[np.where(interval.within(obs))[0]]
    if len(selected) == 0:
        return np.nan
    return self._func(selected)
def compute_from_obs_fcst(self, obs, fcst, interval):
    """Percentage of cases where the absolute error |obs - fcst|
    falls inside the interval."""
    error = abs(obs - fcst)
    return 100 * np.mean(interval.within(error))
def compare(self, interval, expected):
    """ Run interval.within on all elements in _tests and compare the
    result with expected """
    for i, value in enumerate(self._tests):
        self.assertEqual(expected[i], interval.within(value))