def test_argsreduce(): A = np.reshape(linspace(0, 19, 20), (4, 5)) B = 2 C = range(5) cond = np.ones(A.shape) [_A1, B1, _C1] = argsreduce(cond, A, B, C) assert B1.shape == (20,) cond[2, :] = 0 [A2, B2, C2] = argsreduce(cond, A, B, C) assert B2.shape == (15,) assert_allclose(A2, [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 15., 16., 17., 18., 19.]) assert_allclose(B2, [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) assert_allclose(C2, [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
def test_argsreduce(): A = np.reshape(linspace(0, 19, 20), (4, 5)) B = 2 C = range(5) cond = np.ones(A.shape) [_A1, B1, _C1] = argsreduce(cond, A, B, C) assert_equal(B1.shape, (20,)) cond[2, :] = 0 [A2, B2, C2] = argsreduce(cond, A, B, C) assert_equal(B2.shape, (15,)) assert_array_equal(A2, np.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 15., 16., 17., 18., 19.])) assert_array_equal( B2, np.array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])) assert_array_equal( C2, np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]))
def test_argsreduce(): A = linspace(0, 19, 20).reshape((4, 5)) B = 2 C = range(5) cond = np.ones(A.shape) [_A1, B1, _C1] = argsreduce(cond, A, B, C) assert_equal(B1.shape, (20,)) cond[2, :] = 0 [A2, B2, C2] = argsreduce(cond, A, B, C) assert_equal(B2.shape, (15,)) assert_array_equal(A2, np.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 15., 16., 17., 18., 19.])) assert_array_equal( B2, np.array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])) assert_array_equal( C2, np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]))
def _nlogps(self, theta, x):
    """
    Moran's negative log Product Spacings statistic

        where theta are the parameters (including loc and scale)

        Note the data in x must be sorted

    References
    -----------

    R. C. H. Cheng; N. A. K. Amin (1983)
    "Estimating Parameters in Continuous Univariate Distributions with a
    Shifted Origin.",
    Journal of the Royal Statistical Society. Series B (Methodological),
    Vol. 45, No. 3. (1983), pp. 394-403.

    R. C. H. Cheng; M. A. Stephens (1989)
    "A Goodness-Of-Fit Test Using Moran's Statistic with Estimated
    Parameters", Biometrika, 76, 2, pp 385-392

    Wong, T.S.T. and Li, W.K. (2006)
    "A note on the estimation of extreme value distributions using maximum
    product of spacings.",
    IMS Lecture Notes Monograph Series 2006, Vol. 52, pp. 272-283
    """
    # Continuous distributions carry both loc and scale at the tail of
    # theta; otherwise only loc is present.
    n = 2 if isinstance(self.dist, rv_continuous) else 1
    try:
        loc = theta[-n]
        scale = theta[-1]
        args = tuple(theta[:-n])
    except IndexError:
        raise ValueError("Not enough input arguments.")
    if not isinstance(self.dist, rv_continuous):
        # No scale parameter for non-continuous distributions.
        scale = 1
    # Invalid shape parameters or non-positive scale: return +inf so an
    # optimizer treats this theta as infeasible.
    if not self.dist._argcheck(*args) or scale <= 0:
        return np.inf
    dist = self.dist
    # Standardize the (sorted) data.
    x = asarray((x - loc) / scale)
    # Points on or outside the distribution's support are excluded from the
    # spacings and counted as "bad" (penalized below).
    cond0 = (x <= dist.a) | (dist.b <= x)
    Nbad = np.sum(cond0)
    if Nbad > 0:
        x = argsreduce(~cond0, x)[0]

    lowertail = True
    if lowertail:
        # Spacings = successive differences of the CDF, padded with 0 and 1
        # so the first and last spacings reach the support edges.
        prb = np.hstack((0.0, dist.cdf(x, *args), 1.0))
        dprb = np.diff(prb)
    else:
        # Equivalent formulation via the survival function (dead branch
        # while lowertail is hard-coded True).
        prb = np.hstack((1.0, dist.sf(x, *args), 0.0))
        dprb = -np.diff(prb)

    logD = log(dprb)
    # Tied observations give zero spacings (log -> -inf); replace the
    # spacing after each tie with the log-density at the tied value.
    dx = np.diff(x, axis=0)
    tie = (dx == 0)
    if any(tie):
        # TODO: implement this method for treating ties in data:
        # Assume measuring error is delta. Then compute
        #   yL = F(xi - delta, theta)
        #   yU = F(xi + delta, theta)
        # and replace
        #   logDj = log((yU - yL) / (r - 1)) for j = i+1, i+2, ..., i+r-1
        # The following substitution is OK when only minimization of T
        # is wanted.
        i_tie, = np.nonzero(tie)
        tiedata = x[i_tie]
        # log of the standardized pdf: log(pdf) - log(scale).
        logD[i_tie + 1] = log(dist._pdf(tiedata, *args)) - log(scale)

    # Non-finite log-spacings (zero or invalid spacings) also count as bad
    # points; the statistic is then the finite part plus a large penalty
    # proportional to the number of bad points.
    finiteD = np.isfinite(logD)
    nonfiniteD = 1 - finiteD
    Nbad += np.sum(nonfiniteD, axis=0)
    if Nbad > 0:
        T = -np.sum(logD[finiteD], axis=0) + 100.0 * log(_XMAX) * Nbad
    else:
        T = -np.sum(logD, axis=0)
    return T