Example #1
def itu1770(data, fs, gated=False):
	nc = data.shape[0]
	ns = data.shape[1]
	g = np.array([1.0, 1.0, 1.0, 0.0, 1.41, 1.41]) # FL+FR+FC+LFE+BL+BR
	g = g[0:nc].reshape(nc,1)
	b, a = kfilter_coeffs(fs)
	data_k = signal.lfilter(b, a, data, 1)
	if gated:
		ns_gate = int(fs*0.4)
		ns_step = int((1-0.75)*ns_gate)
		steps = int((ns-ns_gate) / (ns_step)) + 1
		z = np.zeros((nc, steps), dtype=float)
		for i in range(steps):
			j = i*ns_step
			z[:,i:i+1] = (data_k[:,j:j+ns_gate]**2).mean(1, keepdims=True)
		with np.errstate(divide='ignore'):
			l = -0.691 + 10.0*np.log10((g*z).sum(0))
		gamma_a = -70
		j_a = np.flatnonzero(l > gamma_a)
		gamma_r = -0.691 + 10.0*np.log10( (g*(np.take(z, j_a, 1).mean(1, keepdims=1))).sum() ) - 10
		j_r = np.flatnonzero(l > gamma_r)
		l_kg = -0.691 + 10.0*np.log10( (g*(np.take(z, j_r, 1).mean(1, keepdims=1))).sum() )
		return l_kg
	else:
		z = (data_k**2).mean(1, keepdims=1)
		with np.errstate(divide='ignore'):
			l_k = -0.691 + 10.0*np.log10( (g*z).sum() )
		return l_k
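A minimal illustrative sketch (not part of the original example) of why the divide='ignore' blocks above are needed: a silent gating block has zero mean-square power, so np.log10(0.0) would emit a RuntimeWarning; the -inf loudness it produces is later discarded by the absolute gate threshold.

import numpy as np

# log10 of a zero power yields -inf silently under divide='ignore'
z = np.array([0.0, 1e-3, 1e-2])
with np.errstate(divide='ignore'):
    l = -0.691 + 10.0 * np.log10(z)
print(l)  # [-inf -30.691 -20.691]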
Example #2
def getEff(s, cut, comp='joint', reco=True):

    eff, sig, relerr = {},{},{}
    a = np.log10(s['MC_energy'])
    Ebins = getEbins()
    Emids = getMids(Ebins)
    erangeDict = getErange()

    c0 = cut
    if comp != 'joint':
        compcut = s['comp'] == comp
        c0 = cut * compcut

    # Set radii for finding effective area
    rDict = {}
    keys = ['low', 'mid', 'high']
    for key in keys:
        rDict[key] = np.array([600, 800, 1100, 1700, 2600, 2900])
    rDict['low'][1] = 600
    Ebreaks = np.array([4, 5, 6, 7, 8, 9])
    rgrp = np.digitize(Emids, Ebreaks) - 1

    for key in keys:

        # Get efficiency and sigma
        simcut = np.array([sim in erangeDict[key] for sim in s['sim']])
        k = np.histogram(a[c0*simcut], bins=Ebins)[0]
        #k = Nfinder(a, c0*simcut)
        n = s['MC'][comp][key].astype('float')
        eff[key], sig[key], relerr[key] = np.zeros((3, len(k)))
        with np.errstate(divide='ignore', invalid='ignore'):
            eff[key] = k / n
            var = (k+1)*(k+2)/((n+2)*(n+3)) - (k+1)**2/((n+2)**2)
        sig[key] = np.sqrt(var)

        # Multiply by throw area
        r = np.array([rDict[key][i] for i in rgrp])
        eff[key] *= np.pi*(r**2)
        sig[key] *= np.pi*(r**2)

        # Deal with parts of the arrays with no information
        for i in range(len(eff[key])):
            if n[i] == 0:
                eff[key][i] = 0
                sig[key][i] = np.inf

    # Combine low, mid, and high energy datasets
    eff_tot = (np.sum([eff[key]/sig[key] for key in keys], axis=0) /
            np.sum([1/sig[key] for key in keys], axis=0))
    sig_tot = np.sqrt(1 / np.sum([1/sig[key]**2 for key in keys], axis=0))
    with np.errstate(divide='ignore'):
        relerr  = sig_tot / eff_tot

    # UGH : find better way to do this
    if reco:
        eff_tot = eff_tot[20:]
        sig_tot = sig_tot[20:]
        relerr  = relerr[20:]

    return eff_tot, sig_tot, relerr
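An illustrative numeric check (not part of the original example) of the variance line above, which is the Bayesian variance of a binomial efficiency k/n under a flat prior; for a fully efficient sample the error shrinks as n grows.

import numpy as np

k, n = 100.0, 100.0
var = (k + 1)*(k + 2)/((n + 2)*(n + 3)) - (k + 1)**2/(n + 2)**2
print(k / n, np.sqrt(var))  # 1.0 0.0097...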
Example #3
def _solve_quadratic(a__, b__, c__, min_val=0.0, max_val=1.0):
    """Solve quadratic equation and return the valid roots from interval
    [*min_val*, *max_val*]

    """

    def int_and_float_to_numpy(val):
        if not isinstance(val, np.ndarray):
            if isinstance(val, (int, float)):
                val = [val]
            val = np.array(val)
        return val

    a__ = int_and_float_to_numpy(a__)
    b__ = int_and_float_to_numpy(b__)
    c__ = int_and_float_to_numpy(c__)

    discriminant = b__ * b__ - 4 * a__ * c__

    # Solve the quadratic polynomial
    with np.errstate(invalid='ignore', divide='ignore'):
        x_1 = (-b__ + np.sqrt(discriminant)) / (2 * a__)
        x_2 = (-b__ - np.sqrt(discriminant)) / (2 * a__)

    # Find valid solutions, ie. 0 <= t <= 1
    x__ = x_1.copy()
    with np.errstate(invalid='ignore'):
        idxs = (x_1 < min_val) | (x_1 > max_val)
    x__[idxs] = x_2[idxs]

    with np.errstate(invalid='ignore'):
        idxs = (x__ < min_val) | (x__ > max_val)
    x__[idxs] = np.nan

    return x__
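A hypothetical usage sketch, assuming _solve_quadratic from the example above is in scope: one equation with an in-range root, one with complex roots (returned as nan), and one whose in-range root is the second solution.

import numpy as np

a = np.array([1.0, 1.0, 1.0])   # x^2 - x = 0, x^2 + 1 = 0, x^2 - 4x + 3 = 0
b = np.array([-1.0, 0.0, -4.0])
c = np.array([0.0, 1.0, 3.0])
print(_solve_quadratic(a, b, c))  # [ 1. nan  1.]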
Example #4
 def test_no_scaling(self):
     # Test writing image converting types when no scaling
     img_class = self.image_class
     hdr_class = img_class.header_class
     hdr = hdr_class()
     supported_types = supported_np_types(hdr)
     slope = 2
     inter = 10 if hdr.has_data_intercept else 0
     for in_dtype, out_dtype in itertools.product(
         FLOAT_TYPES + IUINT_TYPES,
         supported_types):
         # Need to check complex scaling
         mn_in, mx_in = _dt_min_max(in_dtype)
         arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype)
         img = img_class(arr, np.eye(4), hdr)
         img.set_data_dtype(out_dtype)
         img.header.set_slope_inter(slope, inter)
         with np.errstate(invalid='ignore'):
             rt_img = bytesio_round_trip(img)
         with suppress_warnings():  # invalid mult
             back_arr = rt_img.get_data()
         exp_back = arr.copy()
         if in_dtype not in COMPLEX_TYPES:
             exp_back = arr.astype(float)
         if out_dtype in IUINT_TYPES:
             with np.errstate(invalid='ignore'):
                 exp_back = np.round(exp_back)
             exp_back = np.clip(exp_back, *shared_range(float, out_dtype))
             exp_back = exp_back.astype(out_dtype).astype(float)
         else:
             exp_back = exp_back.astype(out_dtype)
         # Allow for small differences in large numbers
         with suppress_warnings():  # invalid value
             assert_allclose_safely(back_arr,
                                    exp_back * slope + inter)
Example #5
def test_3d():
    x = np.array([[9.0, 3.0, nan, nan, 9.0, nan],
                  [1.0, 1.0, 1.0, nan, nan, nan],
                  [2.0, 2.0, 0.1, nan, 1.0, nan],  # 0.0 kills geometric mean
                  [3.0, 9.0, 2.0, nan, nan, nan],
                  [4.0, 4.0, 3.0, 9.0, 2.0, nan],
                  [5.0, 5.0, 4.0, 4.0, nan, nan]])
    sectors = ['a', 'b', 'a', 'b', 'a', 'c']
    x = np.dstack((x,x))
    
    for func in funcs_one:
        xc = x.copy()
        args = (xc,)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
        
    for func in funcs_oneint:
        xc = x.copy()
        args = (xc, 2)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
        
    for func in funcs_onefrac:
        xc = x.copy()
        args = (xc, -1, 0.5)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
    
    for func in funcs_sect:
        xc = x.copy()
        args = (xc, sectors)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
Example #6
def test_SphericalCoordinates_bounds(pyntcloud_with_rgb_and_normals):
    scalar_field = SphericalCoordinates(
        pyntcloud=pyntcloud_with_rgb_and_normals)
    scalar_field.extract_info()

    with np.errstate(divide='ignore', invalid='ignore'):
        scalar_field.compute()

    assert all(scalar_field.to_be_added["polar"] >= 0)
    assert all(scalar_field.to_be_added["polar"] <= 180)

    assert all(scalar_field.to_be_added["azimuthal"] >= -180)
    assert all(scalar_field.to_be_added["azimuthal"] <= 180)

    scalar_field = SphericalCoordinates(
        pyntcloud=pyntcloud_with_rgb_and_normals,
        degrees=False)
    scalar_field.extract_info()

    with np.errstate(divide='ignore', invalid='ignore'):
        scalar_field.compute()

    assert all(scalar_field.to_be_added["polar"] >= 0)
    assert all(scalar_field.to_be_added["polar"] <= np.pi)

    assert all(scalar_field.to_be_added["azimuthal"] >= -np.pi)
    assert all(scalar_field.to_be_added["azimuthal"] <= np.pi)
Example #7
    def test_div(self):

        # integer div, but deal with the 0's (GH 9144)
        p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        result = p / p

        expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),
                              'second': Series([nan, nan, nan, 1])})
        assert_frame_equal(result, expected)

        with np.errstate(all='ignore'):
            arr = p.values.astype('float') / p.values
        result2 = DataFrame(arr, index=p.index,
                            columns=p.columns)
        assert_frame_equal(result2, expected)

        result = p / 0
        expected = DataFrame(np.inf, index=p.index, columns=p.columns)
        expected.iloc[0:3, 1] = nan
        assert_frame_equal(result, expected)

        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = p.values.astype('float64') / 0
        result2 = DataFrame(arr, index=p.index,
                            columns=p.columns)
        assert_frame_equal(result2, expected)

        p = DataFrame(np.random.randn(10, 5))
        s = p[0]
        res = s / p
        res2 = p / s
        self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
Example #8
def compute_mandelbrot():
	N_max = 50
	some_threshold = 50

	grid_interval = 1000

	# construct the grid 
	x = np.linspace(-2, 1, grid_interval)
	y = np.linspace(-1.5, 1.5, grid_interval)

	c = x[:, np.newaxis] + 1j*y[np.newaxis, :]

	# do the iteration 
	z = c 
	for v in range(N_max):
		with np.errstate(all="ignore"):  # suppress overflow and invalid-value warnings
			z = z**2 + c

	with np.errstate(all="ignore"):  # suppress warnings from abs() of overflowed values
		# form a 2-D boolean mask
		mask = (abs(z) < some_threshold)

	# save the result to an image
	plt.imshow(mask.T, extent=[-2, 1, -1.5, 1.5])
	plt.gray()
	plt.savefig('mandelbrot.png')
Example #9
def test_return_array():
    "Check that functions return a numpy array or a scalar."
    
    x = np.array([[9.0, 3.0, nan, nan, 9.0, nan],
                  [1.0, 1.0, 1.0, nan, nan, nan],
                  [2.0, 2.0, 9.0, nan, 1.0, nan],
                  [3.0, 9.0, 2.0, nan, nan, nan],
                  [4.0, 4.0, 3.0, 9.0, 2.0, nan],
                  [5.0, 5.0, 4.0, 4.0, nan, nan]])
    sectors = ['a', 'b', 'a', 'b', 'a', 'c']
    
    for func in funcs_one:
        xc = x.copy()
        args = (xc,)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
        
    for func in funcs_oneint:
        xc = x.copy()
        args = (xc, 2)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
        
    for func in funcs_onefrac:
        xc = x.copy()
        args = (xc, -1, 0.5)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
        
    for func in funcs_sect:
        xc = x.copy()
        args = (xc, sectors)
        with np.errstate(invalid='ignore', divide='ignore'):
            yield check_return_array, func, args
Example #10
def test_a2f_min_max():
    # Check min and max thresholding of array to file
    str_io = BytesIO()
    for in_dt in (np.float32, np.int8):
        for out_dt in (np.float32, np.int8):
            arr = np.arange(4, dtype=in_dt)
            # min thresholding
            with np.errstate(invalid='ignore'):
                data_back = write_return(arr, str_io, out_dt, 0, 0, 1, 1)
            assert_array_equal(data_back, [1, 1, 2, 3])
            # max thresholding
            with np.errstate(invalid='ignore'):
                data_back = write_return(arr, str_io, out_dt, 0, 0, 1, None, 2)
            assert_array_equal(data_back, [0, 1, 2, 2])
            # min max thresholding
            data_back = write_return(arr, str_io, out_dt, 0, 0, 1, 1, 2)
            assert_array_equal(data_back, [1, 1, 2, 2])
    # Check that works OK with scaling and intercept
    arr = np.arange(4, dtype=np.float32)
    data_back = write_return(arr, str_io, int, 0, -1, 0.5, 1, 2)
    assert_array_equal(data_back * 0.5 - 1, [1, 1, 2, 2])
    # Even when scaling is negative
    data_back = write_return(arr, str_io, int, 0, 1, -0.5, 1, 2)
    assert_array_equal(data_back * -0.5 + 1, [1, 1, 2, 2])
    # Check complex numbers
    arr = np.arange(4, dtype=np.complex64) + 100j
    with suppress_warnings():  # cast to real
        data_back = write_return(arr, str_io, out_dt, 0, 0, 1, 1, 2)
    assert_array_equal(data_back, [1, 1, 2, 2])
Example #11
 def predict_proba(self, X):
     """
     Return the probability that the class label is +1 given X.
     :param X: An examples-by-features NumPy matrix.
     :returns: Pr[Y=1|X]
     """
     Xstd = self._standardize_inputs(X)
     Pacc = np.zeros((2, X.shape[0]))
     for y, yprob in enumerate(self._yparam):
         for i, matrix in enumerate(self._params):
             # This is a v-length vector of probabilities conditioned on y
             probabilities = matrix[y]
             # The X values are indices into this vector!  We just sum up
             # logs (instead of multiplying).  We silence divide by zero
             # errors because that's what NumPy gives when it takes the log
             # of 0.  Thankfully, np.log(0) = -inf, and np.exp(-np.inf) = 0,
             # so we're safe here!
             with np.errstate(divide='ignore'):
                 Pacc[y] += np.log(probabilities[Xstd[:, i].astype(int)])
         Pacc[y] += np.log(yprob)
     # Now, return the probability Y is positive conditioned on X!
     probs = np.exp(Pacc)
     with np.errstate(invalid='ignore'):
         # We silence invalid value warnings here, because that's what we
         # get when we do 0/0.  In this case, we haven't seen any training
         # examples with those values, and m is set to 0, so we just go
         # 50/50 and assign 0.5!
         rv = probs[1] / (probs[0] + probs[1])
     rv[np.isnan(rv)] = 0.5
     return rv
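An illustrative sketch (not part of the original example) of the log-domain trick used above: summing logs avoids underflow when multiplying many probabilities, and np.log(0) = -inf under divide='ignore' round-trips through np.exp back to an exact zero probability.

import numpy as np

probs = np.array([0.5, 0.0, 0.25])
with np.errstate(divide='ignore'):
    log_p = np.log(probs)      # [-0.693, -inf, -1.386], no warning
print(np.exp(log_p.sum()))     # 0.0, the product of the probabilities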
Example #12
def rectify_obs(obs):
    """Make sure the passed obs dictionary conforms to code expectations,
    and make simple fixes when possible.
    """
    k = obs.keys()
    if 'maggies' not in k:
        obs['maggies'] = None
        obs['maggies_unc'] = None
    if 'spectrum' not in k:
        obs['spectrum'] = None
        obs['unc'] = None
    if obs['maggies'] is not None:
        assert (len(obs['filters']) == len(obs['maggies']))
        assert ('maggies_unc' in k)
        assert ((len(obs['maggies']) == len(obs['maggies_unc'])) or
                (np.size(obs['maggies_unc']) == 1))
        m = obs.get('phot_mask', np.ones(len(obs['maggies']), dtype=bool))
        obs['phot_mask'] = (m * np.isfinite(obs['maggies']) *
                            np.isfinite(obs['maggies_unc']) *
                            (obs['maggies_unc'] > 0))
        try:
            obs['filternames'] = [f.name for f in obs['filters']]
        except (KeyError, AttributeError):
            pass

    if 'logify_spectrum' not in k:
        obs['logify_spectrum'] = False
    if obs['spectrum'] is not None:
        assert (len(obs['wavelength']) == len(obs['spectrum']))
        assert ('unc' in k)
        with np.errstate(invalid='ignore'):
            m = obs.get('mask', np.ones(len(obs['wavelength']), dtype=bool))
            obs['mask'] = (m * np.isfinite(obs['spectrum']) *
                           np.isfinite(obs['unc']) * (obs['unc'] > 0))
    return obs
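A hypothetical usage sketch, assuming rectify_obs from the example above is in scope: a spectrum-only obs dictionary gets its photometry keys filled with None, and the mask drops non-finite fluxes and non-positive uncertainties.

import numpy as np

obs = {'wavelength': np.array([4000.0, 5000.0, 6000.0]),
       'spectrum': np.array([1.0, np.nan, 2.0]),
       'unc': np.array([0.1, 0.1, -1.0])}
obs = rectify_obs(obs)
print(obs['maggies'])  # None
print(obs['mask'])     # [ True False False]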
Example #13
 def test_correlation_9(self):
     "farray.correlation_9"
     x = self.a1
     y = self.a2
     x2 = np.empty((2, x.shape[0], x.shape[1]))
     x2[0] = x
     x2[1] = x
     y2 = np.empty((2, y.shape[0], y.shape[1]))
     y2[0] = y
     y2[1] = y        
     with np.errstate(invalid='ignore'):
         corr = correlation(x, y, axis=-1)
     desired = np.array([nan, 1, -1, -0.5]) 
     aae(corr, desired, err_msg="aggregate of 1d tests")
     x = self.b1
     y = self.b2
     x2 = np.empty((2, x.shape[0], x.shape[1]))
     x2[0] = x
     x2[1] = x
     y2 = np.empty((2, y.shape[0], y.shape[1]))
     y2[0] = y
     y2[1] = y        
     with np.errstate(invalid='ignore'):
         corr = correlation(x, y, axis=-1)
     desired = np.array([nan, 1, -1, -0.5]) 
     aae(corr, desired, err_msg="aggregate of 1d tests")
Example #14
def test_nanvar_issue60():
    "nanvar regression test (issue #60)"

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        f = bn.nanvar([1.0], ddof=1)
        with np.errstate(invalid='ignore'):
            s = bn.slow.nanvar([1.0], ddof=1)
        assert_equal(f, s, err_msg="bn.nanvar([1.0], ddof=1) wrong")

        f = bn.nanvar([1], ddof=1)
        with np.errstate(invalid='ignore'):
            s = bn.slow.nanvar([1], ddof=1)
        assert_equal(f, s, err_msg="bn.nanvar([1], ddof=1) wrong")

        f = bn.nanvar([1, np.nan], ddof=1)
        with np.errstate(invalid='ignore'):
            s = bn.slow.nanvar([1, np.nan], ddof=1)
        assert_equal(f, s, err_msg="bn.nanvar([1, nan], ddof=1) wrong")

        f = bn.nanvar([[1, np.nan], [np.nan, 1]], axis=0, ddof=1)
        with np.errstate(invalid='ignore'):
            s = bn.slow.nanvar([[1, np.nan], [np.nan, 1]], axis=0, ddof=1)
        assert_equal(f, s, err_msg="issue #60 regression")
Example #15
    def test_modulo(self):
        # GH3590, modulo as ints
        p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})

        # this is technically wrong as the integer portion is coerced to float
        # ###
        expected = DataFrame({'first': Series([0, 0, 0, 0], dtype='float64'),
                              'second': Series([np.nan, np.nan, np.nan, 0])})
        result = p % p
        assert_frame_equal(result, expected)

        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = p.values % p.values
        result2 = DataFrame(arr, index=p.index,
                            columns=p.columns, dtype='float64')
        result2.iloc[0:3, 1] = np.nan
        assert_frame_equal(result2, expected)

        result = p % 0
        expected = DataFrame(np.nan, index=p.index, columns=p.columns)
        assert_frame_equal(result, expected)

        # numpy has a slightly different (wrong) treatment
        with np.errstate(all='ignore'):
            arr = p.values.astype('float64') % 0
        result2 = DataFrame(arr, index=p.index, columns=p.columns)
        assert_frame_equal(result2, expected)

        # not commutative with series
        p = DataFrame(np.random.randn(10, 5))
        s = p[0]
        res = s % p
        res2 = p % s
        self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
Example #16
File: move_test.py Project: fhal/la
def move_unit_maker(func, arrfunc, methods):
    "Test that different mov methods give the same results on 2d input."
    arr1 = np.array([1, 2, 3, 4, 5, 6, nan, nan, 7, 8, 9])
    arr2 = np.array([[9.0, 3.0, nan, nan, 9.0, nan],
                     [1.0, 1.0, 1.0, nan, nan, nan],
                     [2.0, 2.0, 0.1, nan, 1.0, nan],
                     [3.0, 9.0, 2.0, nan, nan, nan],
                     [4.0, 4.0, 3.0, 9.0, 2.0, nan],
                     [5.0, 5.0, 4.0, 4.0, nan, nan]]) 
    arr3 = np.arange(60).reshape(3, 4, 5)
    arr4 = np.array([nan, nan, nan])
    arrs = [arr1, arr2, arr3, arr4]
    msg = '\nfunc %s | method %s | nd %d | window %d | axis %d\n'
    for arr in arrs:
        for axis in range(arr.ndim):
            for w in range(1, arr.shape[axis]):
                actual = func(arr, window=w, axis=axis, method='loop')
                for method in methods:
                    if method == 'func_loop':
                        with np.errstate(invalid='ignore'):
                            d = move_func(arrfunc, arr, window=w, axis=axis,
                                          method='loop')
                    elif method == 'func_strides':
                        with np.errstate(invalid='ignore'):
                            d = move_func(arrfunc, arr, window=w, axis=axis,
                                          method='strides')
                    else:
                        d = func(arr, window=w, axis=axis, method=method) 
                    err_msg = msg % (func.__name__, method, arr.ndim, w, axis)
                    assert_array_almost_equal(actual, d, 10, err_msg)
Example #17
    def _convert_images_to_uint8(self, image_r, image_g, image_b):
        """Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images"""
        image_r = image_r - self.minimum[0]  # n.b. makes copy
        image_g = image_g - self.minimum[1]
        image_b = image_b - self.minimum[2]

        fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b))

        image_rgb = [image_r, image_g, image_b]
        for c in image_rgb:
            c *= fac
            with np.errstate(invalid='ignore'):
                c[c < 0] = 0                # individual bands can still be < 0, even if fac isn't

        pixmax = self._uint8Max
        r0, g0, b0 = image_rgb           # copies -- could work row by row to minimise memory usage

        with np.errstate(invalid='ignore', divide='ignore'):  # n.b. np.where can't and doesn't short-circuit
            for i, c in enumerate(image_rgb):
                c = np.where(r0 > g0,
                             np.where(r0 > b0,
                                      np.where(r0 >= pixmax, c*pixmax/r0, c),
                                      np.where(b0 >= pixmax, c*pixmax/b0, c)),
                             np.where(g0 > b0,
                                      np.where(g0 >= pixmax, c*pixmax/g0, c),
                                      np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8)
                c[c > pixmax] = pixmax

                image_rgb[i] = c

        return image_rgb
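An illustrative sketch (not part of the original example) of the "np.where can't and doesn't short-circuit" comment: both branches are evaluated eagerly, so a division such as c*pixmax/r0 runs even where the denominator is zero, and errstate suppresses the resulting warnings.

import numpy as np

r0 = np.array([0.0, 2.0])
c = np.array([1.0, 1.0])
with np.errstate(divide='ignore', invalid='ignore'):
    out = np.where(r0 > 1.0, c / r0, c)  # c / r0 is [inf, 0.5], computed anyway
print(out)  # [1.  0.5]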
Example #18
def _findAndCategorizeContours(binary):
    # see auromat.utils.outline_opencv for why we need to pad the image
    imCv = np.zeros((binary.shape[0]+2, binary.shape[1]+2), dtype=np.uint8)
    imCv[1:-1,1:-1] = binary
    contours,_ = cv.findContours(imCv, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)    
    contours = np.asarray(contours)
    contours -= 1

    area = np.asarray([cv.contourArea(c) for c in contours])
    rectAxes = np.asarray([cv.minAreaRect(c)[1] for c in contours])

    # isBigContour needs to be big enough to not discard bigger stars!
    # this could leave some spacecraft structures intact which may or may not confuse astrometry    
    # TODO the ratio below should depend on the estimated celestial pixel scale
    #      and the exposure time (longer exposure = longer star trails) 
    bigContourAreaRatio = 0.000013 # 0.0013% of the image area (~160 pixels for 12MP images)
    bigContourArea = bigContourAreaRatio*(binary.shape[0]*binary.shape[1])
    isBigContour = area > int(bigContourArea)
    isSmallContour = ~isBigContour
    
    longRatioThreshold = 5
    with np.errstate(divide='ignore', invalid='ignore'): # division produces nans and infs
        rectRatio = rectAxes[:,0]/rectAxes[:,1] if len(contours) > 0 else np.array([])

    with np.errstate(invalid='ignore'):
        isLongContour = np.logical_and(area > 20, # exclude very tiny long contours (could be stars) and inf ratios
                                       np.logical_or(rectRatio > longRatioThreshold, 
                                                     rectRatio < 1/longRatioThreshold)
                                       )
    isSmallLongContour = np.logical_and(isSmallContour, isLongContour)
    isSmallShortContour = np.logical_and(isSmallContour, ~isLongContour)
    
    return contours, area, isBigContour, isSmallLongContour, isSmallShortContour
Example #19
def test_nans():
    apparent_zenith = np.array([10, np.nan, 10])
    apparent_azimuth = np.array([180, 180, np.nan])
    with np.errstate(invalid='ignore'):
        tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                           axis_tilt=0, axis_azimuth=0,
                                           max_angle=90, backtrack=True,
                                           gcr=2.0/7.0)
    expect = {'tracker_theta': np.array([0, nan, nan]),
              'aoi': np.array([10, nan, nan]),
              'surface_azimuth': np.array([90, nan, nan]),
              'surface_tilt': np.array([0, nan, nan])}
    for k, v in expect.items():
        assert_allclose(tracker_data[k], v)

    # repeat with Series because nans can differ
    apparent_zenith = pd.Series(apparent_zenith)
    apparent_azimuth = pd.Series(apparent_azimuth)
    with np.errstate(invalid='ignore'):
        tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                           axis_tilt=0, axis_azimuth=0,
                                           max_angle=90, backtrack=True,
                                           gcr=2.0/7.0)
    expect = pd.DataFrame(np.array(
        [[ 0., 10., 90.,  0.],
         [nan, nan, nan, nan],
         [nan, nan, nan, nan]]),
        columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
    assert_frame_equal(tracker_data, expect)
Example #20
 def test_scaling(self):
     # Test integer scaling from float
     # Analyze headers cannot do float-integer scaling
     hdr = self.header_class()
     assert_true(hdr.default_x_flip)
     shape = (1,2,3)
     hdr.set_data_shape(shape)
     hdr.set_data_dtype(np.float32)
     data = np.ones(shape, dtype=np.float64)
     S = BytesIO()
     # Writing to float datatype doesn't need scaling
     hdr.data_to_fileobj(data, S)
     rdata = hdr.data_from_fileobj(S)
     assert_array_almost_equal(data, rdata)
     # Now test writing to integers
     hdr.set_data_dtype(np.int32)
     # Writing to int needs scaling, and raises an error if we can't scale
     if not hdr.has_data_slope:
         assert_raises(HeaderTypeError, hdr.data_to_fileobj, data, BytesIO())
     # But if we aren't scaling, convert the floats to integers and write
     with np.errstate(invalid='ignore'):
         hdr.data_to_fileobj(data, S, rescale=False)
     rdata = hdr.data_from_fileobj(S)
     assert_true(np.allclose(data, rdata))
     # This won't work for floats that aren't close to integers
     data_p5 = data + 0.5
     with np.errstate(invalid='ignore'):
         hdr.data_to_fileobj(data_p5, S, rescale=False)
     rdata = hdr.data_from_fileobj(S)
     assert_false(np.allclose(data_p5, rdata))
Example #21
 def solve_for(self, data, inputs, chunk, pol, sideband='USB'):
     baselines = list(baseline for baseline in data.baselines if ((baseline.left in inputs) and (baseline.right in inputs)))
     corr_matrix = zeros([SWARM_CHANNELS, len(inputs), len(inputs)], dtype=complex128)
     for baseline in baselines:
         left_i = inputs.index(baseline.left)
         right_i = inputs.index(baseline.right)
         baseline_data = data[baseline, sideband]
         complex_data = baseline_data[0::2] + 1j * baseline_data[1::2]
         corr_matrix[:, left_i, right_i] = complex_data
         corr_matrix[:, right_i, left_i] = complex_data.conj()
     this_reference = SwarmInput(self.reference.ant, chunk, pol)
     referenced_solver = partial(solve_cgains, ref=inputs.index(this_reference))
     if self.normed:
         with errstate(invalid='ignore'):
             corr_matrix = complex_nan_to_num(corr_matrix / abs(corr_matrix))
     if not self.single_chan:
         full_spec_gains = array(self.map(referenced_solver, corr_matrix))
         delays, phases = solve_delay_phase(full_spec_gains)
     else:
         full_spec_gains = array(self.map(referenced_solver, [corr_matrix.mean(axis=0),]))
         phases = (180.0/pi) * angle(full_spec_gains.mean(axis=0))
         delays = zeros(len(inputs))
     amplitudes = abs(full_spec_gains).mean(axis=0)
     with errstate(invalid='ignore'):
         efficiency = (abs(full_spec_gains.sum(axis=1)) / abs(full_spec_gains).sum(axis=1)).real
     return efficiency, vstack([amplitudes, delays, phases])
Example #22
    def _convertImagesToUint8(self, imageR, imageG, imageB):
        """Use the mapping to convert images imageR, imageG, and imageB to a triplet of uint8 images
        """
        imageR = imageR - self.minimum[0]  # n.b. makes copy
        imageG = imageG - self.minimum[1]
        imageB = imageB - self.minimum[2]

        fac = self.mapIntensityToUint8(self.intensity(imageR, imageG, imageB))

        imageRGB = [imageR, imageG, imageB]
        with np.errstate(invalid="ignore"):  # suppress NAN warnings
            for c in imageRGB:
                c *= fac
                # individual bands can still be < 0, even if fac isn't
                c[c < 0] = 0

        pixmax = self._uint8Max
        # copies -- could work row by row to minimise memory usage
        r0, g0, b0 = imageRGB

        # n.b. np.where can't and doesn't short-circuit
        with np.errstate(invalid='ignore', divide='ignore'):
            for i, c in enumerate(imageRGB):
                c = np.where(r0 > g0,
                             np.where(r0 > b0,
                                      np.where(r0 >= pixmax, c*pixmax/r0, c),
                                      np.where(b0 >= pixmax, c*pixmax/b0, c)),
                             np.where(g0 > b0,
                                      np.where(g0 >= pixmax, c*pixmax/g0, c),
                                      np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8)
                c[c > pixmax] = pixmax

                imageRGB[i] = c

        return imageRGB
Example #23
            def _evaluate_numeric_binop(self, other):
                if isinstance(other, ABCSeries):
                    return NotImplemented
                elif isinstance(other, ABCTimedeltaIndex):
                    # Defer to TimedeltaIndex implementation
                    return NotImplemented
                elif isinstance(other, (timedelta, np.timedelta64)):
                    # GH#19333 is_integer evaluated True on timedelta64,
                    # so we need to catch these explicitly
                    if reversed:
                        return op(other, self._int64index)
                    return op(self._int64index, other)

                other = self._validate_for_numeric_binop(other, op, opstr)
                attrs = self._get_attributes_dict()
                attrs = self._maybe_update_attributes(attrs)

                left, right = self, other
                if reversed:
                    left, right = right, left

                try:
                    # apply if we have an override
                    if step:
                        with np.errstate(all='ignore'):
                            rstep = step(left._step, right)

                        # we don't have a representable op
                        # so return a base index
                        if not is_integer(rstep) or not rstep:
                            raise ValueError

                    else:
                        rstep = left._step

                    with np.errstate(all='ignore'):
                        rstart = op(left._start, right)
                        rstop = op(left._stop, right)

                    result = RangeIndex(rstart,
                                        rstop,
                                        rstep,
                                        **attrs)

                    # for compat with numpy / Int64Index
                    # even if we can represent as a RangeIndex, return
                    # as a Float64Index if we have float-like descriptors
                    if not all(is_integer(x) for x in
                               [rstart, rstop, rstep]):
                        result = result.astype('float64')

                    return result

                except (ValueError, TypeError, AttributeError,
                        ZeroDivisionError):
                    # Defer to Int64Index implementation
                    if reversed:
                        return op(other, self._int64index)
                    return op(self._int64index, other)
Example #24
def worm_path_curvature(x, y, fps, ventral_mode):
    """
    Parameters:
    -----------
    x : 
      Worm skeleton x coordinates, []

    """

    # https://github.com/JimHokanson/SegwormMatlabClasses/blob/master/%2Bseg_worm/%2Bfeatures/%40path/wormPathCurvature.m

    BODY_I = slice(44, 3, -1)

    # This was nanmean but I think mean will be fine. nanmean was
    # causing the program to crash
    diff_x = np.mean(np.diff(x[BODY_I, :], axis=0), axis=0)
    diff_y = np.mean(np.diff(y[BODY_I, :], axis=0), axis=0)
    avg_body_angles_d = np.arctan2(diff_y, diff_x) * 180 / np.pi

    # NOTE: This is what is in the MRC code, but differs from their description.
    # In this case I think the skeleton filtering makes sense so we'll keep it.
    speed, ignored_variable, motion_direction = \
        velocity_module.compute_velocity(x[BODY_I, :], y[BODY_I,:], \
                                         avg_body_angles_d, config.BODY_DIFF, ventral_mode)

    frame_scale = velocity_module.get_frames_per_sample(config.BODY_DIFF)
    half_frame_scale = (frame_scale - 1) / 2

    # Compute the angle differentials and distances.
    speed = np.abs(speed)

    # At each frame, we'll compute the differences in motion direction using
    # some frame in the future relative to the current frame
    #
    #i.e. diff_motion[current_frame] = motion_direction[current_frame + frame_scale] - motion_direction[current_frame]
    #------------------------------------------------
    diff_motion = np.empty(speed.shape)
    diff_motion[:] = np.nan

    right_max_I = len(diff_motion) - frame_scale
    diff_motion[0:(right_max_I + 1)] = motion_direction[(frame_scale - 1):] - motion_direction[0:(right_max_I + 1)]

    with np.errstate(invalid='ignore'):
        diff_motion[diff_motion >= 180] -= 360
        diff_motion[diff_motion <= -180] += 360

    distance_I_base = slice(half_frame_scale, -(frame_scale + 1), 1)
    distance_I_shifted = slice(half_frame_scale + frame_scale, -1, 1)

    distance = np.empty(speed.shape)
    distance[:] = np.nan

    distance[distance_I_base] = speed[distance_I_base] + \
        speed[distance_I_shifted] * config.BODY_DIFF / 2

    with np.errstate(invalid='ignore'):
        distance[distance < 1] = np.nan

    return (diff_motion / distance) * (np.pi / 180)
Example #25
 def test_comparison_protected_from_errstate(self):
     missing_df = tm.makeDataFrame()
     missing_df.iloc[0]['A'] = np.nan
     with np.errstate(invalid='ignore'):
         expected = missing_df.values < 0
     with np.errstate(invalid='raise'):
         result = (missing_df < 0).values
     tm.assert_numpy_array_equal(result, expected)
Example #26
 def test_special(self):
     assert_equal(ncu.log1p(np.nan), np.nan)
     assert_equal(ncu.log1p(np.inf), np.inf)
     with np.errstate(divide="ignore"):
         assert_equal(ncu.log1p(-1.0), -np.inf)
     with np.errstate(invalid="ignore"):
         assert_equal(ncu.log1p(-2.0), np.nan)
         assert_equal(ncu.log1p(-np.inf), np.nan)
Example #27
File: pbc.py Project: ncrubin/pyscf
def get_coulG(cell, k=np.zeros(3), exx=False, mf=None):
    '''Calculate the Coulomb kernel for all G-vectors, handling G=0 and exchange.

    Args:
        cell : instance of :class:`Cell`
        k : (3,) ndarray
        exx : bool
            Whether this is an exchange matrix element.
        mf : instance of :class:`SCF`

    Returns:
        coulG : (ngs,) ndarray
            The Coulomb kernel.

    '''
    kG = k + cell.Gv
    absG2 = np.einsum('gi,gi->g', kG, kG)

    try:
        kpts = mf.kpts
    except AttributeError:
        kpts = k.reshape(1,3)
    Nk = len(kpts)

    if exx is False or mf.exxdiv is None:
        with np.errstate(divide='ignore'):
            coulG = 4*np.pi/absG2
        if np.linalg.norm(k) < 1e-8:
            coulG[0] = 0.
    elif mf.exxdiv == 'vcut_sph':
        Rc = (3*Nk*cell.vol/(4*np.pi))**(1./3)
        with np.errstate(divide='ignore',invalid='ignore'):
            coulG = 4*np.pi/absG2*(1.0 - np.cos(np.sqrt(absG2)*Rc))
        if np.linalg.norm(k) < 1e-8:
            coulG[0] = 4*np.pi*0.5*Rc**2
    elif mf.exxdiv == 'ewald':
        with np.errstate(divide='ignore'):
            coulG = 4*np.pi/absG2
        if np.linalg.norm(k) < 1e-8:
            coulG[0] = Nk*cell.vol*madelung(cell, kpts)
    elif mf.exxdiv == 'vcut_ws':
        if not mf.exx_built:
            mf.precompute_exx()
        with np.errstate(divide='ignore',invalid='ignore'):
            coulG = 4*np.pi/absG2*(1.0 - np.exp(-absG2/(4*mf.exx_alpha**2))) + 0j
        if np.linalg.norm(k) < 1e-8:
            coulG[0] = np.pi / mf.exx_alpha**2
        # Index k+cell.Gv into the precomputed vq and add on
        gxyz = np.round(np.dot(kG, mf.exx_kcell.h)/(2*np.pi)).astype(int)
        ngs = 2*mf.exx_kcell.gs+1
        gxyz = (gxyz + ngs)%(ngs)
        qidx = (gxyz[:,0]*ngs[1] + gxyz[:,1])*ngs[2] + gxyz[:,2]
        #qidx = [np.linalg.norm(mf.exx_q-kGi,axis=1).argmin() for kGi in kG]
        maxqv = abs(mf.exx_q).max(axis=0)
        is_lt_maxqv = (abs(kG) <= maxqv).all(axis=1)
        coulG += mf.exx_vq[qidx] * is_lt_maxqv

    return coulG
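An illustrative sketch (not part of the original example) of the G = 0 handling above: 4*pi/|G|^2 diverges at the zero vector, so the divide warning is silenced and the divergent element is overwritten explicitly afterwards.

import numpy as np

absG2 = np.array([0.0, 1.0, 4.0])
with np.errstate(divide='ignore'):
    coulG = 4 * np.pi / absG2   # first element is inf, no warning
coulG[0] = 0.0
print(coulG)  # [ 0.         12.56637061  3.14159265]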
Example #28
 def test_invalid(self):
     with np.errstate(all='raise', under='ignore'):
         a = -np.arange(3)
         # This should work
         with np.errstate(invalid='ignore'):
             np.sqrt(a)
         # While this should fail!
         with assert_raises(FloatingPointError):
             np.sqrt(a)
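An illustrative sketch (not part of the original example) of the nesting semantics this test relies on: an inner errstate overrides the outer policy only inside its block, and the outer 'raise' policy is restored on exit.

import numpy as np

with np.errstate(all='raise'):
    with np.errstate(invalid='ignore'):
        np.sqrt(np.array([-1.0]))      # silent, returns [nan]
    try:
        np.sqrt(np.array([-1.0]))      # 'raise' is back in force
    except FloatingPointError as err:
        print('raised:', err)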
Example #29
 def test_no_scaling(self):
     # Test writing image converting types when not calculating scaling
     img_class = self.image_class
     hdr_class = img_class.header_class
     hdr = hdr_class()
     supported_types = supported_np_types(hdr)
     # Any old non-default slope and intercept
     slope = 2
     inter = 10 if hdr.has_data_intercept else 0
     for in_dtype, out_dtype in itertools.product(
         FLOAT_TYPES + IUINT_TYPES,
         supported_types):
         # Need to check complex scaling
         mn_in, mx_in = _dt_min_max(in_dtype)
         arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype)
         img = img_class(arr, np.eye(4), hdr)
         img.set_data_dtype(out_dtype)
         # Setting the scaling means we don't calculate it later
         img.header.set_slope_inter(slope, inter)
         with np.errstate(invalid='ignore'):
             rt_img = bytesio_round_trip(img)
         with suppress_warnings():  # invalid mult
             back_arr = rt_img.get_data()
         exp_back = arr.copy()
         # If converting to floating point type, casting is direct.
         # Otherwise we will need to do float-(u)int casting at some point
         if out_dtype in IUINT_TYPES:
             if in_dtype in FLOAT_TYPES:
                 # Working precision is (at least) float
                 exp_back = exp_back.astype(float)
                 # Float to iu conversion will always round, clip
                 with np.errstate(invalid='ignore'):
                     exp_back = np.round(exp_back)
                 if in_dtype in FLOAT_TYPES:
                     # Clip to shared range of working precision
                     exp_back = np.clip(exp_back,
                                        *shared_range(float, out_dtype))
             else:  # iu input and output type
                 # No scaling, never gets converted to float.
                 # Does get clipped to range of output type
                 mn_out, mx_out = _dt_min_max(out_dtype)
                 if (mn_in, mx_in) != (mn_out, mx_out):
                     # Use smaller of input, output range to avoid np.clip
                     # upcasting the array because of large clip limits.
                     exp_back = np.clip(exp_back,
                                        max(mn_in, mn_out),
                                        min(mx_in, mx_out))
         if out_dtype in COMPLEX_TYPES:
             # always cast to real from complex
             exp_back = exp_back.astype(out_dtype)
         else:
             # Cast to working precision
             exp_back = exp_back.astype(float)
         # Allow for small differences in large numbers
         with suppress_warnings():  # invalid value
             assert_allclose_safely(back_arr,
                                    exp_back * slope + inter)
Example #30
 def test_divide(self):
     with np.errstate(all='raise', under='ignore'):
         a = -np.arange(3)
         # This should work
         with np.errstate(divide='ignore'):
             a // 0
         # While this should fail!
         with assert_raises(FloatingPointError):
             a // 0
Example #31
    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.

        y : array-like, shape (n_samples,)
            target values.
    
        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)

        X, y, Xmean, ymean, Xstd = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter

        Gram = self._get_gram()

        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X,
            y,
            Gram=Gram,
            copy_X=copy_X,
            copy_Gram=True,
            alpha_min=0.0,
            method='lasso',
            verbose=self.verbose,
            max_iter=max_iter,
            eps=self.eps,
            return_n_iter=True)

        n_samples = X.shape[0]

        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')

        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R**2, axis=0)

        df = np.zeros(coef_path_.shape[1], dtype=int)  # Degrees of freedom
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
            df[k] = np.sum(mask)

        self.alphas_ = alphas_
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)

        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
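An illustrative numeric sketch (not part of the original example) of the criterion above, n_samples * log(MSE) + K * df with K = 2 for AIC and K = log(n_samples) for BIC: a path that fits exactly has MSE = 0, so log(MSE) = -inf, which is why divide='ignore' is needed.

import numpy as np

n_samples, K = 50, 2  # AIC
mse = np.array([1.0, 0.5, 0.0])
df = np.array([1, 2, 3])
with np.errstate(divide='ignore'):
    criterion = n_samples * np.log(mse) + K * df
print(np.argmin(criterion))  # 2, the zero-MSE path wins with -inf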
Example #32
 def within_tol(x, y, atol, rtol):
     with np.errstate(invalid='ignore'):
         result = np.less_equal(abs(x - y), atol + rtol * abs(y))
     if np.isscalar(x) and np.isscalar(y):
         result = bool(result)
     return result
Example #33
 def _check_ninf_nan(dummy):
     msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
     with np.errstate(invalid='ignore'):
         z = f(np.array(complex(-np.inf, np.nan)))
         if z.real != 0 or z.imag != 0:
             raise AssertionError(msgform % (z.real, z.imag))
Example #34
def silhouette_samples(X, labels, metric='euclidean', **kwds):
    """Compute the Silhouette Coefficient for each sample.

    The Silhouette Coefficient is a measure of how well samples are clustered
    with samples that are similar to themselves. Clustering models with a high
    Silhouette Coefficient are said to be dense, where samples in the same
    cluster are similar to each other, and well separated, where samples in
    different clusters are not very similar to each other.

    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample.  The Silhouette Coefficient for a sample is ``(b - a) / max(a,
    b)``.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.

    This function returns the Silhouette Coefficient for each sample.

    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    labels : array, shape = [n_samples]
             label values for each sample

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`mrex.metrics.pairwise.pairwise_distances`. If X is
        the distance array itself, use "precomputed" as the metric. Precomputed
        distance matrices must have 0 along the diagonal.

    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a ``scipy.spatial.distance`` metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each sample.

    References
    ----------

    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <https://www.sciencedirect.com/science/article/pii/0377042787901257>`_

    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_

    """
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])

    # Check for non-zero diagonal entries in precomputed distance matrix
    if metric == 'precomputed':
        atol = np.finfo(X.dtype).eps * 100
        if np.any(np.abs(np.diagonal(X)) > atol):
            raise ValueError(
                'The precomputed distance matrix contains non-zero '
                'elements on the diagonal. Use np.fill_diagonal(X, 0).')

    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples = len(labels)
    label_freqs = np.bincount(labels)
    check_number_of_labels(len(le.classes_), n_samples)

    kwds['metric'] = metric
    reduce_func = functools.partial(_silhouette_reduce,
                                    labels=labels,
                                    label_freqs=label_freqs)
    results = zip(
        *pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
    intra_clust_dists, inter_clust_dists = results
    intra_clust_dists = np.concatenate(intra_clust_dists)
    inter_clust_dists = np.concatenate(inter_clust_dists)

    denom = (label_freqs - 1).take(labels, mode='clip')
    with np.errstate(divide="ignore", invalid="ignore"):
        intra_clust_dists /= denom

    sil_samples = inter_clust_dists - intra_clust_dists
    with np.errstate(divide="ignore", invalid="ignore"):
        sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # nan values are for clusters of size 1, and should be 0
    return np.nan_to_num(sil_samples)
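An illustrative sketch (not part of the original example) of the formula from the docstring, s = (b - a) / max(a, b), with the 0/0 produced by a singleton cluster mapped to 0 via nan_to_num.

import numpy as np

a = np.array([1.0, 2.0, 0.0])  # mean intra-cluster distances
b = np.array([3.0, 2.0, 0.0])  # mean nearest-cluster distances
with np.errstate(divide='ignore', invalid='ignore'):
    sil = (b - a) / np.maximum(a, b)
print(np.nan_to_num(sil))  # [0.66666667 0.         0.        ]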
Example #35
 def log(self):  # just use base?
     """Return the natural log of F:      G = F.log()  =>  G(x) = log( F(x) ) for all x"""
     with np.errstate(divide='ignore'):
         return FactorSparse().__build(
             self.v.copy(), {l: np.log(x)
                             for l, x in self.t.items()})
Example #36
 def logIP(self):  # just use base?
     """Take the natural log of F:       F.logIP()  =>  F(x) <- log( F(x) )  (in-place)"""
     with np.errstate(divide='ignore'):
         for l, x in self.t.items():
             self.t[l] = np.log(x)
     return self
Example #37
 def __div__(self, that):
     """Division of factors, e.g.,  G(x_1,x_2) = F1(x_1) / F2(x_2)"""
     with np.errstate(divide='ignore'):
         return self.__opExpand2(that, np.divide)
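An illustrative sketch (not part of the original example) of why factor division ignores divide errors: factor tables may contain zeros, and 1/0 -> inf is the intended result rather than a warning. Note that 0/0 raises 'invalid' rather than 'divide', so the sketch silences both.

import numpy as np

num = np.array([1.0, 0.0, 2.0])
den = np.array([0.0, 0.0, 4.0])
with np.errstate(divide='ignore', invalid='ignore'):
    print(np.divide(num, den))  # [inf nan 0.5]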
Example #38
def llh(actual_values, expected_values):
    """Compute the log-likelihoods (llh) that each count in `actual_values`
    came from the corresponding expected value in `expected_values`.

    Parameters
    ----------
    actual_values, expected_values : numpy.ndarrays of same shape

    Returns
    -------
    llh : numpy.ndarray of same shape as the inputs
        llh corresponding to each pair of elements in `actual_values` and
        `expected_values`.

    Notes
    -----
    * Uncertainties are not propagated through this calculation.
    * Values in `expected_values` are clipped to the range [SMALL_POS, inf]
      prior to the calculation to avoid infinities due to the log function.

    """
    assert actual_values.shape == expected_values.shape

    # Convert to simple numpy arrays containing floats
    if not isbarenumeric(actual_values):
        actual_values = unp.nominal_values(actual_values)
    if not isbarenumeric(expected_values):
        expected_values = unp.nominal_values(expected_values)

    with np.errstate(invalid='ignore'):
        # Mask off any nan expected values (these are assumed to be ok)
        actual_values = np.ma.masked_invalid(actual_values)
        expected_values = np.ma.masked_invalid(expected_values)

        # TODO: How should we handle nan / masked values in the "data"
        # (actual_values) distribution? How about negative numbers?

        # Make sure actual values (aka "data") are valid -- no infs, no nans,
        # etc.
        if np.any((actual_values < 0) | ~np.isfinite(actual_values)):
            msg = (
                '`actual_values` must be >= 0 and neither inf nor nan...\n' +
                maperror_logmsg(actual_values))
            raise ValueError(msg)

        # Check that new array contains all valid entries
        if np.any(expected_values < 0.0):
            msg = ('`expected_values` must all be >= 0...\n' +
                   maperror_logmsg(expected_values))
            raise ValueError(msg)

        # Replace 0's with small positive numbers to avoid inf in log
        np.clip(expected_values,
                a_min=SMALL_POS,
                a_max=np.inf,
                out=expected_values)

    #
    # Natural logarithm of the Poisson probability
    # (uses Stirling's approximation to estimate ln(k!) ~ k*ln(k) - k)
    #
    llh_val = actual_values * np.log(expected_values) - expected_values
    llh_val -= actual_values * np.log(actual_values) - actual_values

    return llh_val
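An illustrative check (not part of the original example) of the Stirling approximation named in the comment: ln(k!) = gammaln(k + 1) is close to k*ln(k) - k, with a remainder that grows only like 0.5*ln(2*pi*k).

import numpy as np
from scipy.special import gammaln

k = np.array([10.0, 100.0, 1000.0])
print(gammaln(k + 1) - (k * np.log(k) - k))  # ~[2.08 3.22 4.37]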
Example #39
def chi2(actual_values, expected_values):
    """Compute the chi-square between each value in `actual_values` and
    `expected_values`.

    Parameters
    ----------
    actual_values, expected_values : numpy.ndarrays of same shape

    Returns
    -------
    chi2 : numpy.ndarray of same shape as inputs
        chi-squared values corresponding to each pair of elements in the inputs

    Notes
    -----
    * Uncertainties are not propagated through this calculation.
    * Values in expectation are clipped to the range [SMALL_POS, inf] prior to
      the calculation to avoid infinities due to the divide function.
    * actual_values are allowed to be 0, since they do not come up in the denominator.
    """
    if actual_values.shape != expected_values.shape:
        raise ValueError('Shape mismatch: actual_values.shape = %s,'
                         ' expected_values.shape = %s' %
                         (actual_values.shape, expected_values.shape))

    # Convert to simple numpy arrays containing floats
    if not isbarenumeric(actual_values):
        actual_values = unp.nominal_values(actual_values)
    if not isbarenumeric(expected_values):
        expected_values = unp.nominal_values(expected_values)

    with np.errstate(invalid='ignore'):
        # Mask off any nan expected values (these are assumed to be ok)
        actual_values = np.ma.masked_invalid(actual_values)
        expected_values = np.ma.masked_invalid(expected_values)

        # TODO: this check (and the same for `actual_values`) should probably
        # be done elsewhere... maybe?
        if np.any(actual_values < 0):
            msg = ('`actual_values` must all be >= 0...\n' +
                   maperror_logmsg(actual_values))
            raise ValueError(msg)

        if np.any(expected_values < 0):
            msg = ('`expected_values` must all be >= 0...\n' +
                   maperror_logmsg(expected_values))
            raise ValueError(msg)

        # TODO: Is this okay to do? Mathematically suspect at best, and can
        #       still destroy a minimizer's hopes and dreams...

        # Replace 0's with small positive numbers to avoid inf in division
        np.clip(expected_values,
                a_min=SMALL_POS,
                a_max=np.inf,
                out=expected_values)

        delta = actual_values - expected_values

    if np.all(np.abs(delta) < 5 * FTYPE_PREC):
        return np.zeros_like(delta, dtype=FTYPE)

    chi2_val = np.square(delta) / expected_values
    assert np.all(chi2_val >= 0), str(chi2_val[chi2_val < 0])
    return chi2_val
Example #40
def f2(x):
    with np.errstate(divide='ignore', invalid='ignore'):
        return 1/x
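A hypothetical usage sketch, assuming f2 from the example above is in scope: 1/0 yields inf and 0/0 would yield nan, both without warnings.

import numpy as np

print(f2(np.array([0.0, 2.0, -4.0])))  # [ inf, 0.5, -0.25]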
Example #41
def ufunc_functional_factory(name, nargin, nargout, docstring):
    """Create a ufunc `Functional` from a given specification."""

    assert 0 <= nargin <= 2

    def __init__(self, field):
        """Initialize an instance.

        Parameters
        ----------
        field : `Field`
            The domain of the functional.
        """
        if not isinstance(field, Field):
            raise TypeError('`field` {!r} not a `Field`'.format(field))

        if _is_integer_only_ufunc(name):
            raise ValueError("ufunc '{}' only defined with integral dtype"
                             "".format(name))

        linear = name in LINEAR_UFUNCS
        Functional.__init__(self, space=field, linear=linear)

    def _call(self, x):
        """Return ``self(x)``."""
        if nargin == 1:
            return getattr(np, name)(x)
        else:
            return getattr(np, name)(*x)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(name, self.domain)

    # Create example (also functions as doctest)

    if nargin != 1:
        raise NotImplementedError('Currently not supported')

    if nargout != 1:
        raise NotImplementedError('Currently not supported')

    space = RealNumbers()
    val = 1.0
    arg = '{}'.format(val)
    with np.errstate(all='ignore'):
        result = np.float64(getattr(np, name)(val))

    examples_docstring = RAW_EXAMPLES_DOCSTRING.format(space=space,
                                                       name=name,
                                                       arg=arg,
                                                       result=result)
    full_docstring = docstring + examples_docstring

    attributes = {
        "__init__": __init__,
        "_call": _call,
        "gradient": property(gradient_factory(name)),
        "__repr__": __repr__,
        "__doc__": full_docstring
    }

    full_name = name + '_op'

    return type(full_name, (Functional, ), attributes)
Example #42
def f3(x):
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.float_power(x, -2)
Example #43
    def _run_trajectory(self):  # renamed run to solve
        x=self.xi
        t=self.ti
        __n=len(x)
        self.time=self.gettvec()
        data=np.zeros((len(self.xi),self.ptimes))
        ip=0

        if self.type=='linear':
            if not self.time_variant:
                while t<self.tf:
                    rate=np.atleast_2d(np.dot(self.W1,x))+self.W0
                    rate=np.cumsum(rate)
                    with np.errstate(divide='ignore', invalid='ignore'):
                        t=(t-np.log(np.random.rand(1))/rate[-1])
                    ro=rate[-1]*np.random.rand()
                    while t>self.time[ip]:
                        if t>self.tf:
                            b = len(self.time[ip:])
                            fill = np.repeat(x,b)
                            data[:,ip:]=fill.reshape(__n,b)
                            return data
                        else:
                            data[:,ip]=x.reshape(__n)
                            ip=ip+1
                    for i in range(len(rate)):
                        if rate[i]>=ro:
                            event=i
                            break
                    x=x+np.atleast_2d(self.S[:,event]).T

            else:

                x = np.concatenate((np.array([0]),self.xi.flatten()))
                __n=len(x)
                self.time=self.gettvec()
                data=np.zeros((len(self.xi),self.ptimes))
                a,b = self.S.shape
                S = np.vstack((np.zeros(b),self.S))
                S = np.hstack((np.zeros((a+1,1)),S))
                while t<self.tf:
                    trate=self.get_P(x[1:],t)
                    rate = np.concatenate((np.array([self.fast_rxn]),trate))
                    rate=np.cumsum(rate)

                    t=(t-np.log(np.random.rand(1))/rate[-1])
                    ro=rate[-1]*np.random.rand()

                    while t>self.time[ip]:
                        if t>self.tf:
                            b = len(self.time[ip:])
                            fill = np.repeat(x[1:],b)
                            data[:,ip:]=fill.reshape(__n-1,b)
                            return data
                        else:
                            data[:,ip]=x[1:]
                            ip=ip+1
                    for i in range(len(rate)):
                        if rate[i]>=ro:
                            event=i
                            break

                    x=x+S[:,event].ravel()



        elif self.type=='nonlinear':
            if self.time_variant:  # if time variant, use fast reaction
                x = np.concatenate((np.array([0]),self.xi.flatten()))

                #x = self.xi
                __n=len(x)
                self.time = self.gettvec()
                data = np.zeros((len(self.xi), self.ptimes))
                a, b = self.S.shape
                S = np.vstack((np.zeros(b), self.S))
                S = np.hstack((np.zeros((a+1, 1)), S))
                while t < self.tf:
                    trate=self.get_P(x[1:],t)
                    rate = np.concatenate((np.array([self.fast_rxn]),trate))
                    rate=np.cumsum(rate)

                    t=(t-np.log(np.random.rand(1))/rate[-1])
                    ro=rate[-1]*np.random.rand()

                    while t>self.time[ip]:
                        if t>self.tf:
                            b = len(self.time[ip:])
                            fill = np.repeat(x[1:],b)
                            data[:,ip:]=fill.reshape(__n-1,b)
                            return data
                        else:
                            #data[:,ip]=x.reshape(__n)
                            data[:,ip]=x[1:]
                            ip=ip+1
                    for i in range(len(rate)):
                        if rate[i]>=ro:
                            event=i

                            break

                    x=x+S[:,event].ravel()

            else:   #if not time variant ignore fast reaction

                x = self.xi.flatten()

                #x = self.xi
                __n=len(x)
                self.time=self.gettvec()

                while t<self.tf:
                    rate=self.get_P(x,t)

                    rate=np.cumsum(rate)

                    t=(t-np.log(np.random.rand(1))/rate[-1])
                    ro=rate[-1]*np.random.rand()

                    while t>self.time[ip]:
                        if t>self.tf:
                            b = len(self.time[ip:])
                            fill = np.repeat(x,b)
                            data[:,ip:]=fill.reshape(__n,b)
                            return data
                        else:
                            #data[:,ip]=x.reshape(__n)
                            data[:,ip]=x
                            ip=ip+1
                    for i in range(len(rate)):
                        if rate[i]>=ro:
                            event=i

                            break

                    x=x+self.S[:,event].ravel()



        else:
            raise ValueError('Unknown simulation type: {}'.format(self.type))
        self.data=data
        return data
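The loops above are variants of the Gillespie direct method. A minimal standalone sketch of one such simulation (the rates and the stoichiometry matrix S here are hypothetical):

import numpy as np

S = np.array([[1, -1]])  # one species: birth and death columns
x = np.array([10.0])
t, tf = 0.0, 1.0
while t < tf:
    rate = np.cumsum([5.0, 0.5 * x[0]])        # cumulative propensities
    t -= np.log(np.random.rand()) / rate[-1]   # exponential waiting time
    event = np.searchsorted(rate, rate[-1] * np.random.rand())
    x = x + S[:, event]                        # apply the chosen reaction
print(t, x)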
Example No. 44
 def __idiv__(self, that):
     """In-place divide, F1 /= F2.  Most efficient if F2.vars <= F1.vars"""
     with np.errstate(divide='ignore'):
         return self.__opExpand2(that, np.divide, out=self)
Example No. 45
 def __rdiv__(self, that):
     """Right-divide, e.g. G(x) = 3.0 / F(x)"""
     B = that if isinstance(that, FactorSparse) else FactorSparse([], that)
     with np.errstate(divide='ignore'):
         return B.__opExpand2(self, np.divide)
Example No. 46
def mcllh_mean(actual_values, expected_values):
    """Compute the log-likelihood (llh) based on LMean in table 2 - https://doi.org/10.1007/JHEP06(2019)030
    accounting for finite MC statistics.
    This is the second most recommended likelihood in the paper.

    Parameters
    ----------
    actual_values, expected_values : numpy.ndarrays of same shape

    Returns
    -------
    llh : numpy.ndarray of same shape as the inputs
        llh corresponding to each pair of elements in `actual_values` and
        `expected_values`.

    """
    assert actual_values.shape == expected_values.shape

    # Convert to simple numpy arrays containing floats
    actual_values = unp.nominal_values(actual_values).ravel()
    sigma = unp.std_devs(expected_values).ravel()
    expected_values = unp.nominal_values(expected_values).ravel()

    with np.errstate(invalid='ignore'):
        # Mask off any nan expected values (these are assumed to be ok)
        actual_values = np.ma.masked_invalid(actual_values)
        expected_values = np.ma.masked_invalid(expected_values)

        # TODO: How should we handle nan / masked values in the "data"
        # (actual_values) distribution? How about negative numbers?

        # Make sure actual values (aka "data") are valid -- no infs, no nans,
        # etc.
        if np.any((actual_values < 0) | ~np.isfinite(actual_values)):
            msg = (
                '`actual_values` must be >= 0 and neither inf nor nan...\n' +
                maperror_logmsg(actual_values))
            raise ValueError(msg)

        # Check that new array contains all valid entries
        if np.any(expected_values < 0.0):
            msg = ('`expected_values` must all be >= 0...\n' +
                   maperror_logmsg(expected_values))
            raise ValueError(msg)

        # Replace 0's with small positive numbers to avoid inf in log
        np.clip(expected_values,
                a_min=SMALL_POS,
                a_max=np.inf,
                out=expected_values)

    llh_val = likelihood_functions.poisson_gamma(data=actual_values,
                                                 sum_w=expected_values,
                                                 sum_w2=sigma**2,
                                                 a=0,
                                                 b=0)

    return llh_val
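For context, unp here refers to uncertainties.unumpy, so the expected values carry their MC standard deviations. A sketch of how inputs to this function might be constructed:

import numpy as np
from uncertainties import unumpy as unp

expected = unp.uarray([3.0, 5.0], [0.5, 0.7])  # nominal values with std devs
actual = np.array([2.0, 6.0])
print(unp.nominal_values(expected), unp.std_devs(expected))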
Example No. 47
 def log10IP(self):
     """Take the log base 10 of F:       F.log10IP()  =>  F(x) <- log10( F(x) )  (in-place)"""
     with np.errstate(divide='ignore'):
         for l, x in self.t.items():
             self.t[l] = np.log10(x)
     return self
Example No. 48
 def test_neginf(self):
     with np.errstate(divide='ignore'):
         assert_all(np.isnan(np.array((-1.,))/0.) == 0)
Example No. 49
 def log10(self):
     """Return the log base 10 of F:      G = F.log10()  =>  G(x) = log10( F(x) ) for all x"""
     with np.errstate(divide='ignore'):
         return FactorSparse().__build(
             self.v.copy(), {l: np.log10(x)
                             for l, x in self.t.items()})
Example No. 50
def nv_binary_clf_curve_test():
    N = np.random.randint(low=1, high=10)

    y_bool = np.random.rand(N) <= 0.5
    y_pred = np.random.rand(N)

    sample_weight = None
    if np.random.rand() <= 0.2:
        sample_weight = np.abs(np.random.randn(N))
    if np.random.rand() <= 0.2:
        sample_weight = 1 + np.random.multinomial(N, np.ones(N) / N)
    if np.random.rand() <= 0.2:
        sample_weight = np.maximum(np.random.multinomial(N,
                                                         np.ones(N) / N), 1e-6)

    fps, tps, thresholds = _nv_binary_clf_curve(y_bool, y_pred, sample_weight)
    assert (fps.shape == tps.shape and fps.shape == thresholds.shape)
    assert (np.all(np.isfinite(fps)))
    assert (np.all(np.isfinite(tps)))
    assert (np.all(np.isfinite(thresholds[1:])))
    assert (fps[0] == 0 and tps[0] == 0 and thresholds[0] == np.inf)
    if sample_weight is None:
        assert (np.abs(fps[-1] - np.sum(~y_bool)) <= 1e-8)
        assert (np.abs(tps[-1] - np.sum(y_bool)) <= 1e-8)
    else:
        assert (np.abs(fps[-1] - np.sum(sample_weight * ~y_bool)) <= 1e-8)
        assert (np.abs(tps[-1] - np.sum(sample_weight * y_bool)) <= 1e-8)
    assert (np.all((np.diff(fps) >= 0.0) & (np.diff(tps) >= 0.0)))
    assert (np.all((np.diff(fps) > 0) | (np.diff(tps) > 0)))
    assert (np.all(np.diff(thresholds) < 0.0))

    fpr, tpr, thresholds_roc = _nv_roc_curve(y_bool, y_pred, sample_weight)
    assert (fpr.shape == tpr.shape and fpr.shape == thresholds_roc.shape)
    assert (np.all(np.isfinite(fpr)))
    assert (np.all(np.isfinite(tpr)))
    assert (np.all(np.isfinite(thresholds_roc[1:])))
    assert (fpr[0] == 0.0 and tpr[0] == 0.0)
    assert (fpr[-1] == 1.0 and tpr[-1] == 1.0)
    assert (np.all((np.diff(fpr) >= 0.0) & (np.diff(tpr) >= 0.0)))
    assert (np.all((np.diff(fpr) > 0.0) | (np.diff(tpr) > 0.0)))
    assert (np.all(np.diff(thresholds_roc) < 0.0))

    rec, prec, thresholds_pr = _nv_recall_precision_curve(
        y_bool, y_pred, sample_weight)
    assert (rec.shape == prec.shape and rec.shape == thresholds_pr.shape)
    assert (np.all(np.isfinite(rec)))
    assert (np.all(np.isfinite(prec)))
    assert (np.all(np.isfinite(thresholds_pr[1:])))
    assert (rec[0] == 0.0 and rec[-1] == 1.0)
    assert (len(prec) >= 2 and prec[0] == prec[1])
    b_rate = np.mean(y_bool) if sample_weight is None else \
        np.true_divide(np.sum(sample_weight * y_bool), np.sum(sample_weight))
    assert (np.max(np.abs(prec[-1] - b_rate)) <= 1e-8)
    # Note: may have repeats in PR curve
    assert (np.all(np.diff(rec) >= 0.0))
    assert (np.all(np.diff(thresholds_pr) < 0.0))

    rec_gain, prec_gain, thresholds_prg = _nv_prg_curve(
        y_bool, y_pred, sample_weight)
    assert (rec_gain.shape == prec_gain.shape)
    assert (rec_gain.shape == thresholds_prg.shape)
    assert (np.all(np.isfinite(thresholds_prg[1:])))
    assert (rec_gain[0] == 0.0 and rec_gain[-1] == 1.0)
    assert (np.all(rec_gain <= 1.0) and np.all(prec_gain <= 1.0))
    assert (np.all(np.diff(rec_gain) >= 0.0))
    assert (np.allclose(prec_gain[-1], 0.0))

    if np.all(y_bool) or (not np.any(y_bool)):
        assert (np.allclose(0.5, np.trapz(fpr, tpr)))
        assert (np.allclose(np.mean(y_bool), np.sum(prec[:-1] * np.diff(rec))))
        assert (np.allclose(0.0, np.sum(prec_gain[:-1] * np.diff(rec_gain))))
        return

    fps2, tps2, thresholds2 = _binary_clf_curve(y_bool,
                                                y_pred,
                                                pos_label=True,
                                                sample_weight=sample_weight)
    assert (np.allclose(fps[1:], fps2))
    assert (np.allclose(tps[1:], tps2))
    assert (np.allclose(thresholds[1:], thresholds2))

    fpr2, tpr2, thresholds2 = roc_curve(y_bool,
                                        y_pred,
                                        pos_label=True,
                                        sample_weight=sample_weight,
                                        drop_intermediate=False)
    # sklearn inconsistent on including origin ==> need if statement
    if len(fpr) == len(fpr2):
        assert (np.allclose(fpr, fpr2))
        assert (np.allclose(tpr, tpr2))
        assert (np.allclose(thresholds_roc[1:], thresholds2[1:]))
    else:
        assert (np.allclose(fpr[1:], fpr2))
        assert (np.allclose(tpr[1:], tpr2))
        assert (np.allclose(thresholds_roc[1:], thresholds2))

    prec2, rec2, thresholds2 = \
        precision_recall_curve(y_bool, y_pred, pos_label=True,
                               sample_weight=sample_weight)
    prec2, rec2, thresholds2 = prec2[::-1], rec2[::-1], thresholds2[::-1]
    prec2[0] = prec2[1]
    err = rec[len(rec2):] - 1.0
    assert (len(err) == 0 or np.max(np.abs(err)) <= 1e-8)
    assert (np.allclose(rec[:len(rec2)], rec2))
    assert (np.allclose(prec[:len(rec2)], prec2))
    assert (np.allclose(thresholds_pr[1:len(rec2)], thresholds2))

    with np.errstate(divide='ignore', invalid='ignore'):
        rec_gain2 = (rec - b_rate) / ((1.0 - b_rate) * rec)
        prec_gain2 = (prec - b_rate) / ((1.0 - b_rate) * prec)
    idx = rec_gain2 > 0.0
    assert (np.allclose(rec_gain[1:], rec_gain2[idx]))
    assert (np.allclose(prec_gain[1:], prec_gain2[idx]))
    assert (np.allclose(thresholds_prg[1:], thresholds_pr[idx]))
    assert (np.allclose(rec_gain[0], 0.0))
    idx0 = np.where(~idx)[0][-1]
    assert (np.allclose(prec_gain[0], prec_gain2[idx0]))
    assert (np.allclose(thresholds_prg[0], thresholds_pr[idx0]))
Example No. 51
 def test_complex1(self):
     with np.errstate(divide='ignore', invalid='ignore'):
         assert_all(np.isnan(np.array(0+0j)/0.) == 1)
Example No. 52
    def track(self, sess, first_bbox, frames, logdir='/tmp', write_summary=True):
        """Runs tracking on a single image sequence."""
        # Get initial target bounding box and convert to center based
        bbox = convert_bbox_format(first_bbox, 'center-based')

        # Feed in the first frame image to set initial state.
        bbox_feed = [bbox.y, bbox.x, bbox.height, bbox.width]
        input_feed = [frames[0], bbox_feed]
        frame2crop_scale = self.siamese_model.initialize(sess, input_feed)

        # Storing target state
        original_target_height = bbox.height
        original_target_width = bbox.width
        search_center = np.array([get_center(self.x_image_size),
                                  get_center(self.x_image_size)])
        current_target_state = TargetState(bbox=bbox,
                                           search_pos=search_center,
                                           scale_idx=int(get_center(self.num_scales)))

        include_first = False
        logging.info('Tracking include first -- {}'.format(include_first))

        if write_summary:
            summary_writer = tf.summary.FileWriter(
                    osp.join(logdir, 'summary'), graph=sess.graph)
            self.siamese_model.build_summary(summary_writer)

        # Run tracking loop
        reported_bboxs = []
        for i, filename in enumerate(frames):
            if i > 0 or include_first:  # We don't really want to process the first image unless intended to do so.
                bbox_feed = [current_target_state.bbox.y, current_target_state.bbox.x,
                             current_target_state.bbox.height, current_target_state.bbox.width]
                input_feed = [filename, bbox_feed]

                outputs, metadata = self.siamese_model.inference_step(sess, input_feed)
                search_scale_list = outputs['scale_xs']
                response = outputs['response']
                response_size = response.shape[1]

                # Choose the scale whose response map has the highest peak
                if self.num_scales > 1:
                    response_max = np.max(response, axis=(1, 2))
                    penalties = self.config.scale_penalty * np.ones((self.num_scales))
                    current_scale_idx = int(get_center(self.num_scales))
                    penalties[current_scale_idx] = 1.0
                    response_penalized = response_max * penalties
                    best_scale = np.argmax(response_penalized)
                else:
                    best_scale = 0

                response = response[best_scale]

                if self.update_template:
                    mmr = outputs['MMRs'][best_scale]
                    if mmr > self.config.mmr_thresh:
                        print('update templates MMRs={}'.format(mmr))
                        self.siamese_model.update(sess, input_feed)

                with np.errstate(all='raise'):  # Raise error if something goes wrong
                    response = response - np.min(response)
                    response = response / np.sum(response)

                if self.window is None:
                    window = np.dot(np.expand_dims(np.hanning(response_size), 1),
                                  np.expand_dims(np.hanning(response_size), 0))
                    self.window = window / np.sum(window)  # normalize window
                window_influence = self.config.window_influence
                response = (1 - window_influence) * response + window_influence * self.window

                # Find maximum response
                r_max, c_max = np.unravel_index(response.argmax(),
                                                response.shape)

                # Convert from crop-relative coordinates to frame coordinates
                p_coor = np.array([r_max, c_max])
                # displacement from the center in instance final representation ...
                disp_instance_final = p_coor - get_center(response_size)
                # ... in instance feature space ...
                upsample_factor = self.config.upsample_factor
                disp_instance_feat = disp_instance_final / upsample_factor
                # ... Avoid empty position ...
                r_radius = int(response_size / upsample_factor / 2)
                disp_instance_feat = np.maximum(np.minimum(disp_instance_feat, r_radius), -r_radius)
                # ... in instance input ...
                disp_instance_input = disp_instance_feat * self.config.embed_stride
                # ... in instance original crop (in frame coordinates)
                disp_instance_frame = disp_instance_input / search_scale_list[best_scale]
                # Position within frame in frame coordinates
                y = current_target_state.bbox.y
                x = current_target_state.bbox.x
                y += disp_instance_frame[0]
                x += disp_instance_frame[1]

                # Target scale damping and saturation
                target_scale = current_target_state.bbox.height / original_target_height
                search_factor = self.search_factors[best_scale]
                scale_damp = self.config.scale_damp  # damping factor for scale update
                target_scale *= ((1 - scale_damp) * 1.0 + scale_damp * search_factor)
                target_scale = np.maximum(0.2, np.minimum(5.0, target_scale))

                # Some book keeping
                height = original_target_height * target_scale
                width = original_target_width * target_scale
                current_target_state.bbox = Rectangle(x, y, width, height)
                current_target_state.scale_idx = best_scale
                current_target_state.search_pos = search_center + disp_instance_input

                assert 0 <= current_target_state.search_pos[0] < self.x_image_size, \
                  'target position in feature space should be no larger than input image size'
                assert 0 <= current_target_state.search_pos[1] < self.x_image_size, \
                  'target position in feature space should be no larger than input image size'

                if self.log_level > 0:
                    np.save(osp.join(logdir, 'num_frames.npy'), [i + 1])

                    # Select the image with the highest score scale and convert it to uint8
                    image_cropped = outputs['image_cropped'][best_scale].astype(np.uint8)
                    # Note that imwrite in cv2 assumes the image is in BGR format.
                    # However, the cropped image returned by TensorFlow is RGB.
                    # Therefore, we convert color format using cv2.cvtColor
                    imwrite(osp.join(logdir, 'image_cropped{}.jpg'.format(i)),
                          cv2.cvtColor(image_cropped, cv2.COLOR_RGB2BGR))

                    np.save(osp.join(logdir, 'best_scale{}.npy'.format(i)), [best_scale])
                    np.save(osp.join(logdir, 'response{}.npy'.format(i)), response)

                    y_search, x_search = current_target_state.search_pos
                    search_scale = search_scale_list[best_scale]
                    target_height_search = height * search_scale
                    target_width_search = width * search_scale
                    bbox_search = Rectangle(x_search, y_search, target_width_search, target_height_search)
                    bbox_search = convert_bbox_format(bbox_search, 'top-left-based')
                    np.save(osp.join(logdir, 'bbox{}.npy'.format(i)),
                          [bbox_search.x, bbox_search.y, bbox_search.width, bbox_search.height])
            reported_bbox = convert_bbox_format(current_target_state.bbox, 'top-left-based')
            reported_bboxs.append(reported_bbox)
        #--- END OF FRAME
        return reported_bboxs
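A standalone sketch of the response post-processing inside the loop above: normalize the score map, then blend in a normalized Hanning window to penalize large displacements (the map size and window_influence value are hypothetical):

import numpy as np

response = np.random.rand(17, 17)
response = response - np.min(response)
response = response / np.sum(response)

window = np.outer(np.hanning(17), np.hanning(17))
window = window / np.sum(window)  # normalize window

window_influence = 0.176
response = (1 - window_influence) * response + window_influence * window
r_max, c_max = np.unravel_index(response.argmax(), response.shape)
print(r_max, c_max)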
Example No. 53
 def test_ind(self):
     with np.errstate(divide='ignore', invalid='ignore'):
         assert_all(np.isfinite(np.array((0.,))/0.) == 0)
Example No. 54
    def predict(self, X_gmm, X_reg, mode='soft'):
        """Estimate the values of the outputs for a new set of inputs.

        Compute the expected value of y given the trained model and a set
        X of new observations.

        Parameters
        ----------
        X_gmm : array-like, shape (n_samples, n_gmm_features)
        X_reg : array-like, shape (n_samples, n_reg_features)
        mode : string, whether to make a soft or hard prediction.

        Returns
        -------
        targets : array, shape (n_samples, 1)
        """
        if not self.is_fitted_:
            raise RuntimeError("Model isn't fitted.")

        eps = 10 * np.finfo(self.resp_tr_.dtype).eps
        n, d_gmm = X_gmm.shape
        _, d_reg = X_reg.shape

        if d_gmm != self.n_gmm_dims_:
            raise ValueError("Incorrect dimensions for the GMM input data.")
        if d_reg != self.n_reg_dims_:
            raise ValueError(
                "Incorrect dimensions for the regression input data.")
        if mode not in ['soft', 'hard']:
            raise ValueError(
                "Prediction mode has to be either 'hard. or 'soft'.")

        reg_weights = self.reg_weights_
        reg_precisions = self.reg_precisions_
        if self.n_targets_ == 1:
            reg_weights = reg_weights[np.newaxis, :, :]
            reg_precisions = reg_precisions[np.newaxis, :]

        X_ext = np.concatenate((np.ones((n, 1)), X_reg), axis=1)
        targets = np.zeros((n, self.n_targets_))

        # Compute all the log-factors for the responsibility expression
        log_weights = np.log(self.weights_)
        log_prob_X = _estimate_log_prob_X(X_gmm, self.means_,
                                          self.precisions_cholesky_)

        # Compute log-responsibilities
        weighted_log_prob = log_weights + log_prob_X
        log_prob_norm = logsumexp(weighted_log_prob, axis=1)
        with np.errstate(under='ignore'):
            # ignore underflow
            log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
        resp_tst = np.exp(log_resp)
        coefs = np.copy(resp_tst)
        labels_tst = log_resp.argmax(axis=1)

        if mode == 'hard':
            # Force max responsibilities to 1, rest to 0.
            coefs = (resp_tst == resp_tst.max(axis=1)[:, None]).astype(int)

        # Compute the expected value of the predictive posterior.
        for k in range(self.n_components):
            dot_prod = np.dot(X_ext, reg_weights[:, :, k].T)
            targets += np.multiply((coefs[:, k] + eps)[:, np.newaxis],
                                   dot_prod)

        self.resp_tst_ = resp_tst
        self.labels_tst_ = labels_tst

        return targets
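The soft-assignment step above in isolation: responsibilities are normalized in log space with logsumexp, which is where the underflow guard matters (toy numbers):

import numpy as np
from scipy.special import logsumexp

weighted_log_prob = np.array([[-800.0, -801.0], [-1.0, -2.0]])
with np.errstate(under='ignore'):
    log_resp = weighted_log_prob - logsumexp(weighted_log_prob,
                                             axis=1)[:, np.newaxis]
resp = np.exp(log_resp)
print(resp.sum(axis=1))  # -> [1. 1.], even for the badly scaled first row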
Example No. 55
    def rhs_equation(x, params, derivs):
        """
        Compute the ODEs
        """
        # pylint: disable=too-many-statements
        θ = scaled_to_rad(x, θ_scale)
        B_r = params[ODEIndex.B_r]
        B_φ = params[ODEIndex.B_φ]
        B_θ = params[ODEIndex.B_θ]
        v_r = params[ODEIndex.v_r]
        v_φ = params[ODEIndex.v_φ]
        ρ = params[ODEIndex.ρ]
        η_O = params[ODEIndex.η_O]
        η_A = params[ODEIndex.η_A]
        η_H = params[ODEIndex.η_H]
        if use_E_r:
            E_r = params[ODEIndex.E_r]
        else:
            B_φ_prime = params[ODEIndex.B_φ_prime]

        # check sanity of input values
        if ρ < 0:
            if store_internal:
                # pylint: disable=unsubscriptable-object
                problems[θ].append("negative density")
            return 1

        B_mag = sqrt(B_r**2 + B_φ**2 + B_θ**2)

        with errstate(invalid="ignore"):
            b_r, b_φ, b_θ = B_r/B_mag, B_φ/B_mag, B_θ/B_mag

        C = C_func(η_O=η_O, η_A=η_A, η_H=η_H, b_θ=b_θ, b_r=b_r, b_φ=b_φ)
        X = X_func(η_O=η_O, η_A=η_A, η_H=η_H, b_θ=b_θ, b_r=b_r, b_φ=b_φ)
        Z_5 = Z_5_func(
            η_O=η_O, η_A=η_A, η_H=η_H, b_r=b_r, b_θ=b_θ, b_φ=b_φ, C=C
        )

        deriv_B_θ = B_θ * tan(θ) - 3/4 * B_r

        if use_E_r:
            Z_8 = Z_8_func(
                θ=θ, b_r=b_r, b_θ=b_θ, b_φ=b_φ, B_φ=B_φ, η_O=η_O, η_A=η_A,
                η_H=η_H, E_r=E_r, Z_5=Z_5, C=C,
            )

            v_r = v_r_func(
                θ=θ, a_0=a_0, norm_kepler=norm_kepler, ρ=ρ, Z_5=Z_5, Z_8=Z_8,
                B_r=B_r, B_θ=B_θ, B_φ=B_φ, b_r=b_r, b_θ=b_θ, b_φ=b_φ, η_O=η_O,
                η_A=η_A, η_H=η_H, C=C, X=X,
            )

            v_φ = v_φ_func(
                θ=θ, a_0=a_0, ρ=ρ, Z_5=Z_5, Z_8=Z_8, v_r=v_r, B_r=B_r, B_θ=B_θ,
                B_φ=B_φ, C=C,
            )

            params[ODEIndex.v_r] = v_r
            params[ODEIndex.v_φ] = v_φ

            deriv_B_φ = deriv_B_φ_func(
                Z_5=Z_5, Z_8=Z_8, v_r=v_r, v_φ=v_φ, B_θ=B_θ, C=C,
            )
        else:
            deriv_B_φ = B_φ_prime

        deriv_B_r = (
            B_φ * (
                η_A * b_φ * (
                    b_r * tan(θ) -
                    b_θ / 4
                ) + η_H * (
                    b_r / 4 +
                    b_θ * tan(θ)
                )
            ) - deriv_B_φ * (
                η_H * b_θ +
                η_A * b_r * b_φ
            ) - v_r * B_θ
        ) / (
            η_O + η_A * (1 - b_φ) * (1 + b_φ)
        ) - B_θ / 4

        deriv_ρ = - ρ * v_φ ** 2 * tan(θ) - a_0 * (
            B_θ * B_r / 4 + B_r * deriv_B_r + B_φ * deriv_B_φ -
            B_φ ** 2 * tan(θ)
        )

        if η_derivs:
            deriv_η_scale = deriv_η_skw_func(
                deriv_ρ=deriv_ρ, deriv_B_θ=deriv_B_θ, ρ=ρ, B_r=B_r, B_φ=B_φ,
                B_θ=B_θ, deriv_B_r=deriv_B_r, deriv_B_φ=deriv_B_φ,
            )
            deriv_η_O = deriv_η_scale * η_O_0
            deriv_η_A = deriv_η_scale * η_A_0
            deriv_η_H = deriv_η_scale * η_H_0
        else:
            deriv_η_O = 0
            deriv_η_A = 0
            deriv_η_H = 0

        if use_E_r:
            J_r = J_r_func(θ=θ, B_φ=B_φ, deriv_B_φ=deriv_B_φ)
            J_θ = J_θ_func(γ=0, B_φ=B_φ)
            J_φ = J_φ_func(γ=0, B_θ=B_θ, deriv_B_r=deriv_B_r)

            deriv_E_r = deriv_E_r_func(
                γ=0, v_r=v_r, v_φ=v_φ, B_r=B_r, B_φ=B_φ, η_O=η_O, η_A=η_A,
                η_H=η_H, b_r=b_r, b_θ=b_θ, b_φ=b_φ, J_r=J_r, J_φ=J_φ, J_θ=J_θ,
            )
        else:
            deriv_b_r, deriv_b_φ, deriv_b_θ = B_unit_derivs(
                B_r=B_r, B_φ=B_φ, B_θ=B_θ, deriv_B_r=deriv_B_r,
                deriv_B_φ=deriv_B_φ, deriv_B_θ=deriv_B_θ, b_r=b_r, b_θ=b_θ,
                b_φ=b_φ,
            )

            A = A_func(
                η_O=η_O, η_A=η_A, η_H=η_H, b_θ=b_θ, b_r=b_r, b_φ=b_φ,
                deriv_η_O=deriv_η_O, deriv_η_A=deriv_η_A, deriv_η_H=deriv_η_H,
                deriv_b_θ=deriv_b_θ, deriv_b_r=deriv_b_r, deriv_b_φ=deriv_b_φ
            )
            X_dash = X_dash_func(
                η_O=η_O, η_A=η_A, η_H=η_H, b_θ=b_θ, b_r=b_r, b_φ=b_φ,
                deriv_η_O=deriv_η_O, deriv_η_A=deriv_η_A, deriv_η_H=deriv_η_H,
                deriv_b_θ=deriv_b_θ, deriv_b_r=deriv_b_r, deriv_b_φ=deriv_b_φ
            )

            Z_1 = Z_1_func(
                θ=θ, a_0=a_0, B_φ=B_φ, B_θ=B_θ, B_r=B_r, ρ=ρ,
                deriv_B_r=deriv_B_r, deriv_B_φ=deriv_B_φ, deriv_B_θ=deriv_B_θ,
                v_φ=v_φ, deriv_ρ=deriv_ρ
            )
            Z_2 = Z_2_func(
                θ=θ, a_0=a_0, X=X, v_r=v_r, B_φ=B_φ, B_θ=B_θ, ρ=ρ, η_A=η_A,
                η_O=η_O, η_H=η_H, b_φ=b_φ, b_r=b_r, b_θ=b_θ, X_dash=X_dash,
                deriv_B_φ=deriv_B_φ, deriv_B_θ=deriv_B_θ, deriv_b_θ=deriv_b_θ,
                deriv_b_φ=deriv_b_φ, deriv_b_r=deriv_b_r, deriv_η_O=deriv_η_O,
                deriv_η_A=deriv_η_A, deriv_η_H=deriv_η_H, deriv_ρ=deriv_ρ,
                norm_kepler=norm_kepler,
            )
            Z_3 = Z_3_func(
                v_r=v_r, B_θ=B_θ, norm_kepler=norm_kepler, η_O=η_O, η_A=η_A,
                b_φ=b_φ,
            )
            Z_4 = Z_4_func(B_θ=B_θ, B_r=B_r, B_φ=B_φ, deriv_B_φ=deriv_B_φ, θ=θ)
            Z_6 = Z_6_func(
                a_0=a_0, X=X, v_r=v_r, B_θ=B_θ, ρ=ρ, Z_5=Z_5,
                norm_kepler=norm_kepler, C=C, v_φ=v_φ, Z_4=Z_4, Z_3=Z_3,
            )
            Z_7 = Z_7_func(
                a_0=a_0, X=X, v_r=v_r, B_θ=B_θ, ρ=ρ, Z_5=Z_5,
                norm_kepler=norm_kepler, C=C, v_φ=v_φ, Z_4=Z_4,
            )

            dderiv_B_φ = dderiv_B_φ_func(
                B_φ=B_φ, B_θ=B_θ, η_O=η_O, η_H=η_H, η_A=η_A, θ=θ, v_r=v_r,
                v_φ=v_φ, deriv_B_r=deriv_B_r, deriv_B_θ=deriv_B_θ,
                deriv_B_φ=deriv_B_φ, deriv_η_O=deriv_η_O, deriv_η_A=deriv_η_A,
                deriv_η_H=deriv_η_H, A=A, C=C, b_r=b_r, b_θ=b_θ, b_φ=b_φ,
                Z_6=Z_6, Z_7=Z_7, deriv_b_θ=deriv_b_θ, deriv_b_φ=deriv_b_φ,
                deriv_b_r=deriv_b_r,
            )

            deriv_v_r = deriv_v_r_func(
                a_0=a_0, B_θ=B_θ, v_φ=v_φ, ρ=ρ, dderiv_B_φ=dderiv_B_φ, Z_1=Z_1,
                Z_2=Z_2, Z_4=Z_4, norm_kepler=norm_kepler, X=X, v_r=v_r,
            )

            deriv_v_φ = deriv_v_φ_func(
                Z_2=Z_2, deriv_v_r=deriv_v_r, dderiv_B_φ=dderiv_B_φ,
                norm_kepler=norm_kepler, v_r=v_r, X=X, B_θ=B_θ,
            )

        derivs[ODEIndex.B_r] = deriv_B_r
        derivs[ODEIndex.B_φ] = deriv_B_φ
        derivs[ODEIndex.B_θ] = deriv_B_θ
        derivs[ODEIndex.ρ] = deriv_ρ
        derivs[ODEIndex.η_O] = deriv_η_O
        derivs[ODEIndex.η_A] = deriv_η_A
        derivs[ODEIndex.η_H] = deriv_η_H

        derivs[ODEIndex.v_θ] = 0
        if use_E_r:
            derivs[ODEIndex.v_r] = 0
            derivs[ODEIndex.v_φ] = 0
            derivs[ODEIndex.E_r] = deriv_E_r
        else:
            derivs[ODEIndex.v_r] = deriv_v_r
            derivs[ODEIndex.v_φ] = deriv_v_φ
            derivs[ODEIndex.B_φ_prime] = dderiv_B_φ

        if __debug__:
            log.debug("θ: {}, {}", θ, degrees(θ))

        if store_internal:
            params_list.append(copy(params))
            derivs_list.append(copy(derivs))
            angles_list.append(θ)

            if len(params_list) != len(angles_list):
                log.error(
                    "Internal data not consistent, "
                    "params is {}, angles is {}".format(
                        len(params_list), len(angles_list)
                    )
                )

        return 0
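The errstate(invalid="ignore") block above guards the unit-vector computation against a vanishing field; the same idea in isolation (a sketch):

import numpy as np

B = np.array([0.0, 0.0, 0.0])
B_mag = np.sqrt(np.sum(B ** 2))
with np.errstate(invalid='ignore'):
    b = B / B_mag  # -> [nan nan nan] silently, instead of a RuntimeWarning
print(b)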
Example No. 56
def test_precision_recall_curve_toydata():
    with np.errstate(all="raise"):
        # Binary classification
        y_true = [0, 1]
        y_score = [0, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)

        y_true = [0, 1]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 0., 1.])
        assert_array_almost_equal(r, [1., 0., 0.])
        # Here we are doing a terrible prediction: we are always getting
        # it wrong, hence the average_precision_score is the accuracy at
        # chance: 50%
        assert_almost_equal(auc_prc, 0.5)

        y_true = [1, 0]
        y_score = [1, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1., 0])
        assert_almost_equal(auc_prc, .5)

        y_true = [1, 0]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)

        y_true = [1, 0]
        y_score = [0.5, 0.5]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1, 0.])
        assert_almost_equal(auc_prc, .5)

        y_true = [0, 0]
        y_score = [0.25, 0.75]
        assert_raises(Exception, precision_recall_curve, y_true, y_score)
        assert_raises(Exception, average_precision_score, y_true, y_score)

        y_true = [1, 1]
        y_score = [0.25, 0.75]
        p, r, _ = precision_recall_curve(y_true, y_score)
        assert_almost_equal(average_precision_score(y_true, y_score), 1.)
        assert_array_almost_equal(p, [1., 1., 1.])
        assert_array_almost_equal(r, [1, 0.5, 0.])

        # Multi-label classification task
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [0, 1]])
        assert_raises(Exception,
                      average_precision_score,
                      y_true,
                      y_score,
                      average="macro")
        assert_raises(Exception,
                      average_precision_score,
                      y_true,
                      y_score,
                      average="weighted")
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="samples"), 1.)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="micro"), 1.)

        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_raises(Exception,
                      average_precision_score,
                      y_true,
                      y_score,
                      average="macro")
        assert_raises(Exception,
                      average_precision_score,
                      y_true,
                      y_score,
                      average="weighted")
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="samples"), 0.75)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="micro"), 0.5)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="macro"), 0.5)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="weighted"), 0.5)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="samples"), 0.5)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="micro"), 0.5)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="macro"), 0.5)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="weighted"), 0.5)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="samples"), 0.5)
        assert_almost_equal(
            average_precision_score(y_true, y_score, average="micro"), 0.5)
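For reference, np.errstate(all="raise") promotes the usual silent IEEE warnings to FloatingPointError, which is how the test above would surface any numerical problem in the metric code:

import numpy as np

try:
    with np.errstate(all='raise'):
        np.float64(1.0) / 0.0
except FloatingPointError as err:
    print('caught:', err)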
Example No. 57
def ufunc_class_factory(name, nargin, nargout, docstring):
    """Create a Ufunc `Operator` from a given specification."""

    assert 0 <= nargin <= 2

    def __init__(self, space):
        """Initialize an instance.

        Parameters
        ----------
        space : `TensorSpace`
            The domain of the operator.
        """
        if not isinstance(space, LinearSpace):
            raise TypeError('`space` {!r} not a `LinearSpace`'.format(space))

        if nargin == 1:
            domain = space0 = space
            dtypes = [space.dtype]
        elif nargin == len(space) == 2 and isinstance(space, ProductSpace):
            domain = space
            space0 = space[0]
            dtypes = [space[0].dtype, space[1].dtype]
        else:
            domain = ProductSpace(space, nargin)
            space0 = space
            dtypes = [space.dtype, space.dtype]

        dts_out = dtypes_out(name, dtypes)

        if nargout == 1:
            range = space0.astype(dts_out[0])
        else:
            range = ProductSpace(space0.astype(dts_out[0]),
                                 space0.astype(dts_out[1]))

        linear = name in LINEAR_UFUNCS
        Operator.__init__(self, domain=domain, range=range, linear=linear)

    def _call(self, x, out=None):
        """Return ``self(x)``."""
        # TODO: use `__array_ufunc__` when implemented on `ProductSpace`,
        # or try both
        if out is None:
            if nargin == 1:
                return getattr(x.ufuncs, name)()
            else:
                return getattr(x[0].ufuncs, name)(*x[1:])
        else:
            if nargin == 1:
                return getattr(x.ufuncs, name)(out=out)
            else:
                return getattr(x[0].ufuncs, name)(*x[1:], out=out)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(name, self.domain)

    # Create example (also functions as doctest)
    if 'shift' in name or 'bitwise' in name or name == 'invert':
        dtype = int
    else:
        dtype = float

    space = tensor_space(3, dtype=dtype)
    if nargin == 1:
        vec = space.element([-1, 1, 2])
        arg = '{}'.format(vec)
        with np.errstate(all='ignore'):
            result = getattr(vec.ufuncs, name)()
    else:
        vec = space.element([-1, 1, 2])
        vec2 = space.element([3, 4, 5])
        arg = '[{}, {}]'.format(vec, vec2)
        with np.errstate(all='ignore'):
            result = getattr(vec.ufuncs, name)(vec2)

    if nargout == 2:
        result_space = ProductSpace(vec.space, 2)
        result = repr(result_space.element(result))

    examples_docstring = RAW_EXAMPLES_DOCSTRING.format(space=space,
                                                       name=name,
                                                       arg=arg,
                                                       result=result)
    full_docstring = docstring + examples_docstring

    attributes = {
        "__init__": __init__,
        "_call": _call,
        "derivative": derivative_factory(name),
        "__repr__": __repr__,
        "__doc__": full_docstring
    }

    full_name = name + '_op'

    return type(full_name, (Operator, ), attributes)
Example No. 58
def _parse_version(version_string):
    version = []
    for x in version_string.split('.'):
        try:
            version.append(int(x))
        except ValueError:
            # x may be of the form dev-1ea1592
            version.append(x)
    return tuple(version)


np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)


try:
    from scipy.special import expit     # SciPy >= 0.10
    with np.errstate(invalid='ignore', over='ignore'):
        if np.isnan(expit(1000)):       # SciPy < 0.14
            raise ImportError("no stable expit in scipy.special")
except ImportError:
    def expit(x, out=None):
        """Logistic sigmoid function, ``1 / (1 + exp(-x))``.

        See sklearn.utils.extmath.log_logistic for the log of this function.
        """
        if out is None:
            out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
        out[:] = x

        # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
        # This way of computing the logistic is both fast and stable.
        out *= .5
        np.tanh(out, out)
        out += 1
        out *= .5

        return out.reshape(np.shape(x))
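A quick numerical check of the tanh identity the fallback relies on:

import numpy as np

x = np.linspace(-5.0, 5.0, 11)
assert np.allclose(1 / (1 + np.exp(-x)), (1 + np.tanh(x / 2)) / 2)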
Example No. 59
    def get_matrix_element_AlbrechtBC(self,
                                      omega,
                                      gamma=0.1,
                                      ml=[1],
                                      term='BC'):
        """Evaluate Albrecht B and/or C term(s)."""
        self.read()

        self.timer.start('AlbrechtBC')

        if not hasattr(self, 'fco'):
            self.fco = FranckCondonOverlap()

        # excited state forces
        F_pr = self.exF_rp.T

        m_rcc = np.zeros((self.ndof, 3, 3), dtype=complex)
        for p, energy in enumerate(self.ex0E_p):
            S_r = self.get_Huang_Rhys_factors(F_pr[p])

            for m in ml:
                self.timer.start('Franck-Condon overlaps')
                fc1mm1_r = self.fco.direct(1, m, S_r)
                fc0mm02_r = self.fco.direct(0, m, S_r)
                fc0mm02_r += np.sqrt(2) * self.fco.direct0mm2(m, S_r)
                # XXXXX
                fc1mm1_r[-1] = 1
                fc0mm02_r[-1] = 1
                print(m, fc1mm1_r[-1], fc0mm02_r[-1])
                self.timer.stop('Franck-Condon overlaps')

                self.timer.start('me derivatives')
                dm_rc = []
                r = 0
                for a in self.indices:
                    for i in 'xyz':
                        dm_rc.append(
                            (self.expm_rpc[r, p] - self.exmm_rpc[r, p]) *
                            self.im[r])
                        print('pm=', self.expm_rpc[r, p], self.exmm_rpc[r, p])
                        r += 1
                dm_rc = np.array(dm_rc) / (2 * self.delta)
                self.timer.stop('me derivatives')

                self.timer.start('map to modes')
                # print('dm_rc[2], dm_rc[5]', dm_rc[2], dm_rc[5])
                print('dm_rc=', dm_rc)
                dm_rc = np.dot(dm_rc.T, self.modes.T).T
                print('dm_rc[-1][2]', dm_rc[-1][2])
                self.timer.stop('map to modes')

                self.timer.start('multiply')
                # me_cc = np.outer(self.ex0m_pc[p], self.ex0m_pc[p].conj())
                for r in range(self.ndof):
                    if 'B' in term:
                        # XXXX
                        denom = (1. / (energy + m * 0 * self.om_r[r] - omega -
                                       1j * gamma))
                        # ok print('denom=', denom)
                        m_rcc[r] += (
                            np.outer(dm_rc[r], self.ex0m_pc[p].conj()) *
                            fc1mm1_r[r] * denom)
                        if r == 5:
                            print('m_rcc[r]=', m_rcc[r][2, 2])
                        m_rcc[r] += (
                            np.outer(self.ex0m_pc[p], dm_rc[r].conj()) *
                            fc0mm02_r[r] * denom)
                    if 'C' in term:
                        denom = (1. /
                                 (energy +
                                  (m - 1) * self.om_r[r] + omega + 1j * gamma))
                        m_rcc[r] += (
                            np.outer(self.ex0m_pc[p], dm_rc[r].conj()) *
                            fc1mm1_r[r] * denom)
                        m_rcc[r] += (
                            np.outer(dm_rc[r], self.ex0m_pc[p].conj()) *
                            fc0mm02_r[r] * denom)
                self.timer.stop('multiply')
        print('m_rcc[-1]=', m_rcc[-1][2, 2])

        self.timer.start('pre_r')
        with np.errstate(divide='ignore'):
            pre_r = np.where(self.om_r > 0,
                             np.sqrt(units._hbar**2 / 2. / self.om_r), 0)
            # print('BC: pre_r=', pre_r)
        for r, p in enumerate(pre_r):
            m_rcc[r] *= p
        self.timer.stop('pre_r')
        self.timer.stop('AlbrechtBC')
        return m_rcc
Example No. 60
    def run(self, exposure, catalog):
        """!Measure aperture correction

        @param[in]  exposure  Exposure aperture corrections are being measured
                              on. The bounding box is retrieved from it, and
                              it is passed to the sourceSelector.
                              The output aperture correction map is *not*
                              added to the exposure; this is left to the
                              caller.

        @param[in]  catalog   SourceCatalog containing measurements to be used
                              to compute aperture corrections.

        @return an lsst.pipe.base.Struct containing:
        - apCorrMap: an aperture correction map (lsst.afw.image.ApCorrMap) that contains two entries
            for each flux field:
            - flux field (e.g. base_PsfFlux_instFlux): 2d model
            - flux sigma field (e.g. base_PsfFlux_instFluxErr): 2d model of error
        """
        bbox = exposure.getBBox()
        import lsstDebug
        display = lsstDebug.Info(__name__).display
        doPause = lsstDebug.Info(__name__).doPause

        self.log.info("Measuring aperture corrections for %d flux fields" % (len(self.toCorrect),))
        # First, create a subset of the catalog that contains only selected stars
        # with non-flagged reference fluxes.
        subset1 = [record for record in self.sourceSelector.run(catalog, exposure=exposure).sourceCat
                   if (not record.get(self.refFluxKeys.flag) and
                       numpy.isfinite(record.get(self.refFluxKeys.flux)))]

        apCorrMap = ApCorrMap()

        # Outer loop over the fields we want to correct
        for name, keys in self.toCorrect.items():
            fluxName = name + "_instFlux"
            fluxErrName = name + "_instFluxErr"

            # Create a more restricted subset with only the objects where the to-be-correct flux
            # is not flagged.
            fluxes = numpy.fromiter((record.get(keys.flux) for record in subset1), float)
            with numpy.errstate(invalid="ignore"):  # suppress NAN warnings
                isGood = numpy.logical_and.reduce([
                    numpy.fromiter((not record.get(keys.flag) for record in subset1), bool),
                    numpy.isfinite(fluxes),
                    fluxes > 0.0,
                ])
            subset2 = [record for record, good in zip(subset1, isGood) if good]

            # Check that we have enough data points that we have at least the minimum of degrees of
            # freedom specified in the config.
            if len(subset2) - 1 < self.config.minDegreesOfFreedom:
                if name in self.config.allowFailure:
                    self.log.warn("Unable to measure aperture correction for '%s': "
                                  "only %d sources, but require at least %d." %
                                  (name, len(subset2), self.config.minDegreesOfFreedom+1))
                    continue
                raise RuntimeError("Unable to measure aperture correction for required algorithm '%s': "
                                   "only %d sources, but require at least %d." %
                                   (name, len(subset2), self.config.minDegreesOfFreedom+1))

            # If we don't have enough data points to constrain the fit, reduce the order until we do
            ctrl = self.config.fitConfig.makeControl()
            while len(subset2) - ctrl.computeSize() < self.config.minDegreesOfFreedom:
                if ctrl.orderX > 0:
                    ctrl.orderX -= 1
                if ctrl.orderY > 0:
                    ctrl.orderY -= 1

            # Fill numpy arrays with positions and the ratio of the reference flux to the to-correct flux
            x = numpy.zeros(len(subset2), dtype=float)
            y = numpy.zeros(len(subset2), dtype=float)
            apCorrData = numpy.zeros(len(subset2), dtype=float)
            indices = numpy.arange(len(subset2), dtype=int)
            for n, record in enumerate(subset2):
                x[n] = record.getX()
                y[n] = record.getY()
                apCorrData[n] = record.get(self.refFluxKeys.flux)/record.get(keys.flux)

            for _i in range(self.config.numIter):

                # Do the fit, save it in the output map
                apCorrField = ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)

                if display:
                    plotApCorr(bbox, x, y, apCorrData, apCorrField, "%s, iteration %d" % (name, _i), doPause)

                # Compute errors empirically, using the RMS difference between the true reference flux and the
                # corrected to-be-corrected flux.
                apCorrDiffs = apCorrField.evaluate(x, y)
                apCorrDiffs -= apCorrData
                apCorrErr = numpy.mean(apCorrDiffs**2)**0.5

                # Clip bad data points
                apCorrDiffLim = self.config.numSigmaClip * apCorrErr
                with numpy.errstate(invalid="ignore"):  # suppress NAN warning
                    keep = numpy.fabs(apCorrDiffs) <= apCorrDiffLim
                x = x[keep]
                y = y[keep]
                apCorrData = apCorrData[keep]
                indices = indices[keep]

            # Final fit after clipping
            apCorrField = ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)

            self.log.info("Aperture correction for %s: RMS %f from %d" %
                          (name, numpy.mean((apCorrField.evaluate(x, y) - apCorrData)**2)**0.5, len(indices)))

            if display:
                plotApCorr(bbox, x, y, apCorrData, apCorrField, "%s, final" % (name,), doPause)

            # Save the result in the output map
            # The error is constant spatially (we could imagine being
            # more clever, but we're not yet sure if it's worth the effort).
            # We save the errors as a 0th-order ChebyshevBoundedField
            apCorrMap[fluxName] = apCorrField
            apCorrErrCoefficients = numpy.array([[apCorrErr]], dtype=float)
            apCorrMap[fluxErrName] = ChebyshevBoundedField(bbox, apCorrErrCoefficients)

            # Record which sources were used
            for i in indices:
                subset2[i].set(keys.used, True)

        return Struct(
            apCorrMap=apCorrMap,
        )
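The iterative fit-and-clip pattern above, reduced to a dependency-free 1-D sketch with numpy.polynomial standing in for ChebyshevBoundedField (all values hypothetical):

import numpy as np

x = np.linspace(0.0, 1.0, 200)
data = 2.0 + 0.5 * x + np.random.normal(0.0, 0.01, x.size)
data[::23] += 1.0  # inject a few outliers

num_sigma_clip = 3.0
for _ in range(4):
    coeffs = np.polynomial.chebyshev.chebfit(x, data, deg=2)
    resid = np.polynomial.chebyshev.chebval(x, coeffs) - data
    err = np.mean(resid ** 2) ** 0.5               # empirical RMS error
    keep = np.fabs(resid) <= num_sigma_clip * err  # clip bad data points
    x, data = x[keep], data[keep]
print(len(x), 'points kept, final RMS', err)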