Example #1
    def test_log2(self):
        """log2: should work fine on positive/negative numbers and zero"""
        self.assertEqual(log2(1),0)
        self.assertEqual(log2(2),1)
        self.assertEqual(log2(4),2)
        self.assertEqual(log2(8),3)

        #SUPPORT2425
        #with numpy_err(divide='ignore'):
        ori_err = numpy.geterr()
        numpy.seterr(divide='ignore')
        try:
            try:
                self.assertEqual(log2(0),float('-inf'))
            except (ValueError, OverflowError):      #platform-dependent
                pass
        finally:
            numpy.seterr(**ori_err)

        #SUPPORT2425
        ori_err = numpy.geterr()
        numpy.seterr(divide='raise')
        try:
        #with numpy_err(divide='raise'):
            self.assertRaises(FloatingPointError, log2, 0)
        finally:
            numpy.seterr(**ori_err)

        #nan is the only thing that's not equal to itself
        try:
            self.assertNotEqual(log2(-1),log2(-1)) #now nan
        except ValueError:
            pass
Example #2
def less(a, b):
    """return a < b, while comparing nan results in False without warning"""
    current_err_setting = np.geterr()
    np.seterr(invalid='ignore')
    res = a < b
    np.seterr(**current_err_setting)
    return res
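Note that less() restores the saved settings only if the comparison returns normally; there is no try/finally. A minimal sketch of the same idea using numpy's errstate context manager, which restores the state automatically even on error (less_robust is a hypothetical name, not from the original project):

import numpy as np

def less_robust(a, b):
    # Sketch: same semantics as less() above, but the saved error
    # state is restored even if the comparison raises.
    with np.errstate(invalid='ignore'):
        return a < b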
Example #3
def H_mag(num, den, z, H_max, H_min = None, log = False, div_by_0 = 'ignore'):
    """
    Calculate `|H(z)|` at the complex frequency(ies) `z` (scalar or
    array-like).  The function `H(z)` is given in polynomial form with numerator and
    denominator. When log = True, `20 log_10 (|H(z)|)` is returned.

    The result is clipped at H_min, H_max; clipping can be disabled by passing
    None as the argument.

    Parameters
    ----------
    num : float or array-like
        The numerator polynomial of H(z).
    den : float or array-like
        The denominator polynomial of H(z).
    z : float or array-like
        The complex frequency(ies) where `H(z)` is to be evaluated
    H_max : float
        The maximum value to which the result is clipped
    H_min : float, optional
        The minimum value to which the result is clipped (default: None,
        i.e. no lower clipping)
    log : boolean, optional
        When true, return 20 * log10 (|H(z)|). The clipping limits have to
        be given as dB in this case.
    div_by_0 : string, optional
        What to do when division by zero occurs during calculation (default:
        'ignore'). As the denominator of H(z) becomes 0 at each pole, warnings
        are suppressed by default. This parameter is passed to numpy.seterr(),
        hence other valid options are 'warn', 'raise' and 'print'.

    Returns
    -------
    H_mag : float or ndarray
        The magnitude |`H(z)`| for each value of `z`.
    """

    try: len(num)
    except TypeError:
        num_val = abs(num) # numerator is a scalar
    else:
        num_val = abs(np.polyval(num, z)) # evaluate numerator at z
    try: len(den)
    except TypeError:
        den_val = abs(den) # denominator is a scalar
    else:
        den_val = abs(np.polyval(den, z)) # evaluate denominator at z

    olderr = np.geterr()  # store current floating point error behaviour
    # handle division by zero as requested via div_by_0 (default 'ignore',
    # which just returns 'inf'):
    np.seterr(divide=div_by_0)

    if log:
        H_val = 20 * np.log10(num_val / den_val)
    else:
        H_val = num_val / den_val

    np.seterr(**olderr) # restore previous floating point error behaviour

    # clip result to H_min / H_max
    return np.clip(H_val, H_min, H_max)
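For illustration, a hypothetical call evaluating the magnitude response of the first-order system H(z) = 1/(z - 0.9) on the unit circle (the coefficient lists and clipping limit below are invented for the example):

import numpy as np

w = np.linspace(0, np.pi, 8)   # angular frequencies
z = np.exp(1j * w)             # points on the unit circle
mag = H_mag([1.0], [1.0, -0.9], z, H_max=100.0)  # |H(z)|, clipped at 100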
Example #4
def test_numerical_stability():
    """Check numerical stability."""
    old_settings = np.geterr()
    np.seterr(all="raise")

    X = np.array(
        [
            [152.08097839, 140.40744019, 129.75102234, 159.90493774],
            [142.50700378, 135.81935120, 117.82884979, 162.75781250],
            [127.28772736, 140.40744019, 129.75102234, 159.90493774],
            [132.37025452, 143.71923828, 138.35694885, 157.84558105],
            [103.10237122, 143.71928406, 138.35696411, 157.84559631],
            [127.71276855, 143.71923828, 138.35694885, 157.84558105],
            [120.91514587, 140.40744019, 129.75102234, 159.90493774],
        ]
    )

    y = np.array([1.0, 0.70209277, 0.53896582, 0.0, 0.90914464, 0.48026916, 0.49622521])

    dt = tree.DecisionTreeRegressor()
    dt.fit(X, y)
    dt.fit(X, -y)
    dt.fit(-X, y)
    dt.fit(-X, -y)

    np.seterr(**old_settings)
Example #5
def VMLookupTable():
    try:  # try loading ordered dict
        from collections import OrderedDict
    except ImportError:  # not installed, try on pythonpath
        try:
            from OrderedDict import OrderedDict
        except ImportError:
            "PyNoddy requires OrderedDict to run. Please download it and make it available on the pythonpath."

    kappa_lookup = OrderedDict()


    #disable numpy warnings
    err = np.geterr()
    np.seterr(all='ignore')
    
    
    # build lookup table
    for k in range(1000, 100, -20):
        ci = sc.stats.vonmises.interval(0.95, k)
        kappa_lookup[ci[1]] = k
    for k in range(100, 10, -1):
        ci = sc.stats.vonmises.interval(0.95, k)
        kappa_lookup[ci[1]] = k
    for k in np.arange(10, 0, -0.1):
        ci = sc.stats.vonmises.interval(0.95, k)
        kappa_lookup[ci[1]] = k

    #re-enable numpy warnings
    np.seterr(**err)
    
    # return lookup table
    return kappa_lookup
Example #6
 def setUp(self):
     # Most generic way to get the actual data directory.
     self.data_dir = os.path.join(os.path.dirname(os.path.abspath(
         inspect.getfile(inspect.currentframe()))), "data")
     self.image_dir = os.path.join(os.path.dirname(__file__), 'images')
     self.nperr = np.geterr()
     np.seterr(all='ignore')
Example #7
 def test_ppsd_plot_cumulative(self):
     """
     Test plot of ppsd example data, cumulative style.
     """
     # Catch underflow warnings due to plotting on log-scale.
     _t = np.geterr()
     np.seterr(all="ignore")
     try:
         with ImageComparison(self.path, 'ppsd_cumulative.png',
                              reltol=1.5) as ic:
             self.ppsd.plot(
                 show=False, show_coverage=True, show_histogram=True,
                 show_noise_models=True, grid=True, period_lim=(0.02, 100),
                 cumulative=True,
                 # This does not do anything but silences a warning that
                 # the `cumulative` and `max_percentage` arguments cannot
                 #  be used at the same time.
                 max_percentage=None)
             fig = plt.gcf()
             ax = fig.axes[0]
             ax.set_ylim(-160, -130)
             plt.draw()
             fig.savefig(ic.name)
     finally:
         np.seterr(**_t)
Example #8
    def __init__(self, hidden_layers=[32]):
        restore_these_settings = np.geterr()

        temp_settings = restore_these_settings.copy()
        temp_settings["over"] = "ignore"
        temp_settings["under"] = "ignore"

        np.seterr(**temp_settings)
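        # NOTE: the next line immediately restores the saved settings, so the
        # temporary over/under = "ignore" settings never actually take effect;
        # presumably the overflow-prone work was meant to happen in between.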
        np.seterr(**restore_these_settings)

        self.input_nodes = 65  # number of input nodes + 1 for the bias
        self.hidden_layers = hidden_layers  # list containing the number of hidden nodes and number of nodes per layer
        self.output_nodes = 10  # number of output nodes
        self.alpha = 1.0  # scalar used when back propagating the errors

        # initialize the weights, which are a list of nxm 2d arrays where each array corresponds to a layer
        self.weights = [
            np.random.random_sample((self.input_nodes, self.hidden_layers[0])) * .09 + .01
        ]

        for i in range(len(hidden_layers) - 1):
            self.weights.append(
                np.random.random_sample((self.hidden_layers[i], self.hidden_layers[i + 1])) * .09 + .01)

        self.weights.append(
            np.random.random_sample((self.hidden_layers[-1], self.output_nodes)) * .09 + .01)
Example #9
def test_numpy_err_state_is_default():
    expected = {"over": "warn", "divide": "warn",
                "invalid": "warn", "under": "ignore"}
    import numpy as np

    # The error state should be unchanged after that import.
    assert np.geterr() == expected
Example #10
    def test_scale_trace(self):
        """scale_trace should scale trace to correct values"""
        #should scale to -1 by default
        #WARNING: won't work with integer matrices
        m = array([[-2., 0],[0,-2]])
        scale_trace(m)
        self.assertFloatEqual(m, [[-0.5, 0],[0,-0.5]])
        #should work even with zero rows
        m = array([
                [1.0,2,3,4],
                [2,4,4,0],
                [1,1,0,1],
                [0,0,0,0]
        ])
        m_orig = m.copy()
        scale_trace(m)
        self.assertFloatEqual(m, m_orig / -5)
        #but should fail if trace is zero
        m = array([[0,1,1],[1,0,1],[1,1,0]])

        #SUPPORT2425
        ori_err = numpy.geterr()
        numpy.seterr(divide='raise')
        try:
        #with numpy_err(divide='raise'):
            self.assertRaises((ZeroDivisionError, FloatingPointError), \
                scale_trace, m)
        finally:
            numpy.seterr(**ori_err)
Example #11
 def test_smoothingMatrix(self):
     """
     Tests some aspects of the matrix.
     """
     # Disable div by zero errors.
     temp = np.geterr()
     np.seterr(all="ignore")
     frequencies = np.array([0.0, 1.0, 2.0, 10.0, 25.0, 50.0, 100.0], dtype=np.float32)
     matrix = calculateSmoothingMatrix(frequencies, 20.0)
     self.assertEqual(matrix.dtype, np.float32)
     for _i, freq in enumerate(frequencies):
         np.testing.assert_array_equal(matrix[_i], konnoOhmachiSmoothingWindow(frequencies, freq, 20.0))
         # Should not be normalized. Test only for larger frequencies
         # because smaller ones have a smaller window.
         if freq >= 10.0:
             self.assertTrue(matrix[_i].sum() > 1.0)
     # Input should be output dtype.
     frequencies = np.array([0.0, 1.0, 2.0, 10.0, 25.0, 50.0, 100.0], dtype=np.float64)
     matrix = calculateSmoothingMatrix(frequencies, 20.0)
     self.assertEqual(matrix.dtype, np.float64)
     # Check normalization.
     frequencies = np.array([0.0, 1.0, 2.0, 10.0, 25.0, 50.0, 100.0], dtype=np.float32)
     matrix = calculateSmoothingMatrix(frequencies, 20.0, normalize=True)
     self.assertEqual(matrix.dtype, np.float32)
     for _i, freq in enumerate(frequencies):
         np.testing.assert_array_equal(
             matrix[_i], konnoOhmachiSmoothingWindow(frequencies, freq, 20.0, normalize=True)
         )
         # Should now be normalized, so each row sums to 1.
         self.assertAlmostEqual(matrix[_i].sum(), 1.0, 5)
     np.seterr(**temp)
Example #12
 def test_smoothingWindow(self):
     """
     Tests the creation of the smoothing window.
     """
     # Disable div by zero errors.
     temp = np.geterr()
     np.seterr(all="ignore")
     # Frequency of zero results in a delta peak at zero (there usually
     # should be just one zero in the frequency array).
     window = konnoOhmachiSmoothingWindow(np.array([0, 1, 0, 3], dtype=np.float32), 0)
     np.testing.assert_array_equal(window, np.array([1, 0, 1, 0], dtype=np.float32))
     # Wrong dtypes raise.
     self.assertRaises(ValueError, konnoOhmachiSmoothingWindow, np.arange(10, dtype=np.int32), 10)
     # If frequency=center frequency, log results in infinity. Limit of
     # whole formulae is 1.
     window = konnoOhmachiSmoothingWindow(np.array([5.0, 1.0, 5.0, 2.0], dtype=np.float32), 5)
     np.testing.assert_array_equal(window[[0, 2]], np.array([1.0, 1.0], dtype=np.float32))
     # Output dtype should be the dtype of frequencies.
     self.assertEqual(konnoOhmachiSmoothingWindow(np.array([1, 6, 12], dtype=np.float32), 5).dtype, np.float32)
     self.assertEqual(konnoOhmachiSmoothingWindow(np.array([1, 6, 12], dtype=np.float64), 5).dtype, np.float64)
     # Check if normalizing works.
     window = konnoOhmachiSmoothingWindow(self.frequencies, 20)
     self.assertTrue(window.sum() > 1.0)
     window = konnoOhmachiSmoothingWindow(self.frequencies, 20, normalize=True)
     self.assertAlmostEqual(window.sum(), 1.0, 5)
     # Just one more test to check that there are no invalid values and the
     # range is ok.
     window = konnoOhmachiSmoothingWindow(self.frequencies, 20)
     self.assertEqual(np.any(np.isnan(window)), False)
     self.assertEqual(np.any(np.isinf(window)), False)
     self.assertTrue(np.all(window <= 1.0))
     self.assertTrue(np.all(window >= 0.0))
     np.seterr(**temp)
Example #13
def gaussian_highpass(img, high_cutoff, pad=1):
    ''' Apply a Gaussian highpass filter to an image
    
    .. seealso:: :py:func:`gaussian_highpass_kernel`
    
    :Parameters:
    
    img : array
          Image
    high_cutoff : float
                 High-frequency cutoff
    pad : int
          Padding
    
    :Returns:
    
    out : array
          Filtered image
    '''
    
    ctype = numpy.complex128 if img.dtype == numpy.float64 else numpy.complex64
    if pad > 1:
        shape = img.shape
        img = pad_image(img.astype(ctype), (int(img.shape[0]*pad), int(img.shape[1]*pad)), 'e')
    else: img = img.astype(ctype)
    state = numpy.geterr()
    numpy.seterr(all='ignore')
    img = filter_image(img, gaussian_highpass_kernel(img.shape, high_cutoff, img.dtype), pad)
    numpy.seterr(**state)
    if pad > 1: img = depad(img, shape)
    return img
Example #14
 def wrapped(e):
     old_settings = np.geterr()
     np.seterr(invalid="raise")
     try:
         return func(e)
     except exception:
         warnings.warn(msg + " " + e.fname, exc.EmptyStep)
     finally:
         # restore the saved settings on every path, not only after a warning
         np.seterr(**old_settings)
Example #15
def test_numpy_errstate_is_default():
    # The defaults since numpy 1.6.0
    expected = {"over": "warn", "divide": "warn", "invalid": "warn", "under": "ignore"}
    import numpy as np
    from pandas.compat import numpy  # noqa

    # The errstate should be unchanged after that import.
    tm.assert_equal(np.geterr(), expected)
Example #16
def test_numpy_errstate_is_default():
    # The defaults since numpy 1.6.0
    expected = {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
                'under': 'ignore'}
    import numpy as np
    from pandas.compat import numpy  # noqa
    # The errstate should be unchanged after that import.
    assert np.geterr() == expected
Example #17
 def __enter__(self):
     try:
         import numpy
     except ImportError:
         return None
     self.errstate = numpy.geterr()
     numpy.seterr(invalid="ignore")
     return numpy
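Only __enter__ is shown in this snippet; presumably a matching __exit__ restores the saved state. A hypothetical sketch of such a counterpart (an assumption, not taken from the original source):

 def __exit__(self, exc_type, exc_value, traceback):
     # Restore the error state saved in __enter__, if numpy was importable.
     errstate = getattr(self, 'errstate', None)
     if errstate is not None:
         import numpy
         numpy.seterr(**errstate)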
Example #18
    def summarize(self, interval, bins=None, method='summarize',
                  function='mean'):

        # We may be dividing by zero in some cases, which raises a warning in
        # NumPy based on the IEEE 754 standard (see
        # http://docs.scipy.org/doc/numpy/reference/generated/
        #       numpy.seterr.html)
        #
        # That's OK -- we're expecting that to happen sometimes. So temporarily
        # disable this error reporting for the duration of this method.
        orig = np.geterr()['invalid']
        np.seterr(invalid='ignore')

        if (bins is None) or (method == 'get_as_array'):
            bw = BigWigFile(open(self.fn))
            s = bw.get_as_array(
                interval.chrom,
                interval.start,
                interval.stop,)
            if s is None:
                s = np.zeros((interval.stop - interval.start,))
            else:
                s[np.isnan(s)] = 0

        elif method == 'ucsc_summarize':
            if function in ['mean', 'min', 'max', 'std', 'coverage']:
                return self.ucsc_summarize(interval, bins, function=function)
            else:
                raise ValueError('function "%s" not supported by UCSC\'s '
                                 'bigWigSummary' % function)

        else:
            bw = BigWigFile(open(self.fn))
            s = bw.summarize(
                interval.chrom,
                interval.start,
                interval.stop, bins)
            if s is None:
                s = np.zeros((bins,))
            else:
                if function == 'sum':
                    s = s.sum_data
                if function == 'mean':
                    s = s.sum_data / s.valid_count
                    s[np.isnan(s)] = 0
                if function == 'min':
                    s = s.min_val
                    s[np.isinf(s)] = 0
                if function == 'max':
                    s = s.max_val
                    s[np.isinf(s)] = 0
                if function == 'std':
                    s = (s.sum_squares / s.valid_count)
                    s[np.isnan(s)] = 0

        # Reset NumPy error reporting
        np.seterr(invalid=orig)
        return s
Example #19
 def check_typecast(self, val, dtype):
     operators = [operator.add, operator.sub, operator.mul, operator.div]
     for op in operators:
         err = numpy.geterr()
         numpy.seterr(divide='ignore', invalid='ignore')
         a = op(val, (testing.shaped_arange((5,), numpy, dtype) - 2))
         numpy.seterr(**err)
         b = op(val, (testing.shaped_arange((5,), cupy, dtype) - 2))
         self.assertEqual(a.dtype, b.dtype)
Example #20
    def _prepare_amr_slice(self, selection):
        """ return list of patches that contain selection """
        # FIXME: it's not good to reach into src_field[0]'s private methods
        # like this, but it's also not good to implement these things twice
        # print("??", len(self.patches))
        if len(self.patches) == 0:
            raise ValueError("AMR field must contain patches to be slicable")
        selection, _ = self.patches[0]._prepare_slice(selection)
        extent = self.patches[0]._src_crds.get_slice_extent(selection)

        inds = []
        maybe = []  # these are patches that look like they contain selection
                    # but might not due to finite precision errors when
                    # calculating xh
        for i, fld in enumerate(self.patches):
            # - if xl - atol > the extent of the slice in any direction, then
            #   there's no overlap
            # - if xh <= the lower corner of the slice in any direction, then
            #   there's no overlap
            # the atol and equals are done to match cases where extent overlaps
            # the lower corner, but not the upper corner

            # logic goes this way because extent has NaNs in
            # dimensions that aren't specified in selection... super-kludge

            # also, temporarily disable warnings on NaNs in numpy
            invalid_err_level = np.geterr()['invalid']
            np.seterr(invalid='ignore')
            atol = 100 * np.finfo(fld.crds.xl_nc.dtype).eps
            if (not np.any(np.logical_or(fld.crds.xl_nc - atol > extent[1],
                                         fld.crds.xh_nc <= extent[0]))):
                if np.any(np.isclose(fld.crds.xh_nc, extent[0], atol=atol)):
                    maybe.append(i)
                else:
                    inds.append(i)
            np.seterr(invalid=invalid_err_level)
        # if we found some maybes, but no real hits, then use the maybes
        if maybe and not inds:
            inds = maybe

        if len(inds) == 0:
            viscid.logger.error("selection {0} not in any patch @ time {1}"
                                "".format(selection, self.patches[0].time))
            if self.skeleton:
                s = ("         skeleton: xl= {0} xh = {1}"
                     "".format(self.skeleton.global_xl,
                               self.skeleton.global_xh))
                viscid.logger.error(s)
            inds = None
            flds = None
        elif len(inds) == 1:
            inds = inds[0]
            flds = self.patches[inds]
        else:
            flds = [self.patches[i] for i in inds]

        return flds, inds
Example #21
 def setUp(self):
     # directory where the test files are located
     self.path = PATH
     self.path_images = os.path.join(PATH, os.pardir, "images")
     # some pre-computed ppsd used for plotting tests:
     # (ppsd._psd_periods was downcast to np.float16 to save space)
     self.example_ppsd_npz = os.path.join(PATH, "ppsd_kw1_ehz.npz")
     # ignore some "RuntimeWarning: underflow encountered in multiply"
     self.nperr = np.geterr()
     np.seterr(all='ignore')
Example #22
File: run.py  Project: j-faria/OPEN
	def do_replot(star):
		d, attrs, \
			x,y,err, \
			freq,power_rv,power_fwhm,power_bis,power_rhk, \
			bfreq, bpower_rv = get_data(h5file, star)

		# w1.removeItem(label)
		try: 
			teff = attrs.teff
			feh, feh_error = attrs.feh, attrs.feh_error
			logg, logg_error = attrs.logg, attrs.logg_error
			prot = attrs.prot
		except AttributeError:
			teff = feh = feh_error = logg = logg_error = 0
			prot = 0
		l = w1.children()[1]
		l.setText(label_text % (star, attrs.bmag, attrs.vmag,
			                     attrs.spect_type, 
			                     teff, feh, feh_error, logg, logg_error,
			                     len(x), 'HARPS')
				   )

		# w1.addWidget(label, row=0, col=0)

		## dock 3, the radial velocity data
		w3.plot(x, y, pen=None, symbol='o', symbolSize=5)
		errItem = pg.ErrorBarItem(x=x, y=y, height=err, beam=0.5)
		w3.addItem(errItem)
		w3.autoRange()

		## dock 5, periodogram BIS
		w5.plot(1./freq, power_bis, title="Periodogram BIS")

		## dock 6, periodogram RVs
		w6.plot(1./freq, power_rv, title="Periodogram")
		if bfreq is not None:
			w6.plot(1./bfreq, bpower_rv*max(power_rv), pen='r')
		w6.autoRange()

		## make sure we catch the FP error if prot=0 for some reason
		old_settings = np.geterr()
		np.seterr(divide='raise')
		try:
			vline_prot.setPos(np.log10(prot))
		except FloatingPointError:
			pass
		w6.addItem(vline_prot, ignoreBounds=True)
		w6.addItem(vLine, ignoreBounds=True)
		w6.addItem(hLine, ignoreBounds=True)
		
		## dock 7, periodogram FWHM
		w7.plot(1./freq, power_fwhm, title="Periodogram FWHM")

		## dock 8, periodogram RHK
		w8.plot(1./freq, power_rhk, title="Periodogram RHK")
Example #23
 def __init__(self,
              decimals=None,
              casting='same_kind',
              err=None,
              dtype=None,
              sparse=False,
              **kw):
     err = err if err is not None else np.geterr()
     super(TensorAround, self).__init__(_decimals=decimals,
                                        _casting=casting,
                                        _err=err,
                                        _dtype=dtype,
                                        _sparse=sparse,
                                        **kw)
Example #24
    def test_plot_tfr(self):
        n = 295
        t, dt = np.linspace(0., 20 * np.pi, n, retstep=True)
        sig = np.sin(t)

        _t = np.geterr()
        np.seterr(all="ignore")
        try:
            with ImageComparison(self.path,
                                 'time_frequency_representation.png') as ic:
                plot_tfr(sig, dt=dt, show=False)
                plt.savefig(ic.name)
        finally:
            np.seterr(**_t)
Example #25
 def __init__(self,
              deg=None,
              casting='same_kind',
              err=None,
              dtype=None,
              sparse=False,
              **kw):
     err = err if err is not None else np.geterr()
     super().__init__(_deg=deg,
                      _casting=casting,
                      _err=err,
                      _dtype=dtype,
                      _sparse=sparse,
                      **kw)
Example #26
def u_star_func(u3, z3, zom, psi_z3, wind_coef=1):
    """"""
    u_star = np.array(zom, copy=True, ndmin=1)
    np.reciprocal(u_star, out=u_star)
    u_star *= z3
    oldsettings = np.geterr()
    np.seterr(invalid='ignore')
    np.log(u_star, out=u_star)
    np.seterr(invalid=oldsettings['invalid'])
    u_star -= psi_z3
    np.reciprocal(u_star, out=u_star)
    u_star *= (u3 * wind_coef * 0.41)
    # u_star = ((u3 * wind_coef * 0.41) / (np.log(z3 / zom) - psi_z3))
    return u_star
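The commented-out formula above suggests a more compact formulation. Assuming the goal is only to silence the invalid-value warning from np.log, a sketch using the errstate context manager (u_star_simple is a hypothetical name, not a drop-in replacement for the in-place version):

def u_star_simple(u3, z3, zom, psi_z3, wind_coef=1):
    # Same formula as the commented line in u_star_func; errstate
    # restores the error settings automatically on exit.
    with np.errstate(invalid='ignore'):
        return (u3 * wind_coef * 0.41) / (np.log(z3 / zom) - psi_z3)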
Example #27
def pip_array_cn(p_a, poly):
    """Return bool array of 2D points inclusion, True where point is inside polygon.
    
    Uses crossing number algorithm.

    arguments:
       p_a (2D numpy float array): set of xy points to test for inclusion in polygon
       poly (2D numpy array, tuple or list of tuples or lists of at least 2 floats): the xy points
          defining a polygon

    returns:
       numpy boolean vector: True where corresponding point in p_a is within the polygon

    note:
       if the polygon is represented by a closed resqpy Polyline, pass Polyline.coordinates as poly
    """

    # p_array should be a numpy array of 2 or more axes; the final axis has extent at least 2, being x, y, ...
    # returned boolean array has shape of p_array less the final axis

    elements = np.prod(list(p_a.shape)[:-1], dtype=int)
    if elements == 0:
        return np.zeros((0, ), dtype=bool)
    p = p_a.reshape((elements, -1))
    crossings = np.zeros((elements, ), dtype=int)

    np_err_dict = np.geterr()
    np.seterr(divide='ignore', invalid='ignore')
    for edge in range(len(poly)):
        v1 = poly[edge - 1]
        v2 = poly[edge]
        crossings += np.where(
            np.logical_and(
                np.logical_and(
                    np.logical_or(v1[0] > p[:, 0], v2[0] > p[:, 0]),
                    np.logical_or(
                        np.logical_and(v1[1] <= p[:, 1], v2[1] > p[:, 1]),
                        np.logical_and(v1[1] > p[:, 1], v2[1] <= p[:, 1]))),
                np.logical_or(np.logical_and(v1[0] > p[:, 0], v2[0] > p[:, 0]),
                              (p[:, 0] <
                               (v1[0] + (v2[0] - v1[0]) * (p[:, 1] - v1[1]) /
                                (v2[1] - v1[1]))))), 1, 0)
    if 'divide' in np_err_dict:
        np.seterr(divide=np_err_dict['divide'])
    if 'invalid' in np_err_dict:
        np.seterr(invalid=np_err_dict['invalid'])

    return np.array(np.bitwise_and(crossings, 1),
                    dtype=bool).reshape(list(p_a.shape)[:-1])
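An illustrative call with hypothetical data, testing two points against a unit square:

import numpy as np

square = np.array([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
pts = np.array([(0.5, 0.5), (1.5, 0.5)])
pip_array_cn(pts, square)   # expected: array([ True, False])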
Example #28
def calc_nlqq(qest, clXX, clXY, clYY, flX, flY):
    errs = np.geterr(); np.seterr(divide='ignore', invalid='ignore')
    
    print( "[%s]"%watch.elapsed(), "calculating flat-sky noise level for estimator of type", type(qest))
    clqq_flatsky = qest.fill_clqq(ql.maps.cfft(nx,dx), clXX*flX*flX, clXY*flX*flY, clYY*flY*flY)
    resp_flatsky = qest.fill_resp(qest, ql.maps.cfft(nx, dx), flX, flY)
    nlqq_flatsky = clqq_flatsky / resp_flatsky**2
    
    print("[%s]"%watch.elapsed(), "calculating full-sky noise level for estimator of type", type(qest))
    clqq_fullsky = qest.fill_clqq(np.zeros(lmax+1, dtype=np.complex128), clXX*flX*flX, clXY*flX*flY, clYY*flY*flY)
    resp_fullsky = qest.fill_resp(qest, np.zeros(lmax+1, dtype=np.complex128), flX, flY)
    nlqq_fullsky = clqq_fullsky / resp_fullsky**2

    np.seterr(**errs)
    return nlqq_flatsky, nlqq_fullsky
Example #29
 def _edges_intersect(self, edge1, edge2):
     """
     Return 1 if edges intersect completely (endpoints excluded)
     """
     h12 = self._intersect_edge_arrays(self.pts[np.array(edge1)], 
                                       self.pts[np.array(edge2)])
     h21 = self._intersect_edge_arrays(self.pts[np.array(edge2)], 
                                       self.pts[np.array(edge1)])
     err = np.geterr()
     np.seterr(divide='ignore', invalid='ignore')
     try:
         out = (0 < h12 < 1) and (0 < h21 < 1)
     finally:
         np.seterr(**err)
     return out
Example #30
def convert_to_log(x, zero_case=None):
    """Return the log of a matrix.

    Silences the divide-by-zero warning before calculating the log. If
    zero_case is specified, all the -inf's in the resulting logged matrix are
    replaced with that value.

    """
    old = np.geterr()['divide']
    np.seterr(divide='ignore')
    out = np.log(x)
    if zero_case is not None:
        out = np.where(out == -np.inf, zero_case, out)
    np.seterr(divide=old)
    return out
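For example (illustrative values; np.log(0) is -inf, np.log(1) is 0, np.log(e) is 1):

x = np.array([0.0, 1.0, np.e])
convert_to_log(x)                  # -> array([-inf,  0.,  1.])
convert_to_log(x, zero_case=0.0)   # -> array([0., 0., 1.])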
Example #31
def convert_to_log(x, zero_case=None):
    """Return the log of a matrix.

    Silences the divide-by-zero warning before calculating the log. If
    zero_case is specified, all the -inf's in the resulting logged matrix are
    replaced with that value.

    """
    old = np.geterr()['divide']
    np.seterr(divide='ignore')
    out = np.log(x)
    if zero_case is not None:
        out = np.where(out == -np.inf, zero_case, out)
    np.seterr(divide=old)
    return out
Example #32
def calc_nlqq(qest, clXX, clXY, clYY, flX, flY):
    errs = np.geterr(); np.seterr(divide='ignore', invalid='ignore')

    print "[%s]"%watch.elapsed(), "calculating flat-sky noise level for estimator of type", type(qest)
    clqq_flatsky = qest.fill_clqq(ql.maps.cfft(nx,dx), clXX*flX*flX, clXY*flX*flY, clYY*flY*flY)
    resp_flatsky = qest.fill_resp(qest, ql.maps.cfft(nx, dx), flX, flY)
    nlqq_flatsky = clqq_flatsky / resp_flatsky**2

    print "[%s]"%watch.elapsed(), "calculating full-sky noise level for estimator of type", type(qest)
    clqq_fullsky = qest.fill_clqq(np.zeros(lmax+1, dtype=np.complex), clXX*flX*flX, clXY*flX*flY, clYY*flY*flY)
    resp_fullsky = qest.fill_resp(qest, np.zeros(lmax+1, dtype=np.complex), flX, flY)
    nlqq_fullsky = clqq_fullsky / resp_fullsky**2

    np.seterr(**errs)
    return nlqq_flatsky, nlqq_fullsky
Example #33
def print_settings():

    print "data_dir: %s" % orca.get_injectable('data_dir')
    print "configs_dir: %s" % orca.get_injectable('configs_dir')
    print "households_sample_size = %s" % orca.get_injectable(
        'settings')['households_sample_size']
    print "preload_3d_skims = %s" % orca.get_injectable('preload_3d_skims')
    print "chunk_size = %s" % orca.get_injectable('chunk_size')
    print "hh_chunk_size = %s" % orca.get_injectable('hh_chunk_size')

    print "garbage collection enabled: %s" % gc.isenabled()
    print "garbage collection threshold: %s" % str(gc.get_threshold())
    print "numpy floating-point error-handling settings: %s" % np.geterr()
    print "pandas display options max_rows=%s max_columns=%s" % \
          (pd.options.display.max_rows, pd.options.display.max_columns)
Example #34
 def edges_intersect(self, edge1, edge2):
     """
     Return 1 if edges intersect completely (endpoints excluded)
     """
     h12 = self.intersect_edge_arrays(self.pts[np.array(edge1)],
                                      self.pts[np.array(edge2)])
     h21 = self.intersect_edge_arrays(self.pts[np.array(edge2)],
                                      self.pts[np.array(edge1)])
     err = np.geterr()
     np.seterr(divide='ignore', invalid='ignore')
     try:
         out = (0 < h12 < 1) and (0 < h21 < 1)
     finally:
         np.seterr(**err)
     return out
Example #35
    def apply(self, times, data):
        """
        Apply this filter.
        """
        pixels = data.shape[1:]

        # Average in time over all pixels in the image
        time_median = np.zeros(data.shape)
        for i in range(pixels[0]):
            for j in range(pixels[1]):
                time_median[:, i, j] = scipy.ndimage.median_filter(data[:, i,
                                                                        j],
                                                                   size=3)

        # Ignore divide-by-zero for a bit
        errs = np.geterr()
        np.seterr(divide='ignore')

        # Find pixels which vary a lot, but only briefly
        #   Basically, we compare the value of each pixel v(t) at times
        #   (t1, t2, t3). If v(t1)~v(t3), but v(t2) ">>" v(t1), then we
        #   interpolate the value of the pixel in time.
        tf_forward = np.abs(time_median[:-1, :] / data[1:, :])
        tf_backward = np.abs(time_median[1:, :] / data[:-1, :])

        # Restore 'divide-by-zero' warnings
        np.seterr(**errs)

        # Locate all pixels that exceed the threshold
        # (make 'tfilter' all True to begin with)
        tfilter = (data >= 0)
        tfilter[1:-1] = (tf_forward[:-1] < self.threshold) & (tf_backward[1:] <
                                                              self.threshold)

        # Interpolate anomalous pixels
        sframes = np.copy(data)
        for i in range(pixels[0]):
            for j in range(pixels[1]):
                tslice = np.copy(data[:, i, j])
                tf_slice = tfilter[:, i, j]

                # In every time point where this pixel is anomalously bright,
                # we interpolate its value based on its previous and next (in time) value
                tslice[tf_slice] = np.interp(times[tf_slice], times[~tf_slice],
                                             tslice[~tf_slice])
                sframes[:, i, j] = tslice

        return sframes
Example #36
    def calc_ci(self, para, direction):
        """
        Calculate the ci for a single parameter for a single direction.
        Direction is either positive or negative 1.
        """

        if isinstance(para, str):
            para = self.params[para]

        # function used to calculate the probability
        calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
        if self.trace:
            x = [i.value for i in self.params.values()]
            self.trace_dict[para.name].append(x + [0])

        para.vary = False
        limit, max_prob = self.find_limit(para, direction)
        start_val = para.value.copy()
        a_limit = start_val.copy()
        ret = []
        orig_warn_settings = np.geterr()
        np.seterr(all='ignore')
        for prob in self.sigmas:
            if prob > max_prob:
                ret.append((prob, direction * np.inf))
                continue

            try:
                val = brentq(calc_prob, a_limit, limit, rtol=.5e-4, args=prob)

            except ValueError:
                self.reset_vals()
                try:
                    val = brentq(calc_prob,
                                 start_val,
                                 limit,
                                 rtol=.5e-4,
                                 args=prob)
                except ValueError:
                    val = np.nan

            a_limit = val
            ret.append((prob, val))

        para.vary = True
        self.reset_vals()
        np.seterr(**orig_warn_settings)
        return ret
Example #37
    def test_big_vals(nums):
        """
        Test that nums is within iinfo limits
        :param nums: numpy array
        :return: bool numpy array
        Note: switches off and restores the warning on nans. An alternative:
        ok = np.zeros_like(nums, dtype=np.bool8)
        not_na = ~np.isnan(nums)
        nums_not_na = nums[not_na]
        """

        save_invalid = np.geterr()['invalid']  # save floating-point errors handling
        np.seterr(invalid='ignore')  # switch off warning on nans
        ok = np.logical_and(iinfo.min < nums, nums < iinfo.max)
        np.seterr(invalid=save_invalid)  # restore floating-point errors handling
        return ok
Example #38
 def __init__(self,
              rtol=None,
              atol=None,
              equal_nan=None,
              casting="same_kind",
              err=None,
              sparse=False,
              **kw):
     err = err if err is not None else np.geterr()
     super().__init__(_rtol=rtol,
                      _atol=atol,
                      _equal_nan=equal_nan,
                      _casting=casting,
                      _err=err,
                      sparse=sparse,
                      **kw)
Example #39
    def calc_ci(self, para, direction):
        """
        Calculate the ci for a single parameter for a single direction.
        Direction is either positive or negative 1.
        """

        if isinstance(para, str):
            para = self.minimizer.params[para]

        # function used to calculate the probability
        calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
        if self.trace:
            x = [i.value for i in self.minimizer.params.values()]
            self.trace_dict[para.name].append(x + [0])

        para.vary = False
        self.minimizer.prepare_fit(self.params)
        limit, max_prob = self.find_limit(para, direction)
        start_val = para.value.copy()
        a_limit = start_val.copy()
        ret = []
        orig_warn_settings = np.geterr()
        np.seterr(all='ignore')
        for prob in self.sigmas:
            if prob > max_prob:
                ret.append((prob, direction*np.inf))
                continue

            try:
                val = brentq(calc_prob, a_limit,
                             limit, rtol=.5e-4, args=prob)

            except ValueError:
                self.reset_vals()
                try:
                    val = brentq(calc_prob, start_val,
                                 limit, rtol=.5e-4, args=prob)
                except ValueError:
                    val = np.nan

            a_limit = val
            ret.append((prob, val))

        para.vary = True
        self.reset_vals()
        np.seterr(**orig_warn_settings)
        return ret
Example #40
 def _find_edge_intersections(self):
     """
     Return a dictionary containing, for each edge in self.edges, a list
     of the positions at which the edge should be split.
     """
     edges = self.pts[self.edges]
     cuts = {}  # { edge: [(intercept, point), ...], ... }
     for i in range(edges.shape[0]-1):
         # intersection of edge i onto all others
         int1 = self._intersect_edge_arrays(edges[i:i+1], edges[i+1:])
         # intersection of all edges onto edge i
         int2 = self._intersect_edge_arrays(edges[i+1:], edges[i:i+1])
     
         # select for pairs that intersect
         err = np.geterr()
         np.seterr(divide='ignore', invalid='ignore')
         try:
             mask1 = (int1 >= 0) & (int1 <= 1)
             mask2 = (int2 >= 0) & (int2 <= 1)
             mask3 = mask1 & mask2  # all intersections
         finally:
             np.seterr(**err)
         
         # compute points of intersection
         inds = np.argwhere(mask3)[:, 0]
         if len(inds) == 0:
             continue
         h = int2[inds][:, np.newaxis]
         pts = (edges[i, 0][np.newaxis, :] * (1.0 - h) + 
                edges[i, 1][np.newaxis, :] * h)
         
         # record for all edges the location of cut points
         edge_cuts = cuts.setdefault(i, [])
         for j, ind in enumerate(inds):
             if 0 < int2[ind] < 1:
                 edge_cuts.append((int2[ind], pts[j]))
             if 0 < int1[ind] < 1:
                 other_cuts = cuts.setdefault(ind+i+1, [])
                 other_cuts.append((int1[ind], pts[j]))
     
     # sort all cut lists by intercept, remove duplicates
     for k, v in cuts.items():
         v.sort(key=lambda x: x[0])
         for i in range(len(v)-2, -1, -1):
             if v[i][0] == v[i+1][0]:
                 v.pop(i+1)
     return cuts
Example #41
    def reduce(self, other, op=add, normalize_input=True, normalize_output=True):
        """Reduces two profiles with some operator and returns a new Profile
        
        other: Profile object
        op: operator (e.g. add, subtract, multiply, divide)
        normalize_input: whether the input profiles will be normalized
            before collapsing. The default is True.
        normalize_output: whether the output profile will be normalized.
            The default is True

        This function is intended for use on normalized profiles. For
        safety it'll try to normalize the data before collapsing them.
        If you do not normalize your data and set normalize_input to 
        False, you might get unexpected results. 
        
        It does check whether self.Data and other.Data have the same shape
        It does not check whether self and other have the same 
        CharOrder. The resulting Profile gets the alphabet and
        char order from self.

        """
        if self.Data.shape != other.Data.shape:
            raise ProfileError, "Cannot collapse profiles of different size: %s, %s" % (
                self.Data.shape,
                other.Data.shape,
            )
        if normalize_input:
            self.normalizePositions()
            other.normalizePositions()

        try:
            ##SUPPORT2425
            ori_err = numpy.geterr()
            numpy.seterr(divide="raise")
            try:
                new_data = op(self.Data, other.Data)
            finally:
                numpy.seterr(**ori_err)
            # with numpy_err(divide='raise'):
            # new_data = op(self.Data, other.Data)
        except (OverflowError, ZeroDivisionError, FloatingPointError):
            raise ProfileError, "Can't do operation on input profiles"
        result = Profile(new_data, self.Alphabet, self.CharOrder)

        if normalize_output:
            result.normalizePositions()
        return result
Example #42
    def find_edge_intersections(self):
        """
        Return a dictionary containing, for each edge in self.edges, a list
        of the positions at which the edge should be split.
        """
        edges = self.pts[self.edges]
        cuts = {}  # { edge: [(intercept, point), ...], ... }
        for i in range(edges.shape[0] - 1):
            # intersection of edge i onto all others
            int1 = self.intersect_edge_arrays(edges[i:i + 1], edges[i + 1:])
            # intersection of all edges onto edge i
            int2 = self.intersect_edge_arrays(edges[i + 1:], edges[i:i + 1])

            # select for pairs that intersect
            err = np.geterr()
            np.seterr(divide='ignore', invalid='ignore')
            try:
                mask1 = (int1 >= 0) & (int1 <= 1)
                mask2 = (int2 >= 0) & (int2 <= 1)
                mask3 = mask1 & mask2  # all intersections
            finally:
                np.seterr(**err)

            # compute points of intersection
            inds = np.argwhere(mask3)[:, 0]
            if len(inds) == 0:
                continue
            h = int2[inds][:, np.newaxis]
            pts = (edges[i, 0][np.newaxis, :] * (1.0 - h) +
                   edges[i, 1][np.newaxis, :] * h)

            # record for all edges the location of cut points
            edge_cuts = cuts.setdefault(i, [])
            for j, ind in enumerate(inds):
                if 0 < int2[ind] < 1:
                    edge_cuts.append((int2[ind], pts[j]))
                if 0 < int1[ind] < 1:
                    other_cuts = cuts.setdefault(ind + i + 1, [])
                    other_cuts.append((int1[ind], pts[j]))

        # sort all cut lists by intercept, remove duplicates
        for k, v in cuts.items():
            v.sort(key=lambda x: x[0])
            for i in range(len(v) - 2, -1, -1):
                if v[i][0] == v[i + 1][0]:
                    v.pop(i + 1)
        return cuts
Example #43
    def __apply_tsne(self, data):
        """

        :param data:
        :return:
        """
        restore_these_settings = np.geterr()

        temp_settings = restore_these_settings.copy()
        temp_settings["over"] = "ignore"
        temp_settings["under"] = "ignore"

        np.seterr(**temp_settings)
        tsne2 = TSNE(n_components=2, random_state=SEED)
        X_2d = tsne2.fit_transform(data)
        np.seterr(**restore_these_settings)
        return X_2d
Example #44
def test_helmholtz_assemble(dtype, rtol, parallel):
    """Test the Laplace kernel."""
    from fast_green_kernel.direct_evaluator import assemble_helmholtz_kernel

    wavenumber = 2.5

    nsources = 10
    ntargets = 20

    if dtype == np.complex128:
        real_type = np.float64
    elif dtype == np.complex64:
        real_type = np.float32
    else:
        raise ValueError(f"Unsupported type: {dtype}.")

    rng = np.random.default_rng(seed=0)
    # Construct target and sources so that they do not overlap
    # apart from the first point.

    targets = 1.5 + rng.random((3, ntargets), dtype=real_type)
    sources = rng.random((3, nsources), dtype=real_type)
    sources[:, 0] = targets[:, 0]  # Test what happens if source = target

    actual = assemble_helmholtz_kernel(
        targets, sources, wavenumber, dtype=dtype, parallel=parallel
    )

    # Calculate expected result

    # A divide by zero error is expected to happen here.
    # So just ignore the warning.
    old_params = np.geterr()
    np.seterr(all="ignore")

    expected = np.empty((ntargets, nsources), dtype=dtype)

    for index, target in enumerate(targets.T):
        dist = np.linalg.norm(sources - target.reshape(3, 1), axis=0)
        expected[index, :] = np.exp(1j * wavenumber * dist) / (4 * np.pi * dist)
        expected[index, dist == 0] = 0

    # Reset the warnings
    np.seterr(**old_params)

    np.testing.assert_allclose(actual, expected, rtol=rtol)
Example #45
    def distance_along_vector_to_limit(self, alpha, duvec):
        """ Returns a new alpha so that new_u = current_u + alpha*duvec does
        not violate any `lower` or `upper` limits if specified.

        Args
        -----
        alpha: float
            Initial value for step in gradient direction.
        duvec: `Vecwrapper`
            Direction to apply the step; generally the gradient.

        Returns
        --------
        float
            New step size, backtracked to prevent violation."""

        # A single index of the gradient can be zero, so we want to suppress
        # the warnings from numpy.
        old_warn = numpy.geterr()
        numpy.seterr(divide='ignore')

        new_alpha = alpha
        for name, meta in iteritems(self):

            if 'remote' in meta:
                continue

            val = self[name]

            upper = meta.get('upper')
            if upper is not None:
                alpha_bound = numpy.min((upper - val)/duvec[name])
                if alpha_bound >= 0.0:
                    new_alpha = min(new_alpha, alpha_bound)

            lower = meta.get('lower')
            if lower is not None:
                alpha_bound = numpy.min((lower - val)/duvec[name])
                if alpha_bound >= 0.0:
                    new_alpha = min(new_alpha, alpha_bound)

        # Return numpy warn to what it was
        numpy.seterr(divide=old_warn['divide'])

        return max(0.0, new_alpha)
Example #46
    def distance_along_vector_to_limit(self, alpha, duvec):
        """ Returns a new alpha so that new_u = current_u + alpha*duvec does
        not violate any `lower` or `upper` limits if specified.

        Args
        -----
        alpha: float
            Initial value for step in gradient direction.
        duvec: `Vecwrapper`
            Direction to apply the step; generally the gradient.

        Returns
        --------
        float
            New step size, backtracked to prevent violation."""

        # A single index of the gradient can be zero, so we want to suppress
        # the warnings from numpy.
        old_warn = numpy.geterr()
        numpy.seterr(divide='ignore')

        new_alpha = alpha
        for name, meta in iteritems(self):

            if 'remote' in meta:
                continue

            val = self[name]

            upper = meta.get('upper')
            if upper is not None:
                alpha_bound = numpy.min((upper - val)/duvec[name])
                if alpha_bound >= 0.0:
                    new_alpha = min(new_alpha, alpha_bound)

            lower = meta.get('lower')
            if lower is not None:
                alpha_bound = numpy.min((lower - val)/duvec[name])
                if alpha_bound >= 0.0:
                    new_alpha = min(new_alpha, alpha_bound)

        # Return numpy warn to what it was
        numpy.seterr(divide=old_warn['divide'])

        return max(0.0, new_alpha)
Example #47
 def test_testScalarArithmetic(self):
     xm = array(0, mask=1)
     # TODO FIXME: Find out why the following raises a warning in r8247
     err_status = numpy.geterr()
     try:
         numpy.seterr(divide='ignore')
         self.assertTrue((1 / array(0)).mask)
     finally:
         numpy.seterr(**err_status)
     self.assertTrue((1 + xm).mask)
     self.assertTrue((-xm).mask)
     self.assertTrue((-xm).mask)
     self.assertTrue(maximum(xm, xm).mask)
     self.assertTrue(minimum(xm, xm).mask)
     self.assertTrue(xm.filled().dtype is xm._data.dtype)
     x = array(0, mask=0)
     self.assertTrue(x.filled() == x._data)
     self.assertEqual(str(xm), str(masked_print_option))
Example #48
    def __apply_tsne(self, data):
        """
        method used to apply the t-sne algorithm to reduce
        the problem dimension to 2d
        :param data: input data
        :return: 2d data
        """
        restore_these_settings = np.geterr()

        temp_settings = restore_these_settings.copy()
        temp_settings["over"] = "ignore"
        temp_settings["under"] = "ignore"

        np.seterr(**temp_settings)
        tsne2 = TSNE(n_components=2, random_state=SEED)
        X_2d = tsne2.fit_transform(data)
        np.seterr(**restore_these_settings)
        return X_2d
Example #49
 def __init__(self,
              rtol=None,
              atol=None,
              equal_nan=None,
              casting='same_kind',
              err=None,
              dtype=None,
              sparse=False,
              **kw):
     err = err if err is not None else np.geterr()
     super(TensorIscloseConstant, self).__init__(_rtol=rtol,
                                                 _atol=atol,
                                                 _equal_nan=equal_nan,
                                                 _casting=casting,
                                                 _err=err,
                                                 _dtype=dtype,
                                                 _sparse=sparse,
                                                 **kw)
Example #50
def intersect(v_origin, v_direction, plane_origin, plane_normal):
    vo = v_origin
    vd = v_direction
    po = plane_origin
    pn = plane_normal

    np_err = np.geterr()
    try:
        np.seterr(divide="ignore")
        t = np.stack(
            [np.sum(pn * (po - vo), axis=1) / np.sum(pn * vd, axis=1)], 1)
        # t[t < 0] = np.nan  # TODO: remove?
        t[np.isinf(t)] = np.nan
        intersection = vo + np.multiply(t, vd)
    finally:
        np.seterr(**np_err)

    return intersection
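An illustrative call; the shapes (one row per ray) are an assumption, since the snippet itself does not document them:

vo = np.array([[0.0, 0.0, 0.0]])   # ray origin
vd = np.array([[0.0, 0.0, 1.0]])   # ray direction
po = np.array([[0.0, 0.0, 5.0]])   # a point on the plane
pn = np.array([[0.0, 0.0, 1.0]])   # plane normal
intersect(vo, vd, po, pn)          # -> array([[0., 0., 5.]])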
Example #51
    def reduce(self,other,op=add,normalize_input=True,normalize_output=True):
        """Reduces two profiles with some operator and returns a new Profile
        
        other: Profile object
        op: operator (e.g. add, subtract, multiply, divide)
        normalize_input: whether the input profiles will be normalized
            before collapsing. The default is True.
        normalize_output: whether the output profile will be normalized.
            The default is True

        This function is intended for use on normalized profiles. For
        safety it'll try to normalize the data before collapsing them.
        If you do not normalize your data and set normalize_input to 
        False, you might get unexpected results. 
        
        It does check whether self.Data and other.Data have the same shape
        It does not check whether self and other have the same 
        CharOrder. The resulting Profile gets the alphabet and
        char order from self.

        """
        if self.Data.shape != other.Data.shape:
            raise ProfileError,\
                "Cannot collapse profiles of different size: %s, %s"\
                %(self.Data.shape,other.Data.shape)
        if normalize_input:
            self.normalizePositions()
            other.normalizePositions()
        
        try:
            ##SUPPORT2425
            ori_err = numpy.geterr()
            numpy.seterr(divide='raise')
            try: new_data = op(self.Data, other.Data)
            finally: numpy.seterr(**ori_err)
            #with numpy_err(divide='raise'):
                #new_data = op(self.Data, other.Data)
        except (OverflowError, ZeroDivisionError, FloatingPointError):
            raise ProfileError, "Can't do operation on input profiles"
        result = Profile(new_data, self.Alphabet, self.CharOrder)
        
        if normalize_output:
            result.normalizePositions()
        return result
Example #52
    def join(self, rhs, func: Callable[[Union[np.ndarray, V], Union[np.ndarray, V]], Union[np.ndarray, M]],
             dtype: Optional[Type[M]] = None, inplace=False, ) -> "Chunk[M]":
        dtype = self.__dtype(dtype)

        rhs_is_chunk = isinstance(rhs, Chunk)

        # Fill value
        rhs_fill_value = rhs._fill_value if rhs_is_chunk else rhs
        fill_value = self._fill_value
        try:
            fill_value = func(self._fill_value, rhs_fill_value)
        except Exception as e:
            handling = np.geterr()['invalid']
            if handling == 'raise':
                raise e
            elif handling == 'ignore':
                pass
            else:
                warnings.warn("Fill value operand", RuntimeWarning, source=e)

        # Inplace selection
        if inplace:
            assert self._dtype == dtype
            c = self
            c._fill_value = fill_value
        else:

            c = self.copy(empty=True, dtype=dtype, fill_value=fill_value)

        # Func on values
        if rhs_is_chunk:
            val = func(self._value, rhs._value)
            if self._is_filled and rhs._is_filled:
                c.set_fill(val)
            else:
                c.set_array(val)
        else:
            val = func(self._value, rhs)
            if self._is_filled:
                c.set_fill(val)
            else:
                c.set_array(val)

        return c
Example #53
def setup_module(module, verbosity=None):
    "set up test fixtures for testing"

    if __debug__:
        from mvpa2.base import debug
        # Lets add some targets which provide additional testing
        debug.active += ['CHECK_.*']

    verbosity = _get_verbosity(verbosity)

    # provide people with a hint about the warnings that might show up in a
    # second
    if verbosity:
        print("T: MVPA_SEED=%s" % _random_seed)
        if verbosity > 1:
            print('T: Testing for availability of external software packages.')

    # fully test of externals
    verbosity_dependencies = max(0, verbosity - 1)
    if verbosity_dependencies:
        externals.check_all_dependencies(verbosity=verbosity_dependencies)
    elif __debug__ and verbosity:  # pragma: no cover
        print(
            'T: Skipping testing of all dependencies since verbosity '
            '(MVPA_TESTS_VERBOSITY) is too low')

    # So we could see all warnings about missing dependencies
    _sys_settings['maxcount'] = warning.maxcount
    warning.maxcount = 1000

    if verbosity < 3:
        # no MVPA warnings during whole testsuite (but restore handlers later on)
        _sys_settings['handlers'] = warning.handlers
        warning.handlers = []

        # No python warnings (like ctypes version for slmr)
        import warnings
        warnings.simplefilter('ignore')

    if verbosity < 4:
        # No NumPy floating-point error warnings
        _sys_settings['np_errsettings'] = np.geterr()
        np.seterr(**dict([(x, 'ignore')
                          for x in _sys_settings['np_errsettings']]))
Example #54
 def test_smoothingWindow(self):
     """
     Tests the creation of the smoothing window.
     """
     # Disable div by zero errors.
     temp = np.geterr()
     np.seterr(all='ignore')
     # Frequency of zero results in a delta peak at zero (there usually
     # should be just one zero in the frequency array).
     window = konnoOhmachiSmoothingWindow(
         np.array([0, 1, 0, 3], dtype='float32'), 0)
     np.testing.assert_array_equal(window,
                                   np.array([1, 0, 1, 0], dtype='float32'))
     # Wrong dtypes raise.
     self.assertRaises(ValueError, konnoOhmachiSmoothingWindow,
                       np.arange(10, dtype='int32'), 10)
     # If frequency=center frequency, log results in infinity. Limit of
     # whole formulae is 1.
     window = konnoOhmachiSmoothingWindow(
         np.array([5.0, 1.0, 5.0, 2.0], dtype='float32'), 5)
     np.testing.assert_array_equal(window[[0, 2]],
                                   np.array([1.0, 1.0], dtype='float32'))
     # Output dtype should be the dtype of frequencies.
     self.assertEqual(
         konnoOhmachiSmoothingWindow(np.array([1, 6, 12], dtype='float32'),
                                     5).dtype, np.float32)
     self.assertEqual(
         konnoOhmachiSmoothingWindow(np.array([1, 6, 12], dtype='float64'),
                                     5).dtype, np.float64)
     # Check if normalizing works.
     window = konnoOhmachiSmoothingWindow(self.frequencies, 20)
     self.assertTrue(window.sum() > 1.0)
     window = konnoOhmachiSmoothingWindow(self.frequencies,
                                          20,
                                          normalize=True)
     self.assertAlmostEqual(window.sum(), 1.0, 5)
     # Just one more test to check that there are no invalid values and the
     # range is ok.
     window = konnoOhmachiSmoothingWindow(self.frequencies, 20)
     self.assertEqual(np.any(np.isnan(window)), False)
     self.assertEqual(np.any(np.isinf(window)), False)
     self.assertTrue(np.all(window <= 1.0))
     self.assertTrue(np.all(window >= 0.0))
     np.seterr(**temp)
Example #55
def test_laplace_evaluate_values_and_deriv(dtype, rtol, parallel):
    """Test the Laplace kernel."""
    from fast_green_kernel.direct_evaluator import evaluate_laplace_kernel

    nsources = 10
    ntargets = 20
    ncharge_vecs = 2

    rng = np.random.default_rng(seed=0)
    # Construct target and sources so that they do not overlap
    # apart from the first point.

    targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
    sources = rng.random((3, nsources), dtype=dtype)
    sources[:, 0] = targets[:, 0]  # Test what happens if source = target
    charges = rng.random((ncharge_vecs, nsources), dtype=dtype)

    actual = evaluate_laplace_kernel(
        targets, sources, charges, dtype=dtype, return_gradients=True, parallel=parallel
    )

    # Calculate expected result

    # A divide by zero error is expected to happen here.
    # So just ignore the warning.
    old_params = np.geterr()
    np.seterr(all="ignore")

    expected = np.empty((nsources, ntargets, 4), dtype=dtype)

    for index, target in enumerate(targets.T):
        diff = sources - target.reshape(3, 1)
        dist = np.linalg.norm(diff, axis=0)
        expected[:, index, 0] = 1.0 / (4 * np.pi * dist)
        expected[:, index, 1:] = diff.T / (4 * np.pi * dist.reshape(nsources, 1) ** 3)
        expected[dist == 0, index, :] = 0

    # Reset the warnings
    np.seterr(**old_params)

    expected = np.tensordot(charges, expected, 1)

    np.testing.assert_allclose(actual, expected, rtol=rtol)
Example #56
def setup(self,
          np=np,
          numpy_version=numpy_version,
          StrictVersion=StrictVersion,
          new_pandas=new_pandas):
    """Lives in zipline.__init__ for doctests."""

    if numpy_version >= StrictVersion('1.14'):
        self.old_opts = np.get_printoptions()
        np.set_printoptions(legacy='1.13')
    else:
        self.old_opts = None

    if new_pandas:
        self.old_err = np.geterr()
        # old pandas has numpy compat that sets this
        np.seterr(all='ignore')
    else:
        self.old_err = None
Example #57
def test_laplace_evaluate_only_values(dtype, rtol, parallel):
    """Test the Laplace kernel."""
    from fast_green_kernel.direct_evaluator import evaluate_laplace_kernel

    nsources = 10
    ntargets = 20
    ncharge_vecs = 2

    rng = np.random.default_rng(seed=0)
    # Construct target and sources so that they do not overlap
    # apart from the first point.

    targets = 1.5 + rng.random((3, ntargets), dtype=dtype)
    sources = rng.random((3, nsources), dtype=dtype)
    sources[:, 0] = targets[:, 0]  # Test what happens if source = target
    charges = rng.random((ncharge_vecs, nsources), dtype=dtype)

    actual = evaluate_laplace_kernel(
        targets, sources, charges, dtype=dtype, parallel=parallel
    )

    # Calculate expected result

    # A divide by zero error is expected to happen here.
    # So just ignore the warning.
    old_param = np.geterr()["divide"]
    np.seterr(divide="ignore")

    expected = np.empty((nsources, ntargets), dtype=dtype)

    for index, target in enumerate(targets.T):
        expected[:, index] = 1.0 / (
            4 * np.pi * np.linalg.norm(sources - target.reshape(3, 1), axis=0)
        )

    # Reset the warnings
    np.seterr(divide=old_param)

    expected[0, 0] = 0  # First source and target are identical.

    expected = np.expand_dims(charges @ expected, -1)

    np.testing.assert_allclose(actual, expected, rtol=rtol)
Example #58
    def _calc_rot_mtx(self, c1, c2, c3, dbg=False):
        """Return rotation matrix transforming Cartesian system to system of alpha carbon c2 in sequence of alpha carbons c1, c2, c3.

        Arguments:
        c1, c2, c3 -- position vectors of subsequent alpha carbons.

        Returns matrix and distance between c1 and c3.
        """
        if dbg:
            import imp
            pdbx = imp.load_source('test', '/usr/lib/python2.7/pdb.py')
            pdbx.set_trace()
        x, y, z, rdnorm = self._mk_local_system(c1, c2, c3)

        setting = np.geterr()
        np.seterr(all='raise')
        try:
            w = self._calc_nodes_line(np.array((0, 0, 1)), z)
        except FloatingPointError:
            w = x
        np.seterr(**setting)

        cph, sph = self._calc_trig_fnc(np.array((1, 0, 0)), w,
                                       np.array((0, 0, 1)))
        # phi angle trig fncs -- rotation around z axis so x -> w
        #~ sph = np.linalg.norm(cpht) * np.sign(np.dot(cpht, np.array((0, 0, 1))))

        cps, sps = self._calc_trig_fnc(w, x, z)
        # psi angle -- rotation around z' so x -> x'
        #~ sps = np.linalg.norm(cwx) * np.sign(np.dot(cwx, z))

        cth, sth = self._calc_trig_fnc(np.array((0, 0, 1)), z, w)
        # theta angle -- rotation around nodes line to transform z on z'

        rot = np.matrix([[
            cps * cph - sps * sph * cth, sph * cps + sps * cth * cph, sps * sth
        ],
                         [
                             -1 * sps * cph - sph * cps * cth,
                             -1 * sps * sph + cps * cth * cph, cps * sth
                         ], [sth * sph, -sth * cph, cth]])

        return rot, rdnorm
Example #59
def newell_surf_normal(poly):
    # This algorithm is based on the Newell's method pseudo-code found at:
    # https://www.khronos.org/opengl/wiki/Calculating_a_Surface_Normal

    old_settings = np.geterr()
    np.seterr(invalid='ignore')
    norm = np.array([0.0, 0.0, 0.0])
    for i in range(len(poly)):
        v_curr = poly[i]
        v_next = poly[(i + 1) % len(poly)]
        norm[0] += (v_curr[1] - v_next[1]) * (v_curr[2] + v_next[2])
        norm[1] += (v_curr[2] - v_next[2]) * (v_curr[0] + v_next[0])
        norm[2] += (v_curr[0] - v_next[0]) * (v_curr[1] + v_next[1])

    mag_norm = algc.mag(norm)
    unit_norm = norm / mag_norm

    np.seterr(invalid=old_settings['invalid'])
    return unit_norm
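An illustrative call on a counter-clockwise unit triangle in the xy-plane (assuming algc.mag computes the Euclidean norm):

tri = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0]])
newell_surf_normal(tri)   # -> array([0., 0., 1.])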