Example #1
def setup_argument_parser():
    build_obj = MozbuildObject.from_environment(cwd=here)

    build_path = os.path.join(build_obj.topobjdir, 'build')
    if build_path not in sys.path:
        sys.path.append(build_path)

    mochitest_dir = os.path.join(build_obj.topobjdir, '_tests', 'testing', 'mochitest')

    with warnings.catch_warnings():
        warnings.simplefilter('ignore')

        import imp
        path = os.path.join(build_obj.topobjdir, mochitest_dir, 'runtests.py')
        if not os.path.exists(path):
            path = os.path.join(here, "runtests.py")

        with open(path, 'r') as fh:
            imp.load_module('mochitest', fh, path,
                            ('.py', 'r', imp.PY_SOURCE))

        from mochitest_options import MochitestArgumentParser

    if conditions.is_android(build_obj):
        # On Android, check for a connected device (and offer to start an
        # emulator if appropriate) before running tests. This check must
        # be done in this admittedly awkward place because
        # MochitestArgumentParser initialization fails if no device is found.
        from mozrunner.devices.android_device import verify_android_device
        verify_android_device(build_obj, install=True, xre=True)

    global parser
    parser = MochitestArgumentParser()
    return parser
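Example #1 wraps `import imp` in `catch_warnings` to silence the DeprecationWarning the module raises on Python 3. A minimal sketch of the same by-path load using `importlib` instead of the deprecated `imp.load_module`; it assumes the same `path` variable as above and is illustrative only, not part of the original project:

import importlib.util
import sys

# Load runtests.py under the module name 'mochitest', mirroring the
# imp.load_module call above (hypothetical drop-in sketch).
spec = importlib.util.spec_from_file_location('mochitest', path)
module = importlib.util.module_from_spec(spec)
sys.modules['mochitest'] = module
spec.loader.exec_module(module)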
Example #2
def text_traceback():
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = 'the original traceback:'.join(
            cgitb.text(sys.exc_info()).split('the original traceback:')[1:]
        ).strip()
    return res
Example #3
    def __compute_alternative_params(self):
        # Copied directly from skopt
        transformed_bounds = np.array(self.__opt.space.transformed_bounds)
        est = clone(self.__opt.base_estimator)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(self.__opt.space.transform(self.__opt.Xi), self.__opt.yi)

        X = self.__opt.space.transform(self.__opt.space.rvs(
            n_samples=self.__opt.n_points, random_state=self.__opt.rng))

        values = _gaussian_acquisition(X=X, model=est, y_opt=np.min(self.__opt.yi),
                                       acq_func='EI',
                                       acq_func_kwargs=dict(n_points=10000))

        print('original point ei: %s' % np.min(values))
        discount_width = .5
        values = self.__discount_leased_params(X, values, discount_width)
        while np.min(values) > -1e-5 and discount_width > 1e-2:
            discount_width *= .9
            values = _gaussian_acquisition(X=X, model=est, y_opt=np.min(self.__opt.yi),
                                           acq_func='EI',
                                           acq_func_kwargs=dict(n_points=10000))
            values = self.__discount_leased_params(X, values, discount_width)
        next_x = X[np.argmin(values)]
        print('new point ei: %s' % np.min(values))

        if not self.__opt.space.is_categorical:
            next_x = np.clip(next_x, transformed_bounds[:, 0], transformed_bounds[:, 1])

        return self.__opt.space.inverse_transform(next_x.reshape((1, -1)))[0]
Example #4
 def test_parseable_output_deprecated(self):
     with warnings.catch_warnings(record=True) as cm:
         warnings.simplefilter("always")
         ParseableTextReporter()
     
     self.assertEqual(len(cm), 1)
     self.assertIsInstance(cm[0].message, DeprecationWarning)
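The recording pattern in Example #4 generalizes to any deprecation check: record everything, trigger the call, then assert on the captured warnings. A minimal standalone sketch with a hypothetical deprecated function:

import warnings

def old_api():
    # hypothetical deprecated function used only for this sketch
    warnings.warn("old_api is deprecated", DeprecationWarning, stacklevel=2)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # make sure nothing gets filtered out
    old_api()

assert len(caught) == 1
assert issubclass(caught[0].category, DeprecationWarning)
assert "deprecated" in str(caught[0].message)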
Example #5
    def test_op(self):
        # motor.Op is deprecated in Motor 0.2, superseded by Tornado 3 Futures.
        # Just make sure it still works.

        collection = self.cx.pymongo_test.test_collection
        doc = {'_id': 'jesse'}

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")

            # Op works.
            _id = yield motor.Op(collection.insert, doc)
            self.assertEqual('jesse', _id)

            # Raised a DeprecationWarning.
            self.assertEqual(1, len(w))
            warning = w[-1]
            self.assertTrue(issubclass(warning.category, DeprecationWarning))
            message = str(warning.message)
            self.assertTrue("deprecated" in message)
            self.assertTrue("insert" in message)

        result = yield motor.Op(collection.find_one, doc)
        self.assertEqual(doc, result)

        # Make sure it works with no args.
        result = yield motor.Op(collection.find_one)
        self.assertTrue(isinstance(result, dict))

        with assert_raises(pymongo.errors.DuplicateKeyError):
            yield motor.Op(collection.insert, doc)
Example #6
    def __init__(self, app_id=None, user_login=None, user_password=None, access_token=None, user_email=None,
                 scope='offline', timeout=1, api_version='5.20'):

        user_login = user_login or user_email

        if (not user_login or not user_password) and not access_token:
            raise ValueError('Arguments user_login and user_password, or access_token are required')

        if user_email:  # deprecated at April 11, 2014
            warnings.simplefilter('once')
            warnings.warn("Use 'user_login' instead of deprecated 'user_email'", DeprecationWarning, stacklevel=2)

        self.app_id = app_id

        self.user_login = user_login
        self.user_password = user_password

        self.access_token = access_token
        self.scope = scope or ''
        
        self.api_version = api_version

        self._default_timeout = timeout

        self.session = requests.Session()
        self.session.headers['Accept'] = 'application/json'
        self.session.headers['Content-Type'] = 'application/x-www-form-urlencoded'

        if not access_token and user_login and user_password:
            self.get_access_token()
Example #7
 def test_idlever(self):
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         import idlelib.idlever
         self.assertEqual(len(w), 1)
         self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
         self.assertIn("version", str(w[-1].message))
Example #8
def test_io_inverse_operator():
    """Test IO of inverse_operator
    """
    tempdir = _TempDir()
    inverse_operator = read_inverse_operator(fname_inv)
    x = repr(inverse_operator)
    assert_true(x)
    assert_true(isinstance(inverse_operator['noise_cov'], Covariance))
    # just do one example for .gz, as it should generalize
    _compare_io(inverse_operator, '.gz')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_inverse_operator(inv_badname, inverse_operator)
        read_inverse_operator(inv_badname)
    assert_naming(w, 'test_inverse.py', 2)

    # make sure we can write and read
    inv_fname = op.join(tempdir, 'test-inv.fif')
    args = (10, 1. / 9., 'dSPM')
    inv_prep = prepare_inverse_operator(inverse_operator, *args)
    write_inverse_operator(inv_fname, inv_prep)
    inv_read = read_inverse_operator(inv_fname)
    _compare(inverse_operator, inv_read)
    inv_read_prep = prepare_inverse_operator(inv_read, *args)
    _compare(inv_prep, inv_read_prep)
    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
    _compare(inv_prep, inv_prep_prep)
Example #9
def __unit_test_onset_function(metric):
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # First, test for a warning on empty onsets
        metric(np.array([]), np.arange(10))
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Reference onsets are empty."
        metric(np.arange(10), np.array([]))
        assert len(w) == 2
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Estimated onsets are empty."
        # And that the metric is 0
        assert np.allclose(metric(np.array([]), np.array([])), 0)

    # Now test validation function - onsets must be 1d ndarray
    onsets = np.array([[1., 2.]])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be in seconds (so not huge)
    onsets = np.array([1e10, 1e11])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)
    # onsets must be sorted
    onsets = np.array([2., 1.])
    nose.tools.assert_raises(ValueError, metric, onsets, onsets)

    # Valid onsets which are the same produce a score of 1 for all metrics
    onsets = np.arange(10, dtype=np.float)
    assert np.allclose(metric(onsets, onsets), 1)
Example #10
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
                      precompute=False, eps=np.finfo(np.float).eps,
                      max_iter=500):
    X = X[safe_mask(X, mask)]
    y = y[mask]

    # Center X and y to avoid fitting the intercept
    X -= X.mean(axis=0)
    y -= y.mean()

    alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))

    X = (1 - weights) * X
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas_, _, coef_ = lars_path(X, y,
                                      Gram=precompute, copy_X=False,
                                      copy_Gram=False, alpha_min=np.min(alpha),
                                      method='lasso', verbose=verbose,
                                      max_iter=max_iter, eps=eps)

    if len(alpha) > 1:
        if len(alphas_) > 1:  # np.min(alpha) < alpha_min
            interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
                                    bounds_error=False, fill_value=0.)
            scores = (interpolator(alpha) != 0.0)
        else:
            scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
    else:
        scores = coef_[:, -1] != 0.0
    return scores
Example #11
def _lasso_stability_path(X, y, mask, weights, eps):
    "Inner loop of lasso_stability_path"
    X = X * weights[np.newaxis, :]
    X = X[safe_mask(X, mask), :]
    y = y[mask]

    alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
    alpha_min = eps * alpha_max  # set for early stopping in path
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
                                     alpha_min=alpha_min)
    # Scale alpha by alpha_max
    alphas /= alphas[0]
    # Sort alphas in ascending order
    alphas = alphas[::-1]
    coefs = coefs[:, ::-1]
    # Get rid of the alphas that are too small
    mask = alphas >= eps
    # We also want to keep the first one: it should be close to the OLS
    # solution
    mask[0] = True
    alphas = alphas[mask]
    coefs = coefs[:, mask]
    return alphas, coefs
Example #12
 def test_writing_invalid_quakeml_id(self):
     """
     Some ids might be invalid. We still want to write them to not mess
     with any external tools relying on the ids. But we also raise a
     warning of course.
     """
     filename = os.path.join(self.path, 'invalid_id.xml')
     cat = read_events(filename)
     self.assertEqual(
         cat[0].resource_id.id,
         "smi:org.gfz-potsdam.de/geofon/RMHP(60)>>ITAPER(3)>>BW(4,5,15)")
     with NamedTemporaryFile() as tf:
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter("always")
             cat.write(tf.name, format="quakeml")
             cat2 = read_events(tf.name)
     self.assertEqual(len(w), 19)
     self.assertEqual(
         w[0].message.args[0],
         "'smi:org.gfz-potsdam.de/geofon/RMHP(60)>>ITAPER(3)>>BW(4,5,15)' "
         "is not a valid QuakeML URI. It will be in the final file but "
         "note that the file will not be a valid QuakeML file.")
     self.assertEqual(
         cat2[0].resource_id.id,
         "smi:org.gfz-potsdam.de/geofon/RMHP(60)>>ITAPER(3)>>BW(4,5,15)")
Example #13
 def test_usgs_eventype(self):
     filename = os.path.join(self.path, 'usgs_event.xml')
     with warnings.catch_warnings(record=True):
         warnings.simplefilter("ignore")
         catalog = _read_quakeml(filename)
     self.assertEqual(len(catalog), 1)
     self.assertEqual(catalog[0].event_type, 'quarry blast')
Example #14
    def test_warningsConfiguredAsErrors(self):
        """
        If a warnings filter has been installed which turns warnings into
        exceptions, tests have an error added to the reporter for them for each
        unflushed warning.
        """
        class CustomWarning(Warning):
            pass

        result = TestResult()
        case = Mask.MockTests('test_unflushed')
        case.category = CustomWarning

        originalWarnings = warnings.filters[:]
        try:
            warnings.simplefilter('error')
            case.run(result)
            self.assertEqual(len(result.errors), 1)
            self.assertIdentical(result.errors[0][0], case)
            self.assertTrue(
                # Different python versions differ in whether they report the
                # fully qualified class name or just the class name.
                result.errors[0][1].splitlines()[-1].endswith(
                    "CustomWarning: some warning text"))
        finally:
            warnings.filters[:] = originalWarnings
Example #15
    def test_get_default_base_name_deprecation(self):
        msg = "`CustomRouter.get_default_base_name` method should be renamed `get_default_basename`."

        # Class definition should raise a warning
        with pytest.warns(RemovedInDRF311Warning) as w:
            warnings.simplefilter('always')

            class CustomRouter(SimpleRouter):
                def get_default_base_name(self, viewset):
                    return 'foo'

        assert len(w) == 1
        assert str(w[0].message) == msg

        # Deprecated method implementation should still be called
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')

            router = CustomRouter()
            router.register('mock', MockViewSet)

        assert len(w) == 0
        assert router.registry == [
            ('mock', MockViewSet, 'foo'),
        ]
Example #16
    def _assert_warns(warning_class, func, *args, **kw):
        r"""
        Fail unless the given callable throws the specified warning.

        This definition is copypasted from numpy 1.9.0.dev.
        The version in earlier numpy returns None.

        Parameters
        ----------
        warning_class : class
            The class defining the warning that `func` is expected to throw.
        func : callable
            The callable to test.
        *args : Arguments
            Arguments passed to `func`.
        **kwargs : Kwargs
            Keyword arguments passed to `func`.

        Returns
        -------
        The value returned by `func`.

        """
        with warnings.catch_warnings(record=True) as l:
            warnings.simplefilter("always")
            result = func(*args, **kw)
            if not len(l) > 0:
                raise AssertionError("No warning raised when calling %s" % func.__name__)
            if not l[0].category is warning_class:
                raise AssertionError(
                    "First warning for %s is not a " "%s( is %s)" % (func.__name__, warning_class, l[0])
                )
        return result
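A possible usage of `_assert_warns`, with a hypothetical deprecated callable (illustrative only):

import warnings

def legacy_mean(values):
    # hypothetical deprecated helper used only for this sketch
    warnings.warn("legacy_mean is deprecated", DeprecationWarning)
    return sum(values) / len(values)

result = _assert_warns(DeprecationWarning, legacy_mean, [1, 2, 3])
assert result == 2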
Example #17
def test_unicode_decode_error():
    # decode_error defaults to 'strict', so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou  ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')

    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)

    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)

    # Check the old interface
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")

        ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                             charset='ascii').build_analyzer()
        assert_raises(UnicodeDecodeError, ca, text_bytes)

        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("charset" in str(w[0].message).lower())
Example #18
 def test_generated(self):
     """Write and read back odd SeqRecord objects"""
     record1 = SeqRecord(Seq("ACGT"*500, generic_dna),  id="Test", description="Long "*500,
                        letter_annotations={"phred_quality":[40,30,20,10]*500})
     record2 = SeqRecord(MutableSeq("NGGC"*1000),  id="Mut", description="very "*1000+"long",
                        letter_annotations={"phred_quality":[0,5,5,10]*1000})
     record3 = SeqRecord(UnknownSeq(2000,character="N"),  id="Unk", description="l"+("o"*1000)+"ng",
                        letter_annotations={"phred_quality":[0,1]*1000})
     record4 = SeqRecord(Seq("ACGT"*500),  id="no_descr", description="", name="",
                        letter_annotations={"phred_quality":[40,50,60,62]*500})
     record5 = SeqRecord(Seq("",generic_dna),  id="empty_p", description="(could have been trimmed lots)",
                        letter_annotations={"phred_quality":[]})
     record6 = SeqRecord(Seq(""),  id="empty_s", description="(could have been trimmed lots)",
                        letter_annotations={"solexa_quality":[]})
     record7 = SeqRecord(Seq("ACNN"*500),  id="Test_Sol", description="Long "*500,
                        letter_annotations={"solexa_quality":[40,30,0,-5]*500})
     record8 = SeqRecord(Seq("ACGT"),  id="HighQual", description="With very large qualities that even Sanger FASTQ can't hold!",
                        letter_annotations={"solexa_quality":[0,10,100,1000]})
     #TODO - Record with no identifier?
     records = [record1, record2, record3, record4, record5, record6, record7, record8]
     #TODO - Have a Biopython defined "DataLossWarning?"
     warnings.simplefilter('ignore', BiopythonWarning)
     #TODO - Include phd output?
     for format in ["fasta", "fastq", "fastq-solexa", "fastq-illumina", "qual"]:
         handle = StringIO()
         SeqIO.write(records, handle, format)
         handle.seek(0)
         compare_records(records,
                         list(SeqIO.parse(handle, format)),
                         truncation_expected(format))
     warnings.filters.pop()
Example #19
def test_1d_shape():
    # Current 5 behavior is 1D -> column vector
    arr = np.arange(5)
    stream = BytesIO()
    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        # silence warnings for tests
        warnings.simplefilter('ignore')
        savemat(stream, {'oned':arr}, format='5')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (5,1))
        # Current 4 behavior is 1D -> row vector
        stream = BytesIO()
        savemat(stream, {'oned':arr}, format='4')
        vals = loadmat(stream)
        assert_equal(vals['oned'].shape, (1, 5))
        for format in ('4', '5'):
            # can be explicitly 'column' for oned_as
            stream = BytesIO()
            savemat(stream, {'oned':arr},
                    format=format,
                    oned_as='column')
            vals = loadmat(stream)
            assert_equal(vals['oned'].shape, (5,1))
            # but different from 'row'
            stream = BytesIO()
            savemat(stream, {'oned':arr},
                    format=format,
                    oned_as='row')
            vals = loadmat(stream)
            assert_equal(vals['oned'].shape, (1,5))
    finally:
        warn_ctx.__exit__()
Example #20
def _validate_regex(pattern, flags):
    """Check if the given regex is valid.

    This is more complicated than it could be since there's a warning on
    invalid escapes with newer Python versions, and we want to catch that case
    and treat it as invalid.
    """
    with warnings.catch_warnings(record=True) as recorded_warnings:
        warnings.simplefilter('always')
        try:
            re.compile(pattern, flags)
        except re.error as e:
            raise configexc.ValidationError(
                pattern, "must be a valid regex - " + str(e))
        except RuntimeError:
            raise configexc.ValidationError(
                pattern, "must be a valid regex - recursion depth exceeded")

    for w in recorded_warnings:
        if (issubclass(w.category, DeprecationWarning) and
                str(w.message).startswith('bad escape')):
            raise configexc.ValidationError(
                pattern, "must be a valid regex - " + str(w.message))
        else:
            warnings.warn(w.message)
Example #21
 def test_deprecated_callbacks(self):
     # Tests that callback functions that return values are still supported but that warnings are generated
     
     def returns_cube(cube, field, filename):
         return cube
         
     def returns_no_cube(cube, field, filename):
         return iris.io.NO_CUBE
         
     fname = tests.get_data_path(["PP", "trui", "air_temp_init", "200812011200__qwqu12ff.initanl.pp"])
     
     # Catch all warnings for returns_cube
     with warnings.catch_warnings(record=True) as generated_warnings_cube:
         warnings.simplefilter("always")
         r = iris.load(fname, callback=returns_cube)
         
         # Test that our warnings are present in the generated warnings:
         gen_warnings_cube = [str(x.message) for x in generated_warnings_cube]
         self.assertIn(iris.io.CALLBACK_DEPRECATION_MSG, gen_warnings_cube, "Callback deprecation warning message not issued.")
     
     # Catch all warnings for returns_no_cube
     with warnings.catch_warnings(record=True) as generated_warnings_no_cube:
         warnings.simplefilter("always")  
         r = iris.load(fname, callback=returns_no_cube)
         
         # Test that our warnings are present in the generated warnings:
         gen_warnings_no_cube = [str(x.message) for x in generated_warnings_no_cube]
         self.assertIn(iris.io.CALLBACK_DEPRECATION_MSG, gen_warnings_no_cube, "Callback deprecation warning message not issued.")
Example #22
 def test_not_reset(self):
     loader = self.DummyLoader()
     loader.module = types.ModuleType('blah')
     loader.module.__loader__ = 42
     with warnings.catch_warnings():
         warnings.simplefilter('ignore', DeprecationWarning)
         self.assertEqual(42, loader.load_module('blah').__loader__)
Example #23
 def test_decorator_attrs(self):
     def fxn(module): pass
     with warnings.catch_warnings():
         warnings.simplefilter('ignore', DeprecationWarning)
         wrapped = self.util.set_package(fxn)
     self.assertEqual(wrapped.__name__, fxn.__name__)
     self.assertEqual(wrapped.__qualname__, fxn.__qualname__)
Example #24
 def setUp(self):
     super(converterTestsCDF, self).setUp()
     self.SDobj = dm.SpaceData(attrs={'global': 'test'})
     self.SDobj['var'] = dm.dmarray([1, 2, 3], attrs={'a': 'a'})
     self.testdir = tempfile.mkdtemp()
     self.testfile = os.path.join(self.testdir, 'test.cdf')
     warnings.simplefilter('error', dm.DMWarning)
Example #25
 def tearDown(self):
     super(converterTestsCDF, self).tearDown()
     del self.SDobj
     if os.path.exists(self.testfile):
         os.remove(self.testfile)
     os.rmdir(self.testdir)
     warnings.simplefilter('default', dm.DMWarning)
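Note that `simplefilter('default', dm.DMWarning)` in this tearDown resets the filter for that warning class to the default action rather than restoring whatever filters were active before setUp. A hedged alternative sketch (names are illustrative) saves and restores the global filter list, the same approach the Twisted test in Example #14 uses:

import unittest
import warnings

class ConverterWarningTests(unittest.TestCase):
    # hypothetical test case; dm.DMWarning is replaced by UserWarning for the sketch
    def setUp(self):
        self._saved_filters = warnings.filters[:]
        warnings.simplefilter('error', UserWarning)

    def tearDown(self):
        warnings.filters[:] = self._saved_filters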
Example #26
    def _evaluate_projection(self, x, y):
        """
        kNNEvaluate - evaluate class separation in the given projection using a k-NN method
        Parameters
        ----------
        x - variables to evaluate
        y - class

        Returns
        -------
        scores
        """
        if self.percent_data_used != 100:
            rand = np.random.choice(len(x), int(len(x) * self.percent_data_used / 100),
                                    replace=False)
            x = x[rand]
            y = y[rand]
        neigh = KNeighborsClassifier(n_neighbors=3) if self.attr_color.is_discrete else \
            KNeighborsRegressor(n_neighbors=3)
        assert ~(np.isnan(x).any(axis=None) | np.isnan(x).any(axis=None))
        neigh.fit(x, y)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=UserWarning)
            scores = cross_val_score(neigh, x, y, cv=3)
        return scores.mean()
Example #27
    def properties(self):
        """
        return a dictionary mapping property name -> value
        """
        o = self.oorig
        getters = [name for name in dir(o)
                   if name.startswith('get_')
                   and six.callable(getattr(o, name))]
        getters.sort()
        d = dict()
        for name in getters:
            func = getattr(o, name)
            if self.is_alias(func):
                continue

            try:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    val = func()
            except:
                continue
            else:
                d[name[4:]] = val

        return d
Example #28
 def test_attribute_is_None(self):
     loader = self.DummyLoader()
     loader.module = types.ModuleType('blah')
     loader.module.__loader__ = None
     with warnings.catch_warnings():
         warnings.simplefilter('ignore', DeprecationWarning)
         self.assertEqual(loader, loader.load_module('blah').__loader__)
Example #29
def test_ica_rank_reduction():
    """Test recovery of full data when no source is rejected"""
    # Most basic recovery
    raw = Raw(raw_fname).crop(0.5, stop, False)
    raw.load_data()
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[:10]
    n_components = 5
    max_pca_components = len(picks)
    for n_pca_components in [6, 10]:
        with warnings.catch_warnings(record=True):  # non-convergence
            warnings.simplefilter('always')
            ica = ICA(n_components=n_components,
                      max_pca_components=max_pca_components,
                      n_pca_components=n_pca_components,
                      method='fastica', max_iter=1).fit(raw, picks=picks)

        rank_before = raw.estimate_rank(picks=picks)
        assert_equal(rank_before, len(picks))
        raw_clean = ica.apply(raw, copy=True)
        rank_after = raw_clean.estimate_rank(picks=picks)
        # interaction between ICA rejection and PCA components is difficult
        # to predict. rank_after often seems to be 1 higher than
        # n_pca_components
        assert_true(n_components < n_pca_components <= rank_after <=
                    rank_before)
Example #30
def test_PR_424():
    """Ensure deprecation and user warnings are triggered."""
    import warnings
    warnings.simplefilter('always') # Alert us of deprecation warnings.

    # Recommended use
    ColorClip([1000, 600], color=(60, 60, 60), duration=10).close()

    with pytest.warns(DeprecationWarning):
        # Uses `col` so should work the same as above, but give warning.
        ColorClip([1000, 600], col=(60, 60, 60), duration=10).close()

    # Catch all warnings as record.
    with pytest.warns(None) as record:
        # Should give 2 warnings and use `color`, not `col`
        ColorClip([1000, 600], color=(60, 60, 60), duration=10, col=(2,2,2)).close()

    message1 = 'The `ColorClip` parameter `col` has been deprecated. ' + \
               'Please use `color` instead.'
    message2 = 'The arguments `color` and `col` have both been passed to ' + \
               '`ColorClip` so `col` has been ignored.'

    # Assert that two warnings popped and validate the message text.
    assert len(record) == 2
    assert str(record[0].message) == message1
    assert str(record[1].message) == message2
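Newer pytest releases deprecate `pytest.warns(None)` as a way to collect every warning. A sketch of the same "catch everything" check using only the standard library; the `ColorClip` call mirrors the one above and is illustrative here:

import warnings

with warnings.catch_warnings(record=True) as record:
    warnings.simplefilter("always")
    ColorClip([1000, 600], color=(60, 60, 60), duration=10, col=(2, 2, 2)).close()

# Same two warnings as asserted above.
assert len(record) == 2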
Example #31
 def assertHFToolsDeprecationWarning(self, funk, *k, **kw):
     warnings.resetwarnings()
     warnings.simplefilter("error", HFToolsDeprecationWarning)
     self.assertRaises(HFToolsDeprecationWarning, funk, *k, **kw)
     warnings.simplefilter("ignore", HFToolsDeprecationWarning)
Example #32
def assert_warns_message(warning_class, message, func, *args, **kw):
    # very important to avoid uncontrolled state propagation
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    message : str | callable
        The message or a substring of the message to test for. If callable,
        it takes a string as the argument and will trigger an AssertionError
        if the callable returns `False`.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`

    """
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s" %
                                 func.__name__)

        found = [issubclass(warning.category, warning_class) for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s" % (func.__name__, warning_class))

        message_found = False
        # Checks the message of all warnings belong to warning_class
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg

            if check_in_message(msg):
                message_found = True
                break

        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>, got: '%s'" %
                                 (message, func.__name__, msg))

    return result
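A possible usage of `assert_warns_message`, with a hypothetical deprecated keyword argument (illustrative only):

import warnings

def resample(values, copy=True):
    # hypothetical function used only for this sketch
    if copy:
        warnings.warn("the `copy` argument is deprecated", DeprecationWarning)
    return list(values)

out = assert_warns_message(DeprecationWarning, "`copy` argument is deprecated",
                           resample, [1, 2, 3])
assert out == [1, 2, 3]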
Example #33
import os
import sys
# Hide the "FutureWarning: pandas.util.testing is deprecated." caused by empyrical
import warnings
from pydoc import locate

import click
import pkg_resources

import jesse.helpers as jh

warnings.simplefilter(action='ignore', category=FutureWarning)

# Python version validation.
if jh.python_version() < 3.7:
    print(
        jh.color(
            'Jesse requires Python version above 3.7. Yours is {}'.format(
                jh.python_version()), 'red'))

# fix directory issue
sys.path.insert(0, os.getcwd())

ls = os.listdir('.')
is_jesse_project = 'strategies' in ls and 'config.py' in ls and 'storage' in ls and 'routes.py' in ls


def validate_cwd() -> None:
    """
    make sure we're in a Jesse project
    """
Example #34
 def check(attr):
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         assert getattr(d, attr) is d
     assert len(w) == 1
     assert w[0].category is DeprecationWarning
Example #35
import os
import warnings

import seaborn as sns
import torch
import torch.multiprocessing as mp
from torch.distributions import transform_to, constraints

import pyro
import pyro.distributions as dist
import pyro.ops.stats as stats
import pyro.poutine as poutine
from pyro.contrib.autoguide import AutoLaplaceApproximation
from pyro.infer import TracePosterior, TracePredictive, Trace_ELBO
from pyro.infer.mcmc import MCMC
from pyro.ops.welford import WelfordCovariance

os.environ["CUDA_VISIBLE_DEVICES"] = ""
warnings.simplefilter("ignore", FutureWarning)

mp.set_sharing_strategy("file_system")
sns.set(font_scale=1.25, rc={"figure.figsize": (8, 6)})

pyro.enable_validation()
pyro.set_rng_seed(0)


class MAP(TracePosterior):
    def __init__(self, model, num_samples=10000, start={}):
        super(MAP, self).__init__()
        self.model = model
        self.num_samples = num_samples
        self.start = start
Example #36
def configuration(parent_package="", top_path=None):
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info

    config = Configuration("core", parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, "code_generators")

    if is_released(config):
        warnings.simplefilter("error", MismatchCAPIWarning)

    # Check whether we have a mismatch between the set C API VERSION and the
    # actual C API VERSION
    check_api_version(C_API_VERSION, codegen_dir)

    generate_umath_py = join(codegen_dir, "generate_umath.py")
    n = dot_join(config.name, "generate_umath")
    generate_umath = npy_load_module(
        "_".join(n.split(".")), generate_umath_py, (".py", "U", 1)
    )

    header_dir = "include/numpy"  # this is relative to config.path_in_package

    cocache = CallOnceOnly()

    def generate_config_h(ext, build_dir):
        target = join(build_dir, header_dir, "config.h")
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info("Generating %s", target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(("MATHLIB", ",".join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append("__NPY_PRIVATE_NO_SIGNAL")

            # Windows checks
            if sys.platform == "win32" or os.name == "nt":
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(("NPY_RESTRICT", config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Use relaxed stride checking
            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(("NPY_RELAXED_STRIDES_CHECKING", 1))

            # Use bogus stride debug aid when relaxed strides are enabled
            if NPY_RELAXED_STRIDES_DEBUG:
                moredefs.append(("NPY_RELAXED_STRIDES_DEBUG", 1))

            # Get long double representation
            rep = check_long_double_representation(config_cmd)
            moredefs.append(("HAVE_LDOUBLE_%s" % rep, 1))

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(("NPY_PY3K", 1))

            # Generate the config.h file from moredefs
            target_f = open(target, "w")
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write("#define %s\n" % (d))
                else:
                    target_f.write("#define %s %s\n" % (d[0], d[1]))

            # define inline to our keyword, or nothing
            target_f.write("#ifndef __cplusplus\n")
            if inline == "inline":
                target_f.write("/* #undef inline */\n")
            else:
                target_f.write("#define inline %s\n" % inline)
            target_f.write("#endif\n")

            # add the guard to make sure config.h is never included directly,
            # but always through npy_config.h
            target_f.write(
                """
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
"""
            )

            target_f.close()
            print("File:", target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print("EOF")
        else:
            mathlibs = []
            target_f = open(target)
            for line in target_f:
                s = "#define MATHLIB"
                if line.startswith(s):
                    value = line[len(s) :].strip()
                    if value:
                        mathlibs.extend(value.split(","))
            target_f.close()

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, "libraries"):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        # put common include directory in build_dir on search path
        # allows using code generation in headers
        config.add_include_dirs(join(build_dir, "src", "common"))
        config.add_include_dirs(join(build_dir, "src", "npymath"))

        target = join(build_dir, header_dir, "_numpyconfig.h")
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info("Generating %s", target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(("NPY_NO_SIGNAL", 1))

            if is_npy_no_smp():
                moredefs.append(("NPY_NO_SMP", 1))
            else:
                moredefs.append(("NPY_NO_SMP", 0))

            mathlibs = check_mathlib(config_cmd)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(("NPY_RELAXED_STRIDES_CHECKING", 1))

            if NPY_RELAXED_STRIDES_DEBUG:
                moredefs.append(("NPY_RELAXED_STRIDES_DEBUG", 1))

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl("PRIdPTR", headers=["inttypes.h"]):
                moredefs.append(("NPY_USE_C99_FORMATS", 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(("NPY_VISIBILITY_HIDDEN", hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(("NPY_ABI_VERSION", "0x%.8X" % C_ABI_VERSION))
            moredefs.append(("NPY_API_VERSION", "0x%.8X" % C_API_VERSION))

            # Add moredefs to header
            target_f = open(target, "w")
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write("#define %s\n" % (d))
                else:
                    target_f.write("#define %s %s\n" % (d[0], d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write(
                """
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
"""
            )
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            print("File: %s" % target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print("EOF")
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + ".py")
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info("executing %s", script)
                h_file, c_file, doc_file = m.generate_api(
                    os.path.join(build_dir, header_dir)
                )
            finally:
                del sys.path[0]
            config.add_data_files((header_dir, h_file), (header_dir, doc_file))
            return (h_file,)

        return generate_api

    generate_numpy_api = generate_api_func("generate_numpy_api")
    generate_ufunc_api = generate_api_func("generate_ufunc_api")

    config.add_include_dirs(join(local_dir, "src", "common"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))

    config.add_data_files("include/numpy/*.h")
    config.add_include_dirs(join("src", "npymath"))
    config.add_include_dirs(join("src", "multiarray"))
    config.add_include_dirs(join("src", "umath"))
    config.add_include_dirs(join("src", "npysort"))

    config.add_define_macros(
        [("NPY_INTERNAL_BUILD", "1")]
    )  # this macro indicates that Numpy build is in process
    config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
    if sys.platform[:3] == "aix":
        config.add_define_macros([("_LARGE_FILES", None)])
    else:
        config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
        config.add_define_macros([("_LARGEFILE_SOURCE", "1")])
        config.add_define_macros([("_LARGEFILE64_SOURCE", "1")])

    config.numpy_include_dirs.extend(config.paths("include"))

    deps = [
        join("src", "npymath", "_signbit.c"),
        join("include", "numpy", "*object.h"),
        join(codegen_dir, "genapi.py"),
    ]

    #######################################################################
    #                            dummy module                             #
    #######################################################################

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.

    config.add_extension(
        "_dummy",
        sources=[
            join("src", "dummymodule.c"),
            generate_config_h,
            generate_numpyconfig_h,
            generate_numpy_api,
        ],
    )

    #######################################################################
    #                          npymath library                            #
    #######################################################################

    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    def get_mathlib_info(*args):
        # Another ugly hack: the mathlib info is known once build_src is run,
        # but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during npymath build
        config_cmd = config.get_config_cmd()

        # Check that the toolchain works, to fail early if it doesn't
        # (avoid late errors with MATHLIB which are confusing if the
        # compiler does not work).
        st = config_cmd.try_link("int main(void) { return 0;}")
        if not st:
            raise RuntimeError("Broken toolchain: cannot link a simple C program")
        mlibs = check_mathlib(config_cmd)

        posix_mlib = " ".join(["-l%s" % l for l in mlibs])
        msvc_mlib = " ".join(["%s.lib" % l for l in mlibs])
        subst_dict["posix_mathlib"] = posix_mlib
        subst_dict["msvc_mathlib"] = msvc_mlib

    npymath_sources = [
        join("src", "npymath", "npy_math_internal.h.src"),
        join("src", "npymath", "npy_math.c"),
        join("src", "npymath", "ieee754.c.src"),
        join("src", "npymath", "npy_math_complex.c.src"),
        join("src", "npymath", "halffloat.c"),
    ]

    # Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
    # Intel and Clang also don't seem happy with /GL
    is_msvc = platform.platform().startswith(
        "Windows"
    ) and platform.python_compiler().startswith("MS")
    config.add_installed_library(
        "npymath",
        sources=npymath_sources + [get_mathlib_info],
        install_dir="lib",
        build_info={
            "include_dirs": [],  # empty list required for creating npy_math_internal.h
            "extra_compiler_args": (["/GL-"] if is_msvc else []),
        },
    )
    config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config", subst_dict)
    config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config", subst_dict)

    #######################################################################
    #                         npysort library                             #
    #######################################################################

    # This library is created for the build but it is not installed
    npysort_sources = [
        join("src", "common", "npy_sort.h.src"),
        join("src", "npysort", "quicksort.c.src"),
        join("src", "npysort", "mergesort.c.src"),
        join("src", "npysort", "heapsort.c.src"),
        join("src", "common", "npy_partition.h.src"),
        join("src", "npysort", "selection.c.src"),
        join("src", "common", "npy_binsearch.h.src"),
        join("src", "npysort", "binsearch.c.src"),
    ]
    config.add_library("npysort", sources=npysort_sources, include_dirs=[])

    #######################################################################
    #                     multiarray_tests module                         #
    #######################################################################

    config.add_extension(
        "_multiarray_tests",
        sources=[
            join("src", "multiarray", "_multiarray_tests.c.src"),
            join("src", "common", "mem_overlap.c"),
        ],
        depends=[
            join("src", "common", "mem_overlap.h"),
            join("src", "common", "npy_extint128.h"),
        ],
        libraries=["npymath"],
    )

    #######################################################################
    #             _multiarray_umath module - common part                  #
    #######################################################################

    common_deps = [
        join("src", "common", "array_assign.h"),
        join("src", "common", "binop_override.h"),
        join("src", "common", "cblasfuncs.h"),
        join("src", "common", "lowlevel_strided_loops.h"),
        join("src", "common", "mem_overlap.h"),
        join("src", "common", "npy_cblas.h"),
        join("src", "common", "npy_config.h"),
        join("src", "common", "npy_ctypes.h"),
        join("src", "common", "npy_extint128.h"),
        join("src", "common", "npy_import.h"),
        join("src", "common", "npy_longdouble.h"),
        join("src", "common", "templ_common.h.src"),
        join("src", "common", "ucsnarrow.h"),
        join("src", "common", "ufunc_override.h"),
        join("src", "common", "umathmodule.h"),
        join("src", "common", "numpyos.h"),
    ]

    common_src = [
        join("src", "common", "array_assign.c"),
        join("src", "common", "mem_overlap.c"),
        join("src", "common", "npy_longdouble.c"),
        join("src", "common", "templ_common.h.src"),
        join("src", "common", "ucsnarrow.c"),
        join("src", "common", "ufunc_override.c"),
        join("src", "common", "numpyos.c"),
    ]

    blas_info = get_info("blas_opt", 0)
    if blas_info and ("HAVE_CBLAS", None) in blas_info.get("define_macros", []):
        extra_info = blas_info
        # These files are also in MANIFEST.in so that they are always in
        # the source distribution independently of HAVE_CBLAS.
        common_src.extend(
            [
                join("src", "common", "cblasfuncs.c"),
                join("src", "common", "python_xerbla.c"),
            ]
        )
        if uses_accelerate_framework(blas_info):
            common_src.extend(get_sgemv_fix())
    else:
        extra_info = {}

    #######################################################################
    #             _multiarray_umath module - multiarray part              #
    #######################################################################

    multiarray_deps = (
        [
            join("src", "multiarray", "arrayobject.h"),
            join("src", "multiarray", "arraytypes.h"),
            join("src", "multiarray", "arrayfunction_override.h"),
            join("src", "multiarray", "buffer.h"),
            join("src", "multiarray", "calculation.h"),
            join("src", "multiarray", "common.h"),
            join("src", "multiarray", "convert_datatype.h"),
            join("src", "multiarray", "convert.h"),
            join("src", "multiarray", "conversion_utils.h"),
            join("src", "multiarray", "ctors.h"),
            join("src", "multiarray", "descriptor.h"),
            join("src", "multiarray", "dragon4.h"),
            join("src", "multiarray", "getset.h"),
            join("src", "multiarray", "hashdescr.h"),
            join("src", "multiarray", "iterators.h"),
            join("src", "multiarray", "mapping.h"),
            join("src", "multiarray", "methods.h"),
            join("src", "multiarray", "multiarraymodule.h"),
            join("src", "multiarray", "nditer_impl.h"),
            join("src", "multiarray", "number.h"),
            join("src", "multiarray", "refcount.h"),
            join("src", "multiarray", "scalartypes.h"),
            join("src", "multiarray", "sequence.h"),
            join("src", "multiarray", "shape.h"),
            join("src", "multiarray", "strfuncs.h"),
            join("src", "multiarray", "typeinfo.h"),
            join("src", "multiarray", "usertypes.h"),
            join("src", "multiarray", "vdot.h"),
            join("include", "numpy", "arrayobject.h"),
            join("include", "numpy", "_neighborhood_iterator_imp.h"),
            join("include", "numpy", "npy_endian.h"),
            join("include", "numpy", "arrayscalars.h"),
            join("include", "numpy", "noprefix.h"),
            join("include", "numpy", "npy_interrupt.h"),
            join("include", "numpy", "npy_3kcompat.h"),
            join("include", "numpy", "npy_math.h"),
            join("include", "numpy", "halffloat.h"),
            join("include", "numpy", "npy_common.h"),
            join("include", "numpy", "npy_os.h"),
            join("include", "numpy", "utils.h"),
            join("include", "numpy", "ndarrayobject.h"),
            join("include", "numpy", "npy_cpu.h"),
            join("include", "numpy", "numpyconfig.h"),
            join("include", "numpy", "ndarraytypes.h"),
            join("include", "numpy", "npy_1_7_deprecated_api.h"),
            # add library sources as distutils does not consider library
            # dependencies
        ]
        + npysort_sources
        + npymath_sources
    )

    multiarray_src = [
        join("src", "multiarray", "alloc.c"),
        join("src", "multiarray", "arrayobject.c"),
        join("src", "multiarray", "arraytypes.c.src"),
        join("src", "multiarray", "array_assign_scalar.c"),
        join("src", "multiarray", "array_assign_array.c"),
        join("src", "multiarray", "arrayfunction_override.c"),
        join("src", "multiarray", "buffer.c"),
        join("src", "multiarray", "calculation.c"),
        join("src", "multiarray", "compiled_base.c"),
        join("src", "multiarray", "common.c"),
        join("src", "multiarray", "convert.c"),
        join("src", "multiarray", "convert_datatype.c"),
        join("src", "multiarray", "conversion_utils.c"),
        join("src", "multiarray", "ctors.c"),
        join("src", "multiarray", "datetime.c"),
        join("src", "multiarray", "datetime_strings.c"),
        join("src", "multiarray", "datetime_busday.c"),
        join("src", "multiarray", "datetime_busdaycal.c"),
        join("src", "multiarray", "descriptor.c"),
        join("src", "multiarray", "dragon4.c"),
        join("src", "multiarray", "dtype_transfer.c"),
        join("src", "multiarray", "einsum.c.src"),
        join("src", "multiarray", "flagsobject.c"),
        join("src", "multiarray", "getset.c"),
        join("src", "multiarray", "hashdescr.c"),
        join("src", "multiarray", "item_selection.c"),
        join("src", "multiarray", "iterators.c"),
        join("src", "multiarray", "lowlevel_strided_loops.c.src"),
        join("src", "multiarray", "mapping.c"),
        join("src", "multiarray", "methods.c"),
        join("src", "multiarray", "multiarraymodule.c"),
        join("src", "multiarray", "nditer_templ.c.src"),
        join("src", "multiarray", "nditer_api.c"),
        join("src", "multiarray", "nditer_constr.c"),
        join("src", "multiarray", "nditer_pywrap.c"),
        join("src", "multiarray", "number.c"),
        join("src", "multiarray", "refcount.c"),
        join("src", "multiarray", "sequence.c"),
        join("src", "multiarray", "shape.c"),
        join("src", "multiarray", "scalarapi.c"),
        join("src", "multiarray", "scalartypes.c.src"),
        join("src", "multiarray", "strfuncs.c"),
        join("src", "multiarray", "temp_elide.c"),
        join("src", "multiarray", "typeinfo.c"),
        join("src", "multiarray", "usertypes.c"),
        join("src", "multiarray", "vdot.c"),
    ]

    #######################################################################
    #             _multiarray_umath module - umath part                   #
    #######################################################################

    def generate_umath_c(ext, build_dir):
        target = join(build_dir, header_dir, "__umath_generated.c")
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script, target):
            f = open(target, "w")
            f.write(
                generate_umath.make_code(
                    generate_umath.defdict, generate_umath.__file__
                )
            )
            f.close()
        return []

    umath_src = [
        join("src", "umath", "umathmodule.c"),
        join("src", "umath", "reduction.c"),
        join("src", "umath", "funcs.inc.src"),
        join("src", "umath", "simd.inc.src"),
        join("src", "umath", "loops.h.src"),
        join("src", "umath", "loops.c.src"),
        join("src", "umath", "matmul.h.src"),
        join("src", "umath", "matmul.c.src"),
        join("src", "umath", "ufunc_object.c"),
        join("src", "umath", "extobj.c"),
        join("src", "umath", "cpuid.c"),
        join("src", "umath", "scalarmath.c.src"),
        join("src", "umath", "ufunc_type_resolution.c"),
        join("src", "umath", "override.c"),
    ]

    umath_deps = [
        generate_umath_py,
        join("include", "numpy", "npy_math.h"),
        join("include", "numpy", "halffloat.h"),
        join("src", "multiarray", "common.h"),
        join("src", "multiarray", "number.h"),
        join("src", "common", "templ_common.h.src"),
        join("src", "umath", "simd.inc.src"),
        join("src", "umath", "override.h"),
        join(codegen_dir, "generate_ufunc_api.py"),
    ]

    config.add_extension(
        "_multiarray_umath",
        sources=multiarray_src
        + umath_src
        + npymath_sources
        + common_src
        + [
            generate_config_h,
            generate_numpyconfig_h,
            generate_numpy_api,
            join(codegen_dir, "generate_numpy_api.py"),
            join("*.py"),
            generate_umath_c,
            generate_ufunc_api,
        ],
        depends=deps + multiarray_deps + umath_deps + common_deps,
        libraries=["npymath", "npysort"],
        extra_info=extra_info,
    )

    #######################################################################
    #                        umath_tests module                           #
    #######################################################################

    config.add_extension(
        "_umath_tests", sources=[join("src", "umath", "_umath_tests.c.src")]
    )

    #######################################################################
    #                   custom rational dtype module                      #
    #######################################################################

    config.add_extension(
        "_rational_tests", sources=[join("src", "umath", "_rational_tests.c.src")]
    )

    #######################################################################
    #                        struct_ufunc_test module                     #
    #######################################################################

    config.add_extension(
        "_struct_ufunc_tests",
        sources=[join("src", "umath", "_struct_ufunc_tests.c.src")],
    )

    #######################################################################
    #                        operand_flag_tests module                    #
    #######################################################################

    config.add_extension(
        "_operand_flag_tests",
        sources=[join("src", "umath", "_operand_flag_tests.c.src")],
    )

    config.add_data_dir("tests")
    config.add_data_dir("tests/data")

    config.make_svn_version_py()

    return config
Example #37
0
 def test_redirect_https(self):
     "GET a URL that redirects to an https URI"
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", RemovedInDjango19Warning)
         response = self.client.get('/https_redirect_view/', follow=True)
     self.assertTrue(response.test_was_secure_request)
Example #38
0
def run_reduce_dim(
    adata,
    X_data,
    n_components,
    n_pca_components,
    reduction_method,
    embedding_key,
    n_neighbors,
    neighbor_key,
    cores,
    kwargs,
):
    if reduction_method == "trimap":
        import trimap

        triplemap = trimap.TRIMAP(
            n_inliers=20,
            n_outliers=10,
            n_random=10,
            distance="euclidean",  # cosine
            weight_adj=1000.0,
            apply_pca=False,
        )
        X_dim = triplemap.fit_transform(X_data)

        adata.obsm[embedding_key] = X_dim
        adata.uns[neighbor_key] = {
            "params": {"n_neighbors": n_neighbors, "method": reduction_method},
            # "connectivities": "connectivities",
            # "distances": "distances",
            # "indices": "indices",
        }
    elif reduction_method == "diffusion_map":
        # support Yan's diffusion map here
        pass
    elif reduction_method.lower() == "tsne":
        try:
            from fitsne import FItSNE
        except ImportError:
            print(
                "Please install fitsne first to use the accelerated tSNE method. Installation instructions are "
                "provided here: https://pypi.org/project/fitsne/"
            )
            raise

        X_dim = FItSNE(X_data, nthreads=cores)  # use FItSNE

        # bh_tsne = TSNE(n_components = n_components)
        # X_dim = bh_tsne.fit_transform(X)
        adata.obsm[embedding_key] = X_dim
        adata.uns[neighbor_key] = {
            "params": {"n_neighbors": n_neighbors, "method": reduction_method},
            # "connectivities": "connectivities",
            # "distances": "distances",
            # "indices": "indices",
        }
    elif reduction_method == "umap":
        _umap_kwargs = {
            "n_components": n_components,
            "metric": "euclidean",
            "min_dist": 0.5,
            "spread": 1.0,
            "n_epochs": 0,
            "alpha": 1.0,
            "gamma": 1.0,
            "negative_sample_rate": 5,
            "init_pos": "spectral",
            "random_state": 0,
            "densmap": False,
            "dens_lambda": 2.0,
            "dens_frac": 0.3,
            "dens_var_shift": 0.1,
            "output_dens": False,
            "verbose": False,
        }
        umap_kwargs = update_dict(_umap_kwargs, kwargs)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            (
                mapper,
                graph,
                knn_indices,
                knn_dists,
                X_dim,
            ) = umap_conn_indices_dist_embedding(X_data, n_neighbors, **umap_kwargs)

        adata.obsm[embedding_key] = X_dim
        knn_dists = knn_to_adj(knn_indices, knn_dists)
        adata.uns[neighbor_key] = {
            "params": {"n_neighbors": n_neighbors, "method": reduction_method},
            # "connectivities": "connectivities",
            # "distances": "distances",
            "indices": knn_indices,
        }

        layer = neighbor_key.split("_")[0] if "_" in neighbor_key else None
        conn_key = "connectivities" if layer is None else layer + "_connectivities"
        dist_key = "distances" if layer is None else layer + "_distances"

        adata.obsp[conn_key], adata.obsp[dist_key] = graph, knn_dists

        adata.uns["umap_fit"] = {
            "fit": mapper,
            "n_pca_components": n_pca_components,
        }
    elif reduction_method == "psl":
        adj_mat, X_dim = psl(X_data, d=n_components, K=n_neighbors)  # this needs to be updated
        adata.obsm[embedding_key] = X_dim
        adata.uns[neighbor_key] = adj_mat

    else:
        raise Exception("reduction_method {} is not supported.".format(reduction_method))

    return adata
Example #39
0
    def setUp(self):
        """Set up test fixtures, if any."""

        # turn off warnings unless refactoring.
        warnings.simplefilter('ignore')
Example #40
0
from skbayes.linear_models import VBLinearRegression, EBLinearRegression
from sklearn import linear_model

#from wpca import WPCA, EMPCA
import RobustPCA
import FastICA
import KICA

import math
import subprocess
import tga
import warnings
import sys
import numpy as np
from sklearn.metrics import mean_squared_error

if not sys.warnoptions:
    warnings.simplefilter('ignore')

def NRMSE(y_true, y_pred, scaler):
    y_true = scaler.inverse_transform(y_true)
    y_pred = scaler.inverse_transform(y_pred)

    #Normalized Root Mean Squared Error
    y_std = np.std(y_true)

    #return mean_squared_error(y_true, y_pred)
    return np.sqrt(mean_squared_error(y_true, y_pred))/y_std
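
# Added usage sketch (hypothetical, not part of the original module): fit a
# scaler, perturb the scaled targets slightly, and report the normalized error
# with the NRMSE helper above. The numbers are made up for illustration.
#
#     from sklearn.preprocessing import MinMaxScaler
#     y = np.arange(1, 11, dtype=float).reshape(-1, 1)
#     scaler = MinMaxScaler().fit(y)
#     y_true_scaled = scaler.transform(y)
#     y_pred_scaled = y_true_scaled + 0.01
#     print(NRMSE(y_true_scaled, y_pred_scaled, scaler))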

class ESN(object):
    def __init__(self, n_internal_units = 100, spectral_radius = 0.9, connectivity = 0.5, input_scaling = 0.5, input_shift = 0.0,
                 teacher_scaling = 0.5, teacher_shift = 0.0, noise_level = 0.01):
        # Initialize attributes
Example #41
0
# -*- coding: utf-8 -*-
"""
Created on Tue Nov  7 10:40:07 2017
Copyright (C) 2018
@author: Derek Pisner (dPys)
"""
import warnings
warnings.simplefilter("ignore")
import numpy as np
import networkx as nx


def threshold_absolute(W, thr, copy=True):
    '''# Adapted from bctpy
    '''
    if copy:
        W = W.copy()
    np.fill_diagonal(W, 0)
    W[W < thr] = 0
    return W
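
# Added usage sketch (hypothetical): zero out connection weights below 0.2 in a
# random symmetric connectivity matrix using threshold_absolute above.
#
#     W = np.random.rand(10, 10)
#     W = (W + W.T) / 2.0
#     W_thr = threshold_absolute(W, 0.2)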


def threshold_proportional(W, p, copy=True):
    '''# Adapted from bctpy
    '''
    if p > 1 or p < 0:
        raise ValueError('Threshold must be in range [0,1]')
    if copy:
        W = W.copy()
    n = len(W)
    np.fill_diagonal(W, 0)
Example #42
0
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.utils import get_runner
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import (
    RemovedInDjango20Warning,
    RemovedInDjango110Warning,
)
from django.utils.log import DEFAULT_LOGGING

# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango110Warning)
warnings.simplefilter("error", RemovedInDjango20Warning)
# Make runtime warning errors to ensure no usage of error prone patterns.
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore",
                        "'U' mode is deprecated",
                        DeprecationWarning,
                        module='docutils.io')

RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))

TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')

# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
Example #43
0
    def permit_single_axis_tracking(self, max_angle=90, backtrack=True, gcr=2.0 / 7.0):
        """

        permit_single_axis_tracking(self, max_angle=90, backtrack=True, gcr=2.0 / 7.0)

        Permits single axis tracking in the simulation using the pvlib.tracking.singleaxis() function [1].


        Parameters
        ----------
        max_angle: float, optional
                   default 90
                   A value denoting the maximum rotation angle, in decimal degrees, of the one-axis tracker from its horizontal position
                   (horizontal if axis_tilt = 0). A max_angle of 90 degrees allows the tracker to rotate to a vertical position to point the
                   panel towards the horizon. A max_angle of 180 degrees allows full rotation [1].

        backtrack: bool, optional
                   default True
                   Controls whether the tracker has the capability to “backtrack” to avoid row-to-row shading.
                   False denotes no backtrack capability. True denotes backtrack capability [1].

        gcr:       float, optional
                   default 2.0/7.0
                   A value denoting the ground coverage ratio of a tracker system which utilizes backtracking; i.e. the ratio between the
                   PV array surface area to total ground area. A tracker system with modules 2 meters wide, centered on the tracking axis,
                   with 6 meters between the tracking axes has a gcr of 2/6=0.333. If gcr is not provided, a gcr of 2/7 is default. gcr must be <=1 [1].


        Returns
        -------
        Returns a reference to the invoking SolarWorkflowManager object.

        Notes
        -----
        Required columns in the placements dataframe to use this functions are 'lon', 'lat', 'elev', 'tilt' and 'azimuth'.
        Required data in the sim_data dictionary are 'apparent_solar_zenith' and 'solar_azimuth'.

        References
        ----------
        [1] https://wholmgren-pvlib-python-new.readthedocs.io/en/doc-reorg2/generated/tracking/pvlib.tracking.singleaxis.html

        [2]	Lorenzo, E et al., 2011, “Tracking and back-tracking”, Prog. in Photovoltaics: Research and Applications, v. 19, pp. 747-753.

        """

        """See pvlib.tracking.singleaxis for parameter info"""
        assert "apparent_solar_zenith" in self.sim_data
        assert "solar_azimuth" in self.sim_data
        assert "tilt" in self.placements.columns
        assert "azimuth" in self.placements.columns

        self.register_workflow_parameter("tracking_mode", "fixed")
        self.register_workflow_parameter("tracking_max_angle", max_angle)
        self.register_workflow_parameter("tracking_backtrack", backtrack)
        self.register_workflow_parameter("tracking_gcr", gcr)

        system_tilt = np.empty(self._sim_shape_)
        system_azimuth = np.empty(self._sim_shape_)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            for i in range(self.locs.count):
                placement = self.placements.iloc[i]

                tmp = pvlib.tracking.singleaxis(
                    apparent_zenith=pd.Series(self.sim_data['apparent_solar_zenith'][:, i], index=self._time_index_),
                    apparent_azimuth=pd.Series(self.sim_data['solar_azimuth'][:, i], index=self._time_index_),
                    axis_tilt=placement.tilt,  # self.placements['tilt'].values,
                    axis_azimuth=placement.azimuth,  # self.placements['azimuth'].values,
                    max_angle=max_angle,
                    backtrack=backtrack,
                    gcr=gcr)

                system_tilt[:, i] = tmp['surface_tilt'].values
                system_azimuth[:, i] = tmp['surface_azimuth'].values

                # fix nan values. Why are they there???
                s = np.isnan(system_tilt[:, i])
                system_tilt[s, i] = placement.tilt

                s = np.isnan(system_azimuth[:, i])
                system_azimuth[s, i] = placement.azimuth

        self.sim_data['system_tilt'] = system_tilt
        self.sim_data['system_azimuth'] = system_azimuth

        return self
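
# Added illustration (hypothetical, not part of SolarWorkflowManager): a direct
# call to pvlib.tracking.singleaxis with made-up solar-position values, showing
# the max_angle, backtrack and gcr parameters described in the docstring above.
import pandas as pd
import pvlib

_times = pd.date_range("2020-06-21 06:00", periods=4, freq="3H", tz="UTC")
_tracker = pvlib.tracking.singleaxis(
    apparent_zenith=pd.Series([80.0, 45.0, 20.0, 60.0], index=_times),
    apparent_azimuth=pd.Series([90.0, 135.0, 180.0, 250.0], index=_times),
    axis_tilt=0,
    axis_azimuth=180,
    max_angle=90,
    backtrack=True,
    gcr=2.0 / 7.0,
)
print(_tracker[["surface_tilt", "surface_azimuth"]])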
Example #44
0
'''
Gives an optimal fit to binned rate data
using curve fit
L. Strolger
2018
'''
import os, sys, pdb, scipy, glob
from pylab import *
from strolger_util import util as u
from strolger_util import rates_z as rz
from strolger_util import imf
from strolger_util import cosmotools as ct
from scipy.integrate import simps, quad
from scipy.optimize import curve_fit
from copy import copy, deepcopy
import warnings
warnings.simplefilter("ignore", RuntimeWarning)


def dtdfit(time, *p):
    ff, aa, bb, cc = p
    scale = quad(imf.salpeter, 3, 8)[0] / quad(imf.salpeter1, 0.1, 125)[0]
    scale = scale * 0.7**2. * 1e4
    par_model = [0.013, 2.6, 3.2, 6.1]
    sfh = rz.csfh_time(time, *par_model)
    dt = sum(diff(time)) / (len(time) - 1)
    p1 = (aa, bb, cc)
    res = rz.dtdfunc(time, *p1, norm=True)
    tmp = convolve(sfh, res, 'full')
    return (ff * tmp[:len(time)] * dt * scale)

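# Added sketch (hypothetical, not from the original script): how a model such as
# dtdfit above can be handed to scipy.optimize.curve_fit for binned rate data.
# The toy model, time grid, rates and starting guess are made up for illustration.
import numpy as np
from scipy.optimize import curve_fit

def _toy_model(t, ff, aa, bb, cc):
    # stand-in for dtdfit: any smooth function of time with four parameters
    return ff * (aa * t + bb * t ** 2 + cc)

_t = np.linspace(0.1, 13.7, 50)               # cosmic time bins (Gyr), synthetic
_rates = _toy_model(_t, 1.0, 0.5, 0.02, 0.1)  # synthetic "observed" rates
_popt, _pcov = curve_fit(_toy_model, _t, _rates, p0=(1.0, 0.5, 0.02, 0.1))
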
Example #45
0
import warnings; warnings.simplefilter("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import csv

#read workbook
note= pd.ExcelFile(r"/home/rauchdi/Desktop/5A IPS/Madeth/Pastel/NotesGpV1.xlsx")

#print all data
#print(note)

dfs = {sheet_name: note.parse(sheet_name)
          for sheet_name in note.sheet_names}



def extract():
    x=dfs.values()
    # convert to str
    s=str(dfs)
    # split the data into lines
    spli=s.splitlines()
    # remove the ****
    spli.remove("{'Sheet1':                 Barême: ")
    # build the raw groups
    nwList=[]
    for i in spli:
        nwList.append(i.split(" "))
Example #46
0
def uncles(X,
           type='A',
           Ks=[n for n in range(4, 21, 4)],
           params=None,
           methods=None,
           methodsDetailed=None,
           U=None,
           Utype='PM',
           relabel_technique='minmin',
           setsP=None,
           setsN=None,
           dofuzzystretch=False,
           wsets=None,
           wmethods=None,
           GDM=None,
           smallestClusterSize=11,
           CoPaMfinetrials=1,
           CoPaMfinaltrials=1,
           binarise_techniqueP='DTB',
           binarise_paramP=np.arange(0.0, 1.1, 0.1, dtype='float'),
           binarise_techniqueN='DTB',
           binarise_paramN=np.concatenate(([sys.float_info.epsilon],
                                           np.arange(0.1,
                                                     1.1,
                                                     0.1,
                                                     dtype='float'))),
           Xnames=None,
           deterministic=False,
           ncores=1):
    Xloc = ds.listofarrays2arrayofarrays(X)
    L = len(Xloc)  # Number of datasets

    # Fix parameters
    if params is None: params = {}
    if setsP is None: setsP = [x for x in range(int(math.floor(L / 2)))]
    if setsN is None: setsN = [x for x in range(int(math.floor(L / 2)), L)]
    setsPN = np.array(np.concatenate((setsP, setsN), axis=0), dtype=int)
    Xloc = Xloc[setsPN]
    L = np.shape(Xloc)[0]  # Number of datasets
    if wsets is None:
        wsets = np.array([1 for x in range(L)])
    else:
        wsets = np.array(wsets)[setsPN]
    if GDM is None:
        Ng = np.shape(Xloc[0])[0]
        GDMloc = np.ones([Ng, L], dtype='bool')
    else:
        GDMloc = GDM[:, setsPN]
        Ng = GDMloc.shape[0]
    if Xnames is None:
        Xnames = ['X{0}'.format(l) for l in range(L)]

    if methods is None:
        methods = [['k-means']]
        # largest_DS = np.max([x.shape[0] for x in Xloc])
        # if (largest_DS <= maxgenesinsetforpdist):
        #     if (deterministic):
        #         methods = [['k-means'], ['HC']]
        #     else:
        #         methods = [['k-means'], ['SOMs'], ['HC']]
        # else:
        #     if (deterministic):
        #         methods = [['k-means']]
        #     else:
        #         methods = [['k-means'], ['SOMs']]
    else:
        largest_DS = np.max([x.shape[0] for x in Xloc])
        if (largest_DS > maxgenesinsetforpdist):
            methods = [
                m for m in methods
                if 'hc' not in [entry.lower() for entry in m]
            ]
            if not methods:
                io.log('No valid base clustering can be used. Please note that clust would not use HC clustering ' \
                       'on datasets with more than {0} genes. You have a dataset with {1} genes.' \
                       ''.format(maxgenesinsetforpdist, largest_DS))
                io.log('Clust will terminate here.')
                io.log(op.bottomline(), addextrastick=False)
                sys.exit()
    if methodsDetailed is None:
        methodsDetailedloc = np.array([methods for l in range(L)])
    else:
        methodsDetailedloc = methodsDetailed[setsPN]
    if wmethods is None:
        wmethods = [[1 for x in m] for m in methodsDetailedloc]
    elif not isinstance(wmethods[0], (list, tuple, np.ndarray)):
        wmethods = np.tile(methods, [L, 1])
    else:
        wmethods = np.array(wmethods)[setsPN]

    setsPloc = [ii for ii in range(len(setsP))]
    if L > len(setsPloc):
        setsNloc = [ii for ii in range(len(setsPloc), L)]

    Ds = [nu.closest_to_square_factors(k)
          for k in Ks]  # Grid sizes for the SOMs method for each value of K
    NKs = len(Ks)  # Number of K values

    # Clustering
    if U is None:
        Utype = 'PM'
        Uloc = np.array([None] * (L * NKs)).reshape([L, NKs])
        totalparallel = np.sum(Ks) * np.sum(
            [len(meths) for meths in methodsDetailedloc])
        for meths in methodsDetailedloc:
            for meth in meths:
                if 'k-means' in meth:
                    totalparallel += np.max(Ks) * np.max(Ks)
                    continue
        io.resetparallelprogress(totalparallel)

        for l in range(L):
            # Cache kmeans initialisations for the dataset once to save time:
            cl.cache_kmeans_init(Xloc[l],
                                 Ks,
                                 methodsDetailedloc[l],
                                 datasetID=l)

            # Now go to parallel clustering
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                Utmp = Parallel(n_jobs=ncores)\
                    (delayed(clustDataset)
                     (Xloc[l], Ks[ki], Ds[ki], methodsDetailedloc[l], GDMloc[:, l], Ng, l) for ki in range(NKs))

                Utmp = [u for u in Utmp]
                for ki in range(NKs):
                    Uloc[l, ki] = Utmp[ki]

                gc.collect()
                #io.updateparallelprogress(np.sum(Ks) * len(methodsDetailedloc))

    else:
        Uloc = ds.listofarrays2arrayofarrays(U)[setsPN]

    # Calculate a CoPaM for each dataset at each K
    CoPaMsFine = np.array([None] * (L * NKs)).reshape([L, NKs])
    for l in range(L):
        for ki in range(NKs):
            if Utype.lower() == 'pm':
                CoPaMsFineTmp = [
                    generateCoPaM(Uloc[l, ki],
                                  relabel_technique=relabel_technique,
                                  X=[Xloc[l]],
                                  w=wmethods[l],
                                  K=Ks[ki],
                                  GDM=GDMloc[:, l].reshape([-1, 1]))
                    for i in range(CoPaMfinetrials)
                ]
            elif Utype.lower() == 'idx':
                CoPaMsFineTmp = \
                    [generateCoPaMfromidx(Uloc[l, ki], relabel_technique=relabel_technique, X=Xloc,
                                          w=wmethods[l], K=Ks[ki])
                     for i in range(CoPaMfinetrials)]
            else:
                raise ValueError('Invalid Utype')
            CoPaMsFine[l,
                       ki] = generateCoPaM(CoPaMsFineTmp,
                                           relabel_technique=relabel_technique,
                                           X=[Xloc[l]],
                                           GDM=GDMloc[:, l].reshape([-1, 1]))

            if dofuzzystretch:
                CoPaMsFine[l, ki] = fuzzystretch(CoPaMsFine[l, ki])

    # Calculate the final CoPaM for each K
    CoPaMs = np.array([None] * (CoPaMfinaltrials * NKs)).reshape(
        [CoPaMfinaltrials, NKs])
    CoPaMsP = np.array([None] * (CoPaMfinaltrials * NKs)).reshape(
        [CoPaMfinaltrials, NKs])
    CoPaMsN = np.array([None] * (CoPaMfinaltrials * NKs)).reshape(
        [CoPaMfinaltrials, NKs])
    for t in range(CoPaMfinaltrials):
        for ki in range(NKs):
            if type == 'A':
                if Utype.lower() == 'pm':
                    CoPaMs[t, ki] = generateCoPaM(
                        CoPaMsFine[:, ki],
                        relabel_technique=relabel_technique,
                        w=wsets,
                        X=Xloc,
                        GDM=GDMloc)
                elif Utype.lower() == 'idx':
                    CoPaMs[t, ki] = generateCoPaMfromidx(
                        CoPaMsFine[:, ki],
                        relabel_technique=relabel_technique,
                        X=Xloc,
                        w=wsets,
                        GDM=GDMloc)
                else:
                    raise ValueError('Invalid Utype')
            elif type == 'B':
                if Utype.lower() == 'pm':
                    CoPaMsP[t, ki] = generateCoPaM(
                        CoPaMsFine[setsPloc, ki],
                        relabel_technique=relabel_technique,
                        X=Xloc,
                        w=wsets[setsPloc],
                        GDM=GDMloc[:, setsPloc])
                    CoPaMsN[t, ki] = generateCoPaM(
                        CoPaMsFine[setsNloc, ki],
                        relabel_technique=relabel_technique,
                        X=Xloc,
                        w=wsets[setsNloc],
                        GDM=GDMloc[:, setsNloc])
                elif Utype.lower() == 'idx':
                    CoPaMsP[t, ki] = generateCoPaMfromidx(
                        CoPaMsFine[setsPloc, ki],
                        relabel_technique=relabel_technique,
                        X=Xloc,
                        w=wsets[setsPloc],
                        GDM=GDMloc[:, setsPloc])
                    CoPaMsN[t, ki] = generateCoPaMfromidx(
                        CoPaMsFine[setsNloc, ki],
                        relabel_technique=relabel_technique,
                        X=Xloc,
                        w=wsets[setsNloc],
                        GDM=GDMloc[:, setsNloc])
                else:
                    raise ValueError('Invalid Utype')
            else:
                raise ValueError(
                    'Invalid UNCLES type. It has to be either A or B')

    # Binarise
    NPp = len(binarise_paramP)  # Number of P params
    NNp = len(binarise_paramN)  # Number of N params
    if type == 'A':
        B = np.zeros([CoPaMfinaltrials, NPp, 1, NKs], dtype=object)
        Mc = np.zeros([CoPaMfinaltrials, NKs], dtype=object)
    elif type == 'B':
        B = np.zeros([CoPaMfinaltrials, NPp, NNp, NKs], dtype=object)
        Mc = np.zeros([CoPaMfinaltrials, NKs], dtype=object)

    for t in range(CoPaMfinaltrials):
        for ki in range(NKs):
            if type == 'A':
                # Pre-sorting binarisation
                for p in range(NPp):
                    B[t, p, 0, ki] = binarise(CoPaMs[t,
                                                     ki], binarise_techniqueP,
                                              binarise_paramP[p])
                Mc[t, ki] = [np.sum(Bp, axis=0) for Bp in B[t, :, 0, ki]]

                # Sorting
                CoPaMs[t, ki] = sortclusters(CoPaMs[t, ki], Mc[t, ki],
                                             smallestClusterSize)

                # Post-sorting binarisation
                for p in range(NPp):
                    B[t, p, 0, ki] = binarise(CoPaMs[t,
                                                     ki], binarise_techniqueP,
                                              binarise_paramP[p])
                Mc[t, ki] = [np.sum(Bp, axis=0) for Bp in B[t, :, 0, ki]]
            elif type == 'B':
                # Pre-sorting binarisation
                BP = [
                    binarise(CoPaMsP[t, ki], binarise_techniqueP,
                             binarise_paramP[p]) for p in range(NPp)
                ]
                McP = [np.sum(BPp, axis=0) for BPp in BP]

                BN = [
                    binarise(CoPaMsN[t, ki], binarise_techniqueN,
                             binarise_paramN[p]) for p in range(NNp)
                ]
                McN = [np.sum(BNp, axis=0) for BNp in BN]

                # Sorting
                CoPaMsP[t, ki] = sortclusters(CoPaMsP[t, ki], McP,
                                              smallestClusterSize)
                CoPaMsN[t, ki] = sortclusters(CoPaMsN[t, ki], McN,
                                              smallestClusterSize)

                # Post-sorting binarisation
                BP = [
                    binarise(CoPaMsP[t, ki], binarise_techniqueP,
                             binarise_paramP[p]) for p in range(NPp)
                ]
                McP = [np.sum(BPp, axis=0) for BPp in BP]

                BN = [
                    binarise(CoPaMsN[t, ki], binarise_techniqueN,
                             binarise_paramN[p]) for p in range(NNp)
                ]
                McN = [np.sum(BNp, axis=0) for BNp in BN]

                # UNCLES B logic
                for pp in range(NPp):
                    for pn in range(NNp):
                        B[t, pp, pn, ki] = BP[pp]
                        B[t, pp, pn, ki][np.any(BN[pn], axis=1)] = False

                # Fill Mc
                Mc[t, ki] = [None] * Ks[ki]
                for k in range(Ks[ki]):
                    Mc[t, ki][k] = np.zeros([NPp, NNp])
                    for pp in range(NPp):
                        for pn in range(NNp):
                            Mc[t, ki][k][pp, pn] = np.sum(B[t, pp, pn, ki][:,
                                                                           k])

    # Prepare and return the results:
    params = dict(
        params, **{
            'methods': methods,
            'setsP': setsPloc,
            'setsN': setsNloc,
            'dofuzzystretch': dofuzzystretch,
            'type': type,
            'Ks': Ks,
            'NKs': NKs,
            'wsets': wsets,
            'wmethods': wmethods,
            'Ds': Ds,
            'L': L,
            'CoPaMs': CoPaMs,
            'smallestclustersize': smallestClusterSize,
            'GDM': GDMloc
        })

    UnclesRes = collections.namedtuple('UnclesRes',
                                       ['B', 'Mc', 'params', 'X', 'U'])
    return UnclesRes(B, Mc, params, Xloc, Uloc)
Example #47
0
import sys
import socket
import warnings
from multiprocessing import Process

from django.core import mail
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models import Max

from chroniker.models import Job, Log
# from chroniker.tests.commands import Sleeper, InfiniteWaiter, ErrorThrower
# from chroniker.management.commands.cron import run_cron
from chroniker import utils
from chroniker import constants as c
from chroniker import settings as _settings

warnings.simplefilter('error', RuntimeWarning)

socket.gethostname = lambda: 'localhost'

CALLBACK_ERRORS = []

def job_error_callback(job, stdout, stderr):
    print('Error for job %s' % job)
    print(stderr, file=sys.stderr)
    CALLBACK_ERRORS.append(stderr)

class JobProcess(Process):

    def run(self):
        print('Job process started.')
        while 1:
Example #48
0
import logging
import warnings
from typing import Union

import boto3

from brownie.exceptions import BrownieEnvironmentWarning
from yearn.apy import ApyFees, ApyPoints, Apy, get_samples, ApySamples, ApyError
from yearn.v1.registry import Registry as RegistryV1
from yearn.v2.registry import Registry as RegistryV2

from yearn.v1.vaults import VaultV1
from yearn.v2.vaults import Vault as VaultV2

from yearn.utils import contract_creation_block, contract

from yearn.exceptions import PriceError
from yearn.networks import Network

warnings.simplefilter("ignore", BrownieEnvironmentWarning)

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("yearn.apy")


def wrap_vault(vault: Union[VaultV1, VaultV2], samples: ApySamples,
               aliases: dict, icon_url: str, assets_metadata: dict) -> dict:
    apy_error = Apy("error", 0, 0, ApyFees(0, 0), ApyPoints(0, 0, 0))
    try:
        apy = vault.apy(samples)
    except ValueError as error:
        logger.error(error)
        apy = apy_error
    except PriceError as error:
        logger.error(error)
Example #49
0
import logging
import time
import warnings

import PubMedDB
from sqlalchemy.orm import *
from sqlalchemy import *
from sqlalchemy.exc import *
import gzip
from multiprocessing import Pool


WARNING_LEVEL = "always" #error, ignore, always, default, module, once
# multiple processes, #processors-1 is optimal!
PROCESSES = 4

warnings.simplefilter(WARNING_LEVEL)

#convert 3 letter code of months to digits for unique publication format
month_code = {"Jan":"01","Feb":"02","Mar":"03","Apr":"04","May":"05","Jun":"06","Jul":"07","Aug":"08","Sep":"09","Oct":"10","Nov":"11","Dec":"12"}

class MedlineParser:
    #db is a global variable and given to MedlineParser(path,db) in _start_parser(path)
    def __init__(self, filepath,db):
        engine, Base = PubMedDB.init(db)
        Session = sessionmaker(bind=engine)
        self.filepath = filepath
        self.session = Session()


    def _parse(self):
        _file = self.filepath
Example #50
0
    def _update_thumbnail(self):
        """Update thumbnail with current image data and colormap."""
        if not self.loaded:
            # ASYNC_TODO: Do not compute the thumbnail until we are loaded.
            # Is there a nicer way to prevent this from getting called?
            return

        image = self._slice.thumbnail.view

        if self._ndisplay == 3 and self.ndim > 2:
            image = np.max(image, axis=0)

        # float16 not supported by ndi.zoom
        dtype = np.dtype(image.dtype)
        if dtype in [np.dtype(np.float16)]:
            image = image.astype(np.float32)

        raw_zoom_factor = np.divide(self._thumbnail_shape[:2],
                                    image.shape[:2]).min()
        new_shape = np.clip(
            raw_zoom_factor * np.array(image.shape[:2]),
            1,  # smallest side should be 1 pixel wide
            self._thumbnail_shape[:2],
        )
        zoom_factor = tuple(new_shape / image.shape[:2])
        if self.rgb:
            # warning filter can be removed with scipy 1.4
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                downsampled = ndi.zoom(image,
                                       zoom_factor + (1, ),
                                       prefilter=False,
                                       order=0)
            if image.shape[2] == 4:  # image is RGBA
                colormapped = np.copy(downsampled)
                colormapped[..., 3] = downsampled[..., 3] * self.opacity
                if downsampled.dtype == np.uint8:
                    colormapped = colormapped.astype(np.uint8)
            else:  # image is RGB
                if downsampled.dtype == np.uint8:
                    alpha = np.full(
                        downsampled.shape[:2] + (1, ),
                        int(255 * self.opacity),
                        dtype=np.uint8,
                    )
                else:
                    alpha = np.full(downsampled.shape[:2] + (1, ),
                                    self.opacity)
                colormapped = np.concatenate([downsampled, alpha], axis=2)
        else:
            # warning filter can be removed with scipy 1.4
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                downsampled = ndi.zoom(image,
                                       zoom_factor,
                                       prefilter=False,
                                       order=0)
            low, high = self.contrast_limits
            downsampled = np.clip(downsampled, low, high)
            color_range = high - low
            if color_range != 0:
                downsampled = (downsampled - low) / color_range
            downsampled = downsampled**self.gamma
            color_array = self.colormap.map(downsampled.ravel())
            colormapped = color_array.reshape(downsampled.shape + (4, ))
            colormapped[..., 3] *= self.opacity
        self.thumbnail = colormapped
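
# Added sketch (hypothetical, not part of the layer class above): the ndi.zoom
# call pattern used for the thumbnail, shrinking a 2-D array to a small grid
# with nearest-neighbour sampling and no prefiltering.
import numpy as np
from scipy import ndimage as ndi

_img = np.random.rand(512, 512)
_zoom_factor = tuple(np.array([32, 32]) / np.array(_img.shape))
_thumb = ndi.zoom(_img, _zoom_factor, prefilter=False, order=0)
print(_thumb.shape)  # roughly (32, 32)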
Example #51
0
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

from __future__ import print_function

import os, sys, atexit
import signal
import socket
from multiprocessing import Process
from getpass import getpass, getuser
import warnings

try:
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        import paramiko
except ImportError:
    paramiko = None
else:
    from forward import forward_tunnel

try:
    from IPython.external import pexpect
except ImportError:
    pexpect = None

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Example #52
0
    def test_properties(self):

        filepath = os.path.join(test_dir, 'vasprun.xml.nonlm')
        vasprun = Vasprun(filepath, parse_potcar_file=False)
        orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[
            0]].keys())
        self.assertIn(OrbitalType.s, orbs)
        filepath = os.path.join(test_dir, 'vasprun.xml')
        vasprun = Vasprun(filepath, parse_potcar_file=False)

        #Test NELM parsing.
        self.assertEqual(vasprun.parameters["NELM"], 60)
        #test pdos parsing

        pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
        self.assertAlmostEqual(pdos0[Orbital.s][Spin.up][16], 0.0026)
        self.assertAlmostEqual(pdos0[Orbital.pz][Spin.down][16], 0.0012)
        self.assertEqual(pdos0[Orbital.s][Spin.up].shape, (301, ))


        filepath2 = os.path.join(test_dir, 'lifepo4.xml')
        vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,
                               parse_potcar_file=False)
        totalscsteps = sum([len(i['electronic_steps'])
                            for i in vasprun.ionic_steps])
        self.assertEqual(29, len(vasprun.ionic_steps))
        self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
        self.assertEqual(vasprun.lattice,
                         vasprun.lattice_rec.reciprocal_lattice)

        for i, step in enumerate(vasprun.ionic_steps):
            self.assertEqual(vasprun.structures[i], step["structure"])

        self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
            "structure"] for i in range(len(vasprun.ionic_steps))]))

        self.assertEqual(308, totalscsteps,
                         "Incorrect number of energies read from vasprun.xml")

        self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
                         vasprun.atomic_symbols)
        self.assertEqual(vasprun.final_structure.composition.reduced_formula,
                         "LiFe4(PO4)4")
        self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
        self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
        self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
        expectedans = (2.539, 4.0906, 1.5516, False)
        (gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
        self.assertAlmostEqual(gap, expectedans[0])
        self.assertAlmostEqual(cbm, expectedans[1])
        self.assertAlmostEqual(vbm, expectedans[2])
        self.assertEqual(direct, expectedans[3])
        self.assertFalse(vasprun.is_hubbard)
        self.assertEqual(vasprun.potcar_symbols,
                         ['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',
                          'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',
                          'PAW_PBE O 08Apr2002'])
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints,
                             "Actual kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints_weights,
                             "Actual kpoints weights cannot be read")
        for atomdoses in vasprun.pdos:
            for orbitaldos in atomdoses:
                self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")

        # test skipping ionic steps.
        vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
        self.assertEqual(vasprun_skip.nionic_steps, 29)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         len(vasprun_skip.structures))
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        # Check that nionic_steps is preserved no matter what.
        self.assertEqual(vasprun_skip.nionic_steps,
                         vasprun.nionic_steps)

        self.assertNotAlmostEqual(vasprun_skip.final_energy,
                                  vasprun.final_energy)

        # Test with ionic_step_offset
        vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
        self.assertEqual(len(vasprun_offset.ionic_steps),
                         int(len(vasprun.ionic_steps) / 3) - 1)
        self.assertEqual(vasprun_offset.structures[0],
                         vasprun_skip.structures[2])

        self.assertTrue(vasprun_ggau.is_hubbard)
        self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
        self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[Spin.up][
                                   0][0][96][0], 0.0032)
        d = vasprun_ggau.as_dict()
        self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
        self.assertEqual(d["nelements"], 4)

        filepath = os.path.join(test_dir, 'vasprun.xml.unconverged')
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            # Trigger a warning.
            vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
            # Verify some things
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category,
                                       UnconvergedVASPWarning))

            self.assertTrue(vasprun_unconverged.converged_ionic)
            self.assertFalse(vasprun_unconverged.converged_electronic)
            self.assertFalse(vasprun_unconverged.converged)

        filepath = os.path.join(test_dir, 'vasprun.xml.dfpt')
        vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0], 3.33402531)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1], -0.00559998)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2], 3.31237357)
        self.assertTrue(vasprun_dfpt.converged)

        entry = vasprun_dfpt.get_computed_entry()
        entry = MaterialsProjectCompatibility(check_potcar_hash=False).process_entry(entry)
        self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
                               entry.energy)

        filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.ionic')
        vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
        self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0], 515.73485838)
        self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1], -0.00263523)
        self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2], 19.02110169)

        filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.unconverged')
        vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
        self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
        self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
        self.assertFalse(vasprun_dfpt_unconv.converged)

        vasprun_uniform = Vasprun(os.path.join(test_dir, "vasprun.xml.uniform"),
                                  parse_potcar_file=False)
        self.assertEqual(vasprun_uniform.kpoints.style,
                         Kpoints.supported_modes.Reciprocal)

        vasprun_no_pdos = Vasprun(os.path.join(test_dir, "Li_no_projected.xml"),
                                  parse_potcar_file=False)
        self.assertIsNotNone(vasprun_no_pdos.complete_dos)
        self.assertFalse(vasprun_no_pdos.dos_has_errors)

        vasprun_diel = Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric"),
                               parse_potcar_file=False)
        self.assertAlmostEqual(0.4294,vasprun_diel.dielectric[0][10])
        self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][0])
        self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][1])
        self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][2])
        self.assertAlmostEqual(0.0,vasprun_diel.dielectric[1][51][3])
        self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][0])
        self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][1])
        self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][2])
        self.assertAlmostEqual(0.0,vasprun_diel.dielectric[2][85][3])

        v = Vasprun(os.path.join(test_dir, "vasprun.xml.indirect.gz"))
        (gap, cbm, vbm, direct) = v.eigenvalue_band_properties
        self.assertFalse(direct)
Example #53
0
            _stderr_buffer_result, Exception):
        if isinstance(_stdout_buffer_result, Exception):
            log.warning("Failed to ensure that stdout is line buffered",
                        exc_info=_stdout_buffer_result)
        if isinstance(_stderr_buffer_result, Exception):
            log.warning("Failed to ensure that stderr is line buffered",
                        exc_info=_stderr_buffer_result)
        log.warning("Some stack traces may not appear because of this.")

    del _stderr_buffer_result, _stdout_buffer_result

    # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners
    # If the user doesn't want to see these, they can always change the global
    # warning settings in their test module.
    if not sys.warnoptions:
        warnings.simplefilter("default")
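
# Added note (hypothetical, not part of the original module): a test module
# that wants different behaviour can install its own filter before its tests
# run, overriding the "default" filter set above, e.g.:
#
#     import warnings
#     warnings.simplefilter("ignore", DeprecationWarning)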

scheduler = Scheduler()
"""The global scheduler instance."""

regression_manager = None

plusargs = {}
"""A dictionary of "plusargs" handed to the simulation."""

# To save typing provide an alias to scheduler.add
fork = scheduler.add

# FIXME is this really required?
_rlock = threading.RLock()
Example #54
0
import os
import warnings

from genz.static.expyfun import ExperimentController
from genz.static.expyfun import _TempDir, _hide_window

warnings.simplefilter('always')

std_args = ['test']
std_kwargs = dict(participant='foo', session='01', full_screen=False,
                  window_size=(1, 1), verbose=True, noise_db=0, version='dev')


@_hide_window
def test_logging(ac='pyglet'):
    """Test logging to file (Pyglet)."""
    tempdir = _TempDir()
    orig_dir = os.getcwd()
    os.chdir(tempdir)
    try:
        with ExperimentController(*std_args, audio_controller=ac,
                                  response_device='keyboard',
                                  trigger_controller='dummy',
                                  **std_kwargs) as ec:
            test_name = ec._log_file
            stamp = ec.current_time
            ec.wait_until(stamp)  # wait_until called w/already passed timest.
            with warnings.catch_warnings(record=True):
                warnings.simplefilter('always')
                ec.load_buffer([1., -1., 1., -1., 1., -1.])  # RMS warning
Example #55
0
# -*- coding: utf-8 -*-
"""
Functions in this module correspond more or less to the functions described 
in the HFSS Scripting Guide (v 2013.11), Section "Material Script Commands".

At last count there were 2 functions implemented out of 5.
"""
from __future__ import division, print_function, unicode_literals, absolute_import

import warnings

from hycohanz.desktop import get_active_project

warnings.simplefilter('default')


def add_material(oDesktop,
                 material_name,
                 rel_permittivity=1,
                 rel_permeability=1,
                 cond=0,
                 diel_loss_tan=0,
                 mag_loss_tan=0,
                 mag_saturation=0,
                 lande_g=2,
                 delta_h=0):
    """
    Add Material.

    Parameters
    ----------
Example #56
0
import warnings
from contextlib import contextmanager

@contextmanager
def ignore_warnings():
    """Context manager to ignore warnings within the with statement."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        yield
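
# Added usage sketch (hypothetical, not from the original source): warnings
# raised inside the block are suppressed by the ignore_warnings helper above.
with ignore_warnings():
    warnings.warn("this warning is silenced")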
Example #57
0
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on November 6, 2017

@author: alfoa

comment: The ModelPlugIn Module is a Handler.
         It inspects all the modules contained in ./raven/contrib/plugins
         and loads the ones that refer to a model, constructing a '__interFaceDict' on the fly
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#End compatibility block for Python 3----------------------------------------------------------------

#External Modules------------------------------------------------------------------------------------
import os
from glob import glob
import inspect
from collections import defaultdict
#External Modules End--------------------------------------------------------------------------------

#Internal Modules------------------------------------------------------------------------------------
from utils import utils
#Internal Modules End--------------------------------------------------------------------------------

__moduleInterfaceList = []
startDir = os.path.join(os.path.dirname(__file__),'../../plugins')
Example #58
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Shayantan Banerjee
path = "/data/shayantan/NBDriver/python/data"
This program derives the machine learning tool NBDriver
"""




import pandas as pd
import glob
import warnings
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
import numpy as np
from sklearn import metrics
from sklearn.ensemble import VotingClassifier
from imblearn.metrics import sensitivity_score
from imblearn.metrics import specificity_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import confusion_matrix
from sklearn.metrics import make_scorer
from sklearn.utils import shuffle
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.preprocessing import MinMaxScaler
from imblearn.under_sampling import RepeatedEditedNearestNeighbours
from sklearn.ensemble import ExtraTreesClassifier
Example #59
0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
functions.py

Functions for stiTChR and its related scripts
"""

import collections as coll
import os
import re
import sys
from Bio.Seq import translate
from Bio import BiopythonWarning
import warnings
warnings.simplefilter('ignore', BiopythonWarning)

__version__ = '0.3.0'
__author__ = 'Jamie Heather'
__email__ = '*****@*****.**'

sys.tracebacklimit = 0
data_dir = os.path.normpath('../Data/')


def check_scripts_dir():
    """
    Check we're in the right directory (Scripts)
    """

    if not os.getcwd().endswith('Scripts'):
Example #60
0
import warnings

try:
    import mpmath as mp
except ImportError:
    try:
        import sympy.mpmath as mp
    except ImportError:
        pass

try:
    # Can remove when sympy #11255 is resolved; see
    # https://github.com/sympy/sympy/issues/11255
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        from sympy.abc import x
except ImportError:
    pass


def lagrange_inversion(a):
    """Given a series

    f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),

    use the Lagrange inversion formula to compute a series

    g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)

    so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so