Example #1
    def compute_psi_stats(self):
        #define some normal distributions
        mus = [sp.var('mu%i' % i, real=True) for i in range(self.input_dim)]
        Ss = [sp.var('S%i' % i, positive=True) for i in range(self.input_dim)]
        normals = [(2 * sp.pi * Si)**(-0.5) * sp.exp(-0.5 * (xi - mui)**2 / Si)
                   for xi, mui, Si in zip(self._sp_x, mus, Ss)]

        #do some integration!
        #self._sp_psi0 = ??
        self._sp_psi1 = self._sp_k
        for i in range(self.input_dim):
            print('performing integral %i of %i' % (i + 1, 2 * self.input_dim))
            sys.stdout.flush()
            self._sp_psi1 *= normals[i]
            self._sp_psi1 = sp.integrate(self._sp_psi1,
                                         (self._sp_x[i], -sp.oo, sp.oo))
            clear_cache()
        self._sp_psi1 = self._sp_psi1.simplify()

        #and here's psi2 (eek!)
        zprime = [sp.Symbol('zp%i' % i) for i in range(self.input_dim)]
        self._sp_psi2 = self._sp_k.copy() * self._sp_k.copy().subs(
            zip(self._sp_z, zprime))
        for i in range(self.input_dim):
            print('performing integral %i of %i' % (self.input_dim + i + 1,
                                                    2 * self.input_dim))
            sys.stdout.flush()
            self._sp_psi2 *= normals[i]
            self._sp_psi2 = sp.integrate(self._sp_psi2,
                                         (self._sp_x[i], -sp.oo, sp.oo))
            clear_cache()
        self._sp_psi2 = self._sp_psi2.simplify()
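
The pattern above (integrate one dimension at a time and call clear_cache() after each pass) keeps SymPy's global memo table from growing with every intermediate expression. A minimal, self-contained sketch of the same idiom; the symbols and integrand below are illustrative, not GPy's:

import sympy as sp
from sympy.core.cache import clear_cache

x0, x1, mu, S = sp.symbols('x0 x1 mu S', real=True, positive=True)
expr = sp.exp(-(x0 - mu)**2 / (2 * S)) * sp.exp(-x1**2)
for xi in (x0, x1):
    # integrate out one variable, then drop the memoized intermediates
    expr = sp.integrate(expr, (xi, -sp.oo, sp.oo))
    clear_cache()
print(expr.simplify())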
Example #2
def test_jacobian_toy_field_3():
    clear_cache()

    def field_f(t, x):
        t = float(t)
        x = [float(y) for y in x]
        return x[0]**2 + 2 * x[0] + x[1], 3.0 * x[0] + 2.0

    def jacobian_f(t, x):
        t = float(t)
        x = [float(y) for y in x]
        return 2.0 * x[0] + 2.0, 1.0, 3.0, 0.0

    svf_f = gen_id.id_lagrangian(omega=(30, 30))
    jac_f_ground = jac.initialise_jacobian(svf_f)

    for i in range(0, 30):
        for j in range(0, 30):
            svf_f[i, j, 0, 0, :] = field_f(1, [i, j])
            jac_f_ground[i, j, 0, 0, :] = jacobian_f(1, [i, j])

    jac_f_numeric = jac.compute_jacobian(svf_f)

    pp = 2
    assert_array_almost_equal(jac_f_ground[pp:-pp, pp:-pp, 0, 0, :],
                              jac_f_numeric[pp:-pp, pp:-pp, 0, 0, :])
Example #3
 def __init__(self, sympy_function, sympy_variables, args={}):
     self.sympy_function = sympy_function.subs(args)
     self.sympy_variables = sympy_variables
     self.lambdified = lambdify(self.sympy_variables, self.sympy_function)
     clear_cache()
     self.function = self.lambdified
     return None
Example #4
def test_jacobian_toy_field_3_2(open_fig=False):
    clear_cache()

    svf_f = np.zeros((20, 20, 20, 1, 3))
    jac_f_ground = jac.initialise_jacobian(svf_f)

    print(jac_f_ground.shape)
Example #5
 def alt_enumerate(self, cross_sections=None):
     '''Only works when the set is a generating set for sortables and all permutations in the top layer have the same length.'''
     ml = max([len(s) for s in self])
     PPS = PegPermSet([s for s in self if len(s) == ml])
     (gf, cross_sections) = PPS.alt_cross_sections()
     gc.collect()
     print('\tDone computing cross_sections. There are',
           len(cross_sections), 'cross sections.')
     print('Starting to compute generating function for uncleans.')
     i = 0
     n = len(cross_sections)
     t = time.time()
     # print 'clean gf:',gf.simplify()
     for clean_perm in cross_sections.keys():
         if i % 10000 == 0 and i > 0:
             gf = gf.simplify()
         if i % 50000 == 0 and i > 0:
             clear_cache()
         if i % 10000 == 0 and i > 0:
             print('\t\t', i, 'of', n, '\ttook', (time.time() - t),
                   'seconds.')
             t = time.time()
         # gf -= clean_perm.csgf([])
         # print 'subtracting gf for',clean_perm,'with basis []'
         # print 'adding gf for',clean_perm,'with basis',cross_sections[clean_perm]
         gf += clean_perm.csgf(cross_sections[clean_perm])
         i += 1
     print('\tDone!')
     return gf.simplify()
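
The bookkeeping above, simplifying every 10000 terms and clearing the cache every 50000, is a common way to keep a long symbolic accumulation tractable: the running sum stays compact and SymPy's memo table stays bounded. A hedged, self-contained sketch of the idiom, with made-up generating functions standing in for csgf:

import sympy as sp
from sympy.core.cache import clear_cache

x = sp.Symbol('x')
gf = sp.Integer(0)
for k in range(1, 101):
    gf += x**(k % 3) / (1 - k * x)   # stand-in for clean_perm.csgf(...)
    if k % 25 == 0:
        gf = gf.simplify()           # keep the running sum compact
        clear_cache()                # bound SymPy's memo table
print(gf)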
Example #6
 def alt_enumerate(self, cross_sections=None):
    '''Only works when the set is a generating set for sortables and all permutations in the top layer have the same length.'''
   ml = max([len(s) for s in self])
   PPS = PegPermSet([s for s in self if len(s) == ml])
   (gf, cross_sections) = PPS.alt_cross_sections()
   gc.collect()
   print('\tDone computing cross_sections. There are',len(cross_sections),'cross sections.')
   print('Starting to compute generating function for uncleans.')
   i = 0
   n = len(cross_sections)
   t = time.time()
   # print 'clean gf:',gf.simplify()
   for clean_perm in cross_sections.keys():
     if i % 10000 == 0 and i > 0:
       gf = gf.simplify()
     if i % 50000 == 0 and i > 0:
       clear_cache()
     if i % 10000 == 0 and i > 0:
       print('\t\t',i,'of',n,'\ttook',(time.time()-t),'seconds.')
       t = time.time()
     # gf -= clean_perm.csgf([])
     # print 'subtracting gf for',clean_perm,'with basis []'
     # print 'adding gf for',clean_perm,'with basis',cross_sections[clean_perm]
     gf += clean_perm.csgf(cross_sections[clean_perm])
     i += 1
   print('\tDone!')
   return gf.simplify()
Example #7
def clear_cache():
    cache.clear_cache()
    gc.collect()

    # iterate over a snapshot, since entries are deleted during the loop
    for key, val in list(_SymbolCache.items()):
        if val() is None:
            del _SymbolCache[key]
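
Calling a weak reference returns None once its target has been collected, which is what the pruning loop above tests for. A standalone sketch of that idiom; _cache here is a plain dict of weakref.ref objects, not the host project's _SymbolCache:

import gc
import weakref

class Symbolish:
    pass

_cache = {}
obj = Symbolish()
_cache['x'] = weakref.ref(obj)

del obj
gc.collect()  # make sure the referent is gone before pruning

# iterate over a snapshot, since entries are deleted mid-loop
for key, ref in list(_cache.items()):
    if ref() is None:  # referent collected; drop the stale entry
        del _cache[key]

assert 'x' not in _cache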
Example #8
def test_jacobian_toy_field_2():
    clear_cache()

    def field_f(t, x):
        t = float(t)
        x = [float(y) for y in x]
        return 0.5 * x[0] + 0.6 * x[1], 0.8 * x[1]

    def jacobian_f(t, x):
        t = float(t)
        x = [float(y) for y in x]
        return 0.5, 0.6, 0.0, 0.8

    svf_f = gen_id.id_lagrangian(omega=(20, 20))
    jac_f_ground = jac.initialise_jacobian(svf_f)

    for i in range(0, 20):
        for j in range(0, 20):
            svf_f[i, j, 0, 0, :] = field_f(1, [i, j])
            jac_f_ground[i, j, 0, 0, :] = jacobian_f(1, [i, j])

    jac_f_numeric = jac.compute_jacobian(svf_f)

    # index with explicit slices: pairing two range objects in fancy
    # indexing would compare only the diagonal entries, not the full block
    assert_array_almost_equal(jac_f_ground[0:20, 0:20, 0, 0, :],
                              jac_f_numeric[0:20, 0:20, 0, 0, :])
Example #9
    def __init__(self):

        # Upon creating a new model, clear the cache
        # Otherwise creating multiple models creates
        # problems because sympy() will not reevaluate
        # functions and the series accessor will not
        # get created.  Because sympy keeps this cache
        # around, will have to be careful if using these
        # models in a multi-threaded context.
        clear_cache()

        self.variables = collections.OrderedDict()
        self.parameters = collections.OrderedDict()
        self.solutions = list()
        self.equations = list()

        self._private_parameters = collections.OrderedDict()
        self._local_context = dict()
        self._var_default = None
        self._param_default = None

        self._need_function_update = True

        _add_functions(self._local_context)

        # Variables used to lambdify the expressions
        self._arg_list = None
        self._private_funcs = None

        self._solvers = dict()
        self._solvers['newton-raphson'] = NewtonRaphsonSolver(self)
        self._solvers['gauss-seidel'] = GaussSeidelSolver(self)
        self._solvers['broyden'] = BroydenSolver(self)
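
The caching behaviour that the comment warns about is easy to see directly in SymPy: undefined functions are cached by name, so repeated Function('f') calls return the same class until the cache is wiped. A small demonstration, pure SymPy and independent of the model code:

from sympy import Function
from sympy.core.cache import clear_cache

f1 = Function('f')
f2 = Function('f')
assert f1 is f2        # served from SymPy's cache

clear_cache()
f3 = Function('f')
assert f3 is not f1    # a fresh class once the cache is cleared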
Example #10
File: sympykern.py Project: rajivsam/GPy
    def compute_psi_stats(self):
        # define some normal distributions
        mus = [sp.var("mu_%i" % i, real=True) for i in range(self.input_dim)]
        Ss = [sp.var("S_%i" % i, positive=True) for i in range(self.input_dim)]
        normals = [
            (2 * sp.pi * Si) ** (-0.5) * sp.exp(-0.5 * (xi - mui) ** 2 / Si) for xi, mui, Si in zip(self._sp_x, mus, Ss)
        ]

        # do some integration!
        # self._sp_psi0 = ??
        self._sp_psi1 = self._sp_k
        for i in range(self.input_dim):
            print "perfoming integrals %i of %i" % (i + 1, 2 * self.input_dim)
            sys.stdout.flush()
            self._sp_psi1 *= normals[i]
            self._sp_psi1 = sp.integrate(self._sp_psi1, (self._sp_x[i], -sp.oo, sp.oo))
            clear_cache()
        self._sp_psi1 = self._sp_psi1.simplify()

        # and here's psi2 (eek!)
        zprime = [sp.Symbol("zp%i" % i) for i in range(self.input_dim)]
        self._sp_psi2 = self._sp_k.copy() * self._sp_k.copy().subs(zip(self._sp_z, zprime))
        for i in range(self.input_dim):
            print "perfoming integrals %i of %i" % (self.input_dim + i + 1, 2 * self.input_dim)
            sys.stdout.flush()
            self._sp_psi2 *= normals[i]
            self._sp_psi2 = sp.integrate(self._sp_psi2, (self._sp_x[i], -sp.oo, sp.oo))
            clear_cache()
        self._sp_psi2 = self._sp_psi2.simplify()
Example #11
def test_block_builder(ctx_factory,
                       ambient_dim,
                       block_builder_type,
                       index_sparsity_factor,
                       op_type,
                       visualize=False):
    """Test that block builders and full matrix builders actually match."""

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    actx = PyOpenCLArrayContext(queue)

    # prevent cache explosion
    from sympy.core.cache import clear_cache
    clear_cache()

    if ambient_dim == 2:
        case = extra.CurveTestCase(
            name="ellipse",
            target_order=7,
            index_sparsity_factor=index_sparsity_factor,
            op_type=op_type,
            resolutions=[32],
            curve_fn=partial(ellipse, 3.0),
        )
Example #12
def test_dielectric(ctx_getter, qbx_order, op_class, mode, visualize=False):
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    import logging
    logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [30, 50, 70]:
        # prevent sympy cache 'splosion
        from sympy.core.cache import clear_cache
        clear_cache()

        errs = run_dielectric_test(
                cl_ctx, queue,
                nelements=nelements, qbx_order=qbx_order,
                op_class=op_class, mode=mode,
                visualize=visualize)

        eoc_rec.add_data_point(1/nelements, la.norm(list(errs), np.inf))

    print(eoc_rec)
    assert eoc_rec.order_estimate() > qbx_order - 0.5
Example #13
def test_dielectric(ctx_factory, qbx_order, op_class, mode, visualize=False):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    import logging
    logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [30, 50, 70]:
        # prevent sympy cache 'splosion
        from sympy.core.cache import clear_cache
        clear_cache()

        errs = run_dielectric_test(cl_ctx,
                                   queue,
                                   nelements=nelements,
                                   qbx_order=qbx_order,
                                   op_class=op_class,
                                   mode=mode,
                                   visualize=visualize)

        eoc_rec.add_data_point(1 / nelements, la.norm(list(errs), np.inf))

    print(eoc_rec)
    assert eoc_rec.order_estimate() > qbx_order - 0.5
Example #14
File: model.py Project: kennt/pylinsolve
    def __init__(self):

        # Upon creating a new model, clear the cache
        # Otherwise creating multiple models creates
        # problems because sympy() will not reevaluate
        # functions and the series accessor will not
        # get created.  Because sympy keeps this cache
        # around, will have to be careful if using these
        # models in a multi-threaded context.
        clear_cache()

        self.variables = collections.OrderedDict()
        self.parameters = collections.OrderedDict()
        self.solutions = list()
        self.equations = list()

        self._private_parameters = collections.OrderedDict()
        self._local_context = dict()
        self._var_default = None
        self._param_default = None

        self._need_function_update = True

        _add_functions(self._local_context)

        # Variables used to lambdify the expressions
        self._arg_list = None
        self._private_funcs = None

        self._solvers = dict()
        self._solvers['newton-raphson'] = NewtonRaphsonSolver(self)
        self._solvers['gauss-seidel'] = GaussSeidelSolver(self)
        self._solvers['broyden'] = BroydenSolver(self)
Example #15
File: main.py Project: alephu5/Soundbyte
def main(results):
    testholder = []
    for (test, dropsize) in testparams:
        datasrc = sprinkler(dropsize, files)
        testholder += [(test, datasrc)]
    failcount = 0
    while failcount != len(testparams) * len(files):
        for (test, datasrc) in testholder:
            fails = []
            for (fname, Drip) in datasrc.items():
                block = Drip.drip()
                if block == []:
                    fails += [(fname, datasrc)]
                    failcount += 1
                else:
                    print('Testing', fname, 'with', test.__name__, 'drip no.', Drip.dripno)
                    if not results.hasresult(Drip.dripno, fname, test.__name__):
                        while True:
                            try:
                                pvalue = test(block)
                                break
                            except(MemoryError):
                                clear_cache()
                        try:
                            for i, pval in enumerate(pvalue):
                                results.store(Drip.dripno, fname, test.__name__, i, pval)
                        except(TypeError):
                            results.store(Drip.dripno, fname, test.__name__, 1, pvalue)
            remfails(fails)
Example #16
def test_reproducability():
    from sympy.core.cache import clear_cache

    output_0 = None
    for i in range(10):
        module_name = "Ololol"

        target = 'cpu'

        z, y, x = pystencils.fields("z, y, x: [20,40]")
        a = sympy.Symbol('a')

        forward_assignments = pystencils.AssignmentCollection(
            {z[0, 0]: x[0, 0] * sympy.log(a * x[0, 0] * y[0, 0])})

        backward_assignments = create_backward_assignments(forward_assignments)

        forward_ast = pystencils.create_kernel(forward_assignments, target)
        forward_ast.function_name = 'forward'
        backward_ast = pystencils.create_kernel(backward_assignments, target)
        backward_ast.function_name = 'backward'
        new_output = str(TorchModule(module_name, [forward_ast, backward_ast]))
        TorchModule(module_name, [forward_ast, backward_ast]).compile()

        clear_cache()

        if not output_0:
            output_0 = new_output

        assert output_0 == new_output
Example #17
    def clear(cls, force=True):
        # Wipe out the "true" SymPy cache
        cache.clear_cache()

        # Wipe out the hidden module-private SymPy caches
        sympy.polys.rootoftools.ComplexRootOf.clear_cache()
        sympy.polys.rings._ring_cache.clear()
        sympy.polys.fields._field_cache.clear()
        sympy.polys.domains.modularinteger._modular_integer_cache.clear()

        # Maybe trigger garbage collection
        if force is False:
            if cls.ncalls_w_force_false + 1 == cls.force_ths:
                # Case 1: too long since we called gc.collect, let's do it now
                gc.collect()
                cls.ncalls_w_force_false = 0
            elif any(i.nbytes > cls.gc_ths for i in _SymbolCache.values()):
                # Case 2: we got big objects in cache, we try to reclaim memory
                gc.collect()
                cls.ncalls_w_force_false = 0
            else:
                # We won't call gc.collect() this time
                cls.ncalls_w_force_false += 1
        else:
            gc.collect()

        for key, obj in list(_SymbolCache.items()):
            if obj() is None:
                del _SymbolCache[key]
Example #18
def write_in_chunks(lines, mainfile, deffile, name, chunk_size):
	funcname = "definitions_" + name
	
	first_chunk = []
	try:
		for i in range(chunk_size+1):
			first_chunk.append(next(lines))
	except StopIteration:
		for line in first_chunk:
			mainfile.write(line)
	else:
		lines = chain(first_chunk, lines)
		
		while True:
			mainfile.write(funcname + "();\n")
			deffile.write("void " + funcname + "(void){\n")
			
			try:
				for i in range(chunk_size):
					deffile.write(next(lines))
			except StopIteration:
				break
			finally:
				deffile.write("}\n")
			
			funcname = count_up(funcname)
			clear_cache()
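
A hedged usage sketch for the helper above: it writes one call per chunk into mainfile and the matching chunked function definitions into deffile. It assumes write_in_chunks (with its own imports) is in scope; count_up below is only a stand-in for the host project's helper, and the chunk size and lines are illustrative:

from io import StringIO

def count_up(name):
    # stand-in: the real helper increments a numeric suffix on the name
    return name + "_1"

lines = iter(["double tmp%i = %i;\n" % (i, i) for i in range(5)])
mainfile, deffile = StringIO(), StringIO()
write_in_chunks(lines, mainfile, deffile, "example", 2)
print(mainfile.getvalue())
print(deffile.getvalue())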
Example #19
def test_off_surface_eval(ctx_factory, use_fmm, do_plot=False):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    nelements = 30
    target_order = 8
    qbx_order = 3
    if use_fmm:
        fmm_order = qbx_order
    else:
        fmm_order = False

    mesh = make_curve_mesh(partial(ellipse, 3),
                           np.linspace(0, 1, nelements + 1), target_order)

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    pre_density_discr = Discretization(
        cl_ctx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))
    qbx, _ = QBXLayerPotentialSource(
        pre_density_discr,
        4 * target_order,
        qbx_order,
        fmm_order=fmm_order,
    ).with_refinement()

    density_discr = qbx.density_discr

    from sumpy.kernel import LaplaceKernel
    op = sym.D(LaplaceKernel(2), sym.var("sigma"), qbx_forced_limit=-2)

    sigma = density_discr.zeros(queue) + 1

    fplot = FieldPlotter(np.zeros(2), extent=0.54, npoints=30)
    from pytential.target import PointsTarget
    fld_in_vol = bind((qbx, PointsTarget(fplot.points)), op)(queue,
                                                             sigma=sigma)

    err = cl.clmath.fabs(fld_in_vol - (-1))

    linf_err = cl.array.max(err).get()
    print("l_inf error:", linf_err)

    if do_plot:
        fplot.show_scalar_in_matplotlib(fld_in_vol.get())
        import matplotlib.pyplot as pt
        pt.colorbar()
        pt.show()

    assert linf_err < 1e-3
Example #20
def test_off_surface_eval(actx_factory, use_fmm, visualize=False):
    logging.basicConfig(level=logging.INFO)

    actx = actx_factory()

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    nelements = 30
    target_order = 8
    qbx_order = 3
    if use_fmm:
        fmm_order = qbx_order
    else:
        fmm_order = False

    mesh = mgen.make_curve_mesh(partial(mgen.ellipse, 3),
                                np.linspace(0, 1, nelements + 1), target_order)

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    pre_density_discr = Discretization(
        actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))
    qbx = QBXLayerPotentialSource(
        pre_density_discr,
        4 * target_order,
        qbx_order,
        fmm_order=fmm_order,
    )

    from pytential.target import PointsTarget
    fplot = FieldPlotter(np.zeros(2), extent=0.54, npoints=30)
    targets = PointsTarget(actx.freeze(actx.from_numpy(fplot.points)))

    places = GeometryCollection((qbx, targets))
    density_discr = places.get_discretization(places.auto_source.geometry)

    from sumpy.kernel import LaplaceKernel
    op = sym.D(LaplaceKernel(2), sym.var("sigma"), qbx_forced_limit=-2)

    sigma = density_discr.zeros(actx) + 1
    fld_in_vol = bind(places, op)(actx, sigma=sigma)
    fld_in_vol_exact = -1

    linf_err = actx.to_numpy(
        actx.np.linalg.norm(fld_in_vol - fld_in_vol_exact, ord=np.inf))
    logger.info("l_inf error: %.12e", linf_err)

    if visualize:
        fplot.show_scalar_in_matplotlib(actx.to_numpy(fld_in_vol))
        import matplotlib.pyplot as pt
        pt.colorbar()
        pt.show()

    assert linf_err < 1e-3
Example #21
def test_issue_7688():
    from sympy.core.function import Function, UndefinedFunction

    f = Function('f')  # actually an UndefinedFunction
    clear_cache()
    class A(UndefinedFunction):
        pass
    a = A('f')
    assert isinstance(a, type(f))
Example #22
def test_issue_7688():
    from sympy.core.function import Function, UndefinedFunction

    f = Function('f')  # actually an UndefinedFunction
    clear_cache()
    class A(UndefinedFunction):
        pass
    a = A('f')
    assert isinstance(a, type(f))
Example #23
def main(n, bench):
    func = globals()['bench_' + bench]
    l = []
    for i in range(n):
        clear_cache()
        t0 = time.time()
        func()
        l.append(time.time() - t0)
    return l
Example #24
File: bm_sympy.py Project: kmod/icbd
def main(n, bench):
    func = globals()['bench_' + bench]
    l = []
    for i in range(n):
        clear_cache()
        t0 = time.time()
        func()
        l.append(time.time() - t0)
    return l
Example #25
def test_Basic_keep_sign():
    Basic.keep_sign = True
    assert Mul(x - 1, x + 1) == (x - 1) * (x + 1)
    assert (1 / (x - 1)).as_coeff_mul()[0] == +1

    clear_cache()

    Basic.keep_sign = False
    assert Mul(x - 1, x + 1) == -(1 - x) * (1 + x)
    assert (1 / (x - 1)).as_coeff_mul()[0] == -1
Example #26
File: test_expr.py Project: bibile/sympy
def test_Basic_keep_sign():
    Basic.keep_sign = True
    assert Mul(x - 1, x + 1) == (x - 1)*(x + 1)
    assert (1/(x - 1)).as_coeff_terms()[0] == +1

    clear_cache()

    Basic.keep_sign = False
    assert Mul(x - 1, x + 1) == -(1 - x)*(1 + x)
    assert (1/(x - 1)).as_coeff_terms()[0] == -1
Example #27
 def _lambdify(self):
     lambda_list = []
     vars = [range_[0] for range_ in self._ranges[1:]]
     for sym_sol in self.sym_sols:
         lambda_list.append(lambdify(vars, sym_sol))
     self.__call__.__func__.__doc__ += ('Function signature is f(' +
                                        ','.join([str(var)
                                                  for var in vars]) + ')\n')
     clear_cache()
     return vars, lambda_list
Example #28
 def _lambdify(self):
     lambda_list = []
     vars = [range_[0] for range_ in self._ranges[1:]]
     for sym_sol in self.sym_sols:
         lambda_list.append(lambdify(vars,sym_sol))
     self.__call__.__func__.__doc__ += ('Function signature is f('
                                        +','.join([str(var) for var in vars]
                                                  )+')\n')
     clear_cache()
     return vars,lambda_list
Example #29
File: runtests.py Project: mattcurry/sympy
    def test_file(self, filename):
        clear_cache()

        import unittest
        from io import StringIO

        rel_name = filename[len(self._root_dir)+1:]
        module = rel_name.replace(os.sep, '.')[:-3]
        setup_pprint()
        try:
            module = pdoctest._normalize_module(module)
            tests = SymPyDocTestFinder().find(module)
        except:
            self._reporter.import_error(filename, sys.exc_info())
            return

        tests = [test for test in tests if len(test.examples) > 0]
        # By default (except in Python 2.4, where it was broken) tests are
        # sorted alphabetically by function name. We sort by line number
        # instead, so one can edit the file sequentially from bottom to
        # top... HOWEVER, if there are decorated functions, their line
        # numbers will be too large and for now one must just search for
        # these by text and function name.
        tests.sort(key=lambda x: -x.lineno)

        if not tests:
            return
        self._reporter.entering_filename(filename, len(tests))
        for test in tests:
            assert len(test.examples) != 0
            runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS | \
                    pdoctest.NORMALIZE_WHITESPACE)
            old = sys.stdout
            new = StringIO()
            sys.stdout = new
            # If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # they must run on their own; all imports must be explicit within
            # a function's docstring. Once imported that import will be
            # available to the rest of the tests in a given function's
            # docstring (unless clear_globs=True below).
            if not self._normal:
                test.globs = {}
                # if this is uncommented then all the test would get is what
                # comes by default with a "from sympy import *"
                #exec('from sympy import *') in test.globs
            try:
                f, t = runner.run(test, out=new.write, clear_globs=False)
            finally:
                sys.stdout = old
            if f > 0:
                self._reporter.doctest_fail(test.name, new.getvalue())
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()
Example #30
def test_issue_7687():
    from sympy.core.function import Function
    from sympy.abc import x
    f = Function('f')(x)
    ff = Function('f')(x)
    match_with_cache = ff.matches(f)
    assert isinstance(f, type(ff))
    clear_cache()
    ff = Function('f')(x)
    assert isinstance(f, type(ff))
    assert match_with_cache == ff.matches(f)
Example #31
def test_issue_7687():
    from sympy.core.function import Function
    from sympy.abc import x
    f = Function('f')(x)
    ff = Function('f')(x)
    match_with_cache = ff.matches(f)
    assert isinstance(f, type(ff))
    clear_cache()
    ff = Function('f')(x)
    assert isinstance(f, type(ff))
    assert match_with_cache == ff.matches(f)
Example #32
def bench_sympy(loops, func):
    timer = pyperf.perf_counter
    dt = 0

    for _ in range(loops):
        # Don't benchmark clear_cache(); exclude it from the benchmark
        clear_cache()

        t0 = timer()
        func()
        dt += (timer() - t0)

    return dt
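
A hedged usage note: pyperf's Runner.bench_time_func passes the loop count as the first argument and records the returned elapsed time, which is exactly the shape of bench_sympy above. bench_expand below stands in for whichever bench_* function the module defines:

import pyperf

runner = pyperf.Runner()
# pyperf calls bench_sympy(loops, bench_expand) and records the returned dt
runner.bench_time_func('sympy_expand', bench_sympy, bench_expand)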
Example #33
def bench_sympy(loops, func):
    timer = perf.perf_counter
    dt = 0

    for _ in range(loops):
        # Don't benchmark clear_cache(); exclude it from the benchmark
        clear_cache()

        t0 = timer()
        func()
        dt += (timer() - t0)

    return dt
Example #34
def test_pow_eval_subs_no_cache():
    # Tests that pull request 9376 works
    from sympy.core.cache import clear_cache

    s = 1 / sqrt(x**2)
    # This bug only appeared when the cache was turned off.
    # We need to approximate running this test without the cache.
    # This creates approximately the same situation.
    clear_cache()

    # This used to fail with a wrong result.
    # It incorrectly returned 1/sqrt(x**2) before this pull request.
    result = s.subs(sqrt(x**2), y)
    assert result == 1 / y
Example #35
File: test_subs.py Project: Lenqth/sympy
def test_pow_eval_subs_no_cache():
    # Tests that pull request 9376 works
    from sympy.core.cache import clear_cache

    s = 1/sqrt(x**2)
    # This bug only appeared when the cache was turned off.
    # We need to approximate running this test without the cache.
    # This creates approximately the same situation.
    clear_cache()

    # This used to fail with a wrong result.
    # It incorrectly returned 1/sqrt(x**2) before this pull request.
    result = s.subs(sqrt(x**2), y)
    assert result == 1/y
Example #36
    def __call__(self, *args):
        if len(args) != len(self.sympy_variables):
            print('args = ', args)
            print('sympy_vars = ', self.sympy_variables)
            raise Error('invalid argument list given in call to Integrand!')
        import pdb; pdb.set_trace()
        out = self.lambdified(*args)
        out1 = self.ctypesified(len(args), tuple(args))
        print(out - out1)
        import pdb; pdb.set_trace()
#        print((out - out2)**2)
#        exit()
        clear_cache()
        return out
Example #37
 def __call__(self, *args):
     if len(args) != len(self.sympy_variables):
         print('args = ', args)
         print('sympy_vars = ', self.sympy_variables)
         raise Error('invalid argument list given in call to Integrand!')
     import pdb
     pdb.set_trace()
     out = self.lambdified(*args)
     out1 = self.ctypesified(len(args), tuple(args))
     print(out - out1)
     import pdb
     pdb.set_trace()
     #        print (out-out2)**2
     #        exit()
     clear_cache()
     return out
Example #38
def test_integral_equation(ctx_getter, case, visualize=False):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    if USE_SYMENGINE and case.fmm_backend is None:
        pytest.skip("https://gitlab.tiker.net/inducer/sumpy/issues/25")

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    from pytools.convergence import EOCRecorder
    print("qbx_order: %d, %s" % (case.qbx_order, case))

    eoc_rec_target = EOCRecorder()
    eoc_rec_td = EOCRecorder()

    have_error_data = False
    for resolution in case.resolutions:
        result = run_int_eq_test(cl_ctx, queue, case, resolution,
                visualize=visualize)

        if result.rel_err_2 is not None:
            have_error_data = True
            eoc_rec_target.add_data_point(result.h_max, result.rel_err_2)

        if result.rel_td_err_inf is not None:
            eoc_rec_td.add_data_point(result.h_max, result.rel_td_err_inf)

    if case.bc_type == "dirichlet":
        tgt_order = case.qbx_order
    elif case.bc_type == "neumann":
        tgt_order = case.qbx_order-1
    else:
        assert False

    if have_error_data:
        print("TARGET ERROR:")
        print(eoc_rec_target)
        assert eoc_rec_target.order_estimate() > tgt_order - 1.3

        if case.check_tangential_deriv:
            print("TANGENTIAL DERIVATIVE ERROR:")
            print(eoc_rec_td)
            assert eoc_rec_td.order_estimate() > tgt_order - 2.3
Example #39
 def alt_cross_sections(self):
     print('Starting to compute downset.')
     (gf, uc) = self.alt_downset()
     unclean = PegPermSet()
     uncleanlist = list()
     for PP in uc.keys():
         uncleanlist.extend(uc[PP])
     unclean = set(uncleanlist)
     print('\tDownset done. Contains', len(unclean),
           'unclean peg permutations.')
     # print 'Starting to compactify.'
     # unclean.compactify()
     # print '\tDone compactifying. Now contains',len(unclean),'peg permutations.'
     print(
         'Starting to compute cross sections of UNCLEAN peg permutations.')
     cross_sections = dict()
     i = 1
     pairs = list()
     print('\tCleaning, finding bases, and loading pairs.')
     n = len(unclean)
     for P in unclean:
         if i % 20000 == 0:
             print('\t\t', i, 'of', n, '...')
         cp = P.clean()
         b = P.clean_basis()
         pairs.append((cp, b))
         cross_sections[cp] = VectorSet([-1])
         i += 1
     i = 1
     del unclean
     clear_cache()
     print('\tUnioning bases.')
     for (cleaned_perm, V) in pairs:
         if i % 200000 == 0:
             print('\t\t', i, 'of', n, '... dict_size =',
                   len(cross_sections))
         # if cleaned_perm in cross_sections.keys():
         cross_sections[cleaned_perm] = V.basis_union(
             cross_sections[cleaned_perm])
         # else:
         # cross_sections[cleaned_perm] = V
         i += 1
     del pairs
     clear_cache()
     return (gf.simplify(), cross_sections)
Example #40
    def clear(cls, force=True):
        # Wipe out the "true" SymPy cache
        cache.clear_cache()

        # Wipe out the hidden module-private SymPy caches
        sympy.polys.rootoftools.ComplexRootOf.clear_cache()
        sympy.polys.rings._ring_cache.clear()
        sympy.polys.fields._field_cache.clear()
        sympy.polys.domains.modularinteger._modular_integer_cache.clear()

        # Take a copy of the dictionary so we can safely iterate over it
        # even if another thread is making changes

        # mydict.copy() is safer than list(mydict) for getting an unchanging list
        # See https://bugs.python.org/issue40327 for terrifying discussion
        # on this issue.
        cache_copied = _SymbolCache.copy()

        # Maybe trigger garbage collection
        if force is False:
            if cls.ncalls_w_force_false + 1 == cls.force_ths:
                # Case 1: too long since we called gc.collect, let's do it now
                gc.collect()
                cls.ncalls_w_force_false = 0
            elif any(i.nbytes > cls.gc_ths for i in cache_copied.values()):
                # Case 2: we got big objects in cache, we try to reclaim memory
                gc.collect()
                cls.ncalls_w_force_false = 0
            else:
                # We won't call gc.collect() this time
                cls.ncalls_w_force_false += 1
        else:
            gc.collect()

        for key in cache_copied:
            obj = _SymbolCache.get(key)
            if obj is None:
                # deleted by another thread since we took the copy
                continue
            if obj() is None:
                # pop(x, None) does not error if already gone
                # (key could be removed in another thread since get() above)
                _SymbolCache.pop(key, None)
Example #41
def test_integral_equation(
        ctx_getter, curve_name, curve_f, qbx_order, bc_type, loc_sign, k,
        target_order=7, source_order=None):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    from pytools.convergence import EOCRecorder
    print(("curve_name: %s, qbx_order: %d, bc_type: %s, loc_sign: %s, "
            "helmholtz_k: %s"
            % (curve_name, qbx_order, bc_type, loc_sign, k)))

    eoc_rec_target = EOCRecorder()
    eoc_rec_td = EOCRecorder()

    for nelements in [30, 40, 50]:
        result = run_int_eq_test(
                cl_ctx, queue, curve_f, nelements, qbx_order,
                bc_type, loc_sign, k, target_order=target_order,
                source_order=source_order)

        eoc_rec_target.add_data_point(1/nelements, result.rel_err_2)
        eoc_rec_td.add_data_point(1/nelements, result.rel_td_err_inf)

    if bc_type == "dirichlet":
        tgt_order = qbx_order
    elif bc_type == "neumann":
        tgt_order = qbx_order-1
    else:
        assert False

    print("TARGET ERROR:")
    print(eoc_rec_target)
    assert eoc_rec_target.order_estimate() > tgt_order - 1.3

    print("TANGENTIAL DERIVATIVE ERROR:")
    print(eoc_rec_td)
    assert eoc_rec_td.order_estimate() > tgt_order - 2.3
Example #42
def separate_shells_dwi(nib_dwi, num_initial_dir_to_skip=7, num_shells=3):
    """
    Return a list of num_shells nibabel images, one image per shell.
    :param nib_dwi: nibabel DWI image whose volumes cycle through the shells.
    :param num_initial_dir_to_skip: number of initial volumes (e.g. b0) to skip.
    :param num_shells: number of shells to separate.
    :return: list of num_shells nibabel images, one per shell.
    """
    im_data = nib_dwi.get_data()[..., num_initial_dir_to_skip:]

    list_nib_dwi_per_shells = []

    for i in range(num_shells):
        slice_i_data = im_data[..., i::num_shells]
        im_slice_i = set_new_data(nib_dwi, slice_i_data)
        list_nib_dwi_per_shells.append(im_slice_i)

        clear_cache()

    return list_nib_dwi_per_shells
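
The separation relies on the volumes cycling through the shells in acquisition order, so im_data[..., i::num_shells] picks every num_shells-th volume starting at offset i. A numpy-only illustration with toy data:

import numpy as np

data = np.arange(12).reshape(1, 1, 1, 12)     # 12 toy volumes
shells = [data[..., i::3] for i in range(3)]  # 3 shells, 4 volumes each
print([s[0, 0, 0].tolist() for s in shells])
# [[0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11]]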
Example #43
 def alt_cross_sections(self):
   print('Starting to compute downset.')
   (gf, uc) = self.alt_downset()
   unclean = PegPermSet()
   uncleanlist = list()
   for PP in uc.keys():
     uncleanlist.extend(uc[PP])
    unclean = set(uncleanlist)
   print('\tDownset done. Contains',len(unclean),'unclean peg permutations.')
   # print 'Starting to compactify.'
   # unclean.compactify()
   # print '\tDone compactifying. Now contains',len(unclean),'peg permutations.'
   print('Starting to compute cross sections of UNCLEAN peg permutations.')
   cross_sections = dict()
   i = 1
   pairs = list()
   print('\tCleaning, finding bases, and loading pairs.')
   n = len(unclean)
   for P in unclean:
     if i % 20000 == 0:
       print('\t\t',i,'of',n,'...')
     cp = P.clean()
     b = P.clean_basis()
     pairs.append((cp,b))
     cross_sections[cp] = VectorSet([-1])
     i += 1
   i = 1
   del unclean
   clear_cache()
   print('\tUnioning bases.')
   for (cleaned_perm, V) in pairs:
     if i % 200000 == 0:
       print('\t\t',i,'of',n,'... dict_size =',len(cross_sections))
     # if cleaned_perm in cross_sections.keys():
     cross_sections[cleaned_perm] = V.basis_union(cross_sections[cleaned_perm])
     # else:
       # cross_sections[cleaned_perm] = V
     i += 1
   del pairs
   clear_cache()
   return (gf.simplify(), cross_sections)
Example #44
 def enumerate(self, cross_sections=None):
   if cross_sections is None:
     cross_sections = self.cross_sections()
   gc.collect()
   print('\tDone computing cross_sections. There are',len(cross_sections),'cross sections.')
   print('Starting to compute generating function.')
   gf = 0
   i = 0
   n = len(cross_sections)
   t = time.time()
   for clean_perm in cross_sections.keys():
     if i % 10000 == 0 and i > 0:
       gf = gf.simplify()
     if i % 50000 == 0 and i > 0:
       clear_cache()
     if i % 1000 == 0 and i > 0:
       print('\t\t',i,'of',n,'\ttook',(time.time()-t),'seconds.')
       t = time.time()
     gf += clean_perm.csgf(cross_sections[clean_perm])
     i += 1
   print('\tDone!')
   return gf.simplify()
Example #45
 def sum_gfs_no_basis(self, S, only_clean=False):
   i = 0
   gf = 0
   n = len(S)
   t = time.time()
   print('\t\tComputing GF.')
   for PP in S:
     i += 1
     if i % 100000 == 0 and i > 0:
       print('\t\t\t',i,'of',n,'\ttook',(time.time()-t),'seconds.')
       t = time.time()
     if not PP.is_compact():
       continue
     if only_clean and not PP.is_compact_and_clean():
       continue
     if i % 10000 == 0 and i > 0:
       gf = gf.simplify()
     if i % 50000 == 0 and i > 0:
       clear_cache()
     gf += PP.csgf([])
     # print 'adding gf for',PP,'with basis []'
     
   return gf
Example #46
    def iteration(self):
        """
        GeneticSearch class's iteration function
        """
        self.iterations += 1

        while True:
            # We have to clear Sympy's cache before and after a random search,
            # otherwise we will bump into a Sympy cache key error.
            # The reason for this is unknown to me, but clearing the cache
            # has somehow solved this problem completely.
            clear_cache()

            rs = RandomSearch(self.fw,
                              search_length=self.search_length,
                              equation_length=self.equation_length,
                              starting_equations=self.starting_equations)

            rs.search(return_constituents=True)
            equation = rs.get_best_equations(k=0)

            clear_cache()

            if len(equation) == 0:
                print("Iteration produced bad results, retrying.")
            elif equation[0]["testing_r2"] < self.best_r2_score:
                print(
                    "Iteration got {} which is less than {}, retrying.".format(
                        equation[0]["testing_r2"], self.best_r2_score))
            else:
                print("Iteration produced {} equations.".format(len(equation)))
                equation = equation[0]
                self.best_r2_score = equation["testing_r2"]
                break

        self.starting_equations = equation["reconstruction"]
        return equation
Example #47
File: _helpers.py Project: Omer80/jitcode
def write_in_chunks(lines, mainfile, deffile, name, chunk_size, arguments):
    funcname = "definitions_" + name

    first_chunk = []
    try:
        for i in range(chunk_size + 1):
            first_chunk.append(next(lines))
    except StopIteration:
        for line in first_chunk:
            mainfile.write(line)
    else:
        lines = chain(first_chunk, lines)

        while True:
            mainfile.write(funcname + "(")
            deffile.write("void " + funcname + "(")
            if arguments:
                mainfile.write(", ".join(argument[0]
                                         for argument in arguments))
                deffile.write(", ".join(argument[1] + " " + argument[0]
                                        for argument in arguments))
            else:
                deffile.write("void")
            mainfile.write(");\n")
            deffile.write("){\n")

            try:
                for i in range(chunk_size):
                    deffile.write(next(lines))
            except StopIteration:
                break
            finally:
                deffile.write("}\n")

            funcname = count_up(funcname)
            clear_cache()
Example #48
    def sum_gfs_no_basis(self, S, only_clean=False):
        i = 0
        gf = 0
        n = len(S)
        t = time.time()
        print('\t\tComputing GF.')
        for PP in S:
            i += 1
            if i % 100000 == 0 and i > 0:
                print('\t\t\t', i, 'of', n, '\ttook', (time.time() - t),
                      'seconds.')
                t = time.time()
            if not PP.is_compact():
                continue
            if only_clean and not PP.is_compact_and_clean():
                continue
            if i % 10000 == 0 and i > 0:
                gf = gf.simplify()
            if i % 50000 == 0 and i > 0:
                clear_cache()
            gf += PP.csgf([])
            # print 'adding gf for',PP,'with basis []'

        return gf
Example #49
def test_equivalence(stencil, compressible, method, force):
    relaxation_rates = [1.8, 1.7, 1.0, 1.0, 1.0, 1.0]
    stencil = LBStencil(stencil)
    clear_cache()
    domain_size = (10, 20) if stencil.D == 2 else (5, 10, 7)
    lbm_config = LBMConfig(stencil=stencil,
                           method=method,
                           compressible=compressible,
                           relaxation_rates=relaxation_rates,
                           force_model=ForceModel.GUO,
                           force=force)
    lbm_opt_split = LBMOptimisation(split=True)
    lbm_opt = LBMOptimisation(split=False)

    with_split = create_lid_driven_cavity(domain_size=domain_size,
                                          lbm_config=lbm_config,
                                          lbm_optimisation=lbm_opt_split)
    without_split = create_lid_driven_cavity(domain_size=domain_size,
                                             lbm_config=lbm_config,
                                             lbm_optimisation=lbm_opt)
    with_split.run(100)
    without_split.run(100)
    np.testing.assert_almost_equal(with_split.velocity_slice(),
                                   without_split.velocity_slice())
Example #50
 def enumerate(self, cross_sections=None):
     if cross_sections is None:
         cross_sections = self.cross_sections()
     gc.collect()
     print('\tDone computing cross_sections. There are',
           len(cross_sections), 'cross sections.')
     print('Starting to compute generating function.')
     gf = 0
     i = 0
     n = len(cross_sections)
     t = time.time()
     for clean_perm in cross_sections.keys():
         if i % 10000 == 0 and i > 0:
             gf = gf.simplify()
         if i % 50000 == 0 and i > 0:
             clear_cache()
         if i % 1000 == 0 and i > 0:
             print('\t\t', i, 'of', n, '\ttook', (time.time() - t),
                   'seconds.')
             t = time.time()
         gf += clean_perm.csgf(cross_sections[clean_perm])
         i += 1
     print('\tDone!')
     return gf.simplify()
Example #51
File: model.py Project: lowks/pylinsolve
    def __init__(self):

        # Upon creating a new model, clear the cache
        # Otherwise creating multiple models creates
        # problems because sympy() will not reevaluate
        # functions and the series accessor will not
        # get created.  Because sympy keeps this cache
        # around, will have to be careful if using these
        # models in a multi-threaded context.
        clear_cache()

        self.variables = collections.OrderedDict()
        self.parameters = collections.OrderedDict()
        self.solutions = list()
        self.equations = list()

        self._private_parameters = collections.OrderedDict()
        self._local_context = dict()
        self._var_default = None
        self._param_default = None

        self._need_function_update = True

        _add_functions(self._local_context)
Example #52
def test_off_surface_eval(ctx_getter, use_fmm, do_plot=False):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    nelements = 30
    target_order = 8
    qbx_order = 3
    if use_fmm:
        fmm_order = qbx_order
    else:
        fmm_order = False

    mesh = make_curve_mesh(partial(ellipse, 3),
            np.linspace(0, 1, nelements+1),
            target_order)

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    pre_density_discr = Discretization(
            cl_ctx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))
    qbx, _ = QBXLayerPotentialSource(
            pre_density_discr,
            4*target_order,
            qbx_order,
            fmm_order=fmm_order,
            ).with_refinement()

    density_discr = qbx.density_discr

    from sumpy.kernel import LaplaceKernel
    op = sym.D(LaplaceKernel(2), sym.var("sigma"), qbx_forced_limit=-2)

    sigma = density_discr.zeros(queue) + 1

    fplot = FieldPlotter(np.zeros(2), extent=0.54, npoints=30)
    from pytential.target import PointsTarget
    fld_in_vol = bind(
            (qbx, PointsTarget(fplot.points)),
            op)(queue, sigma=sigma)

    err = cl.clmath.fabs(fld_in_vol - (-1))

    linf_err = cl.array.max(err).get()
    print("l_inf error:", linf_err)

    if do_plot:
        fplot.show_scalar_in_matplotlib(fld_in_vol.get())
        import matplotlib.pyplot as pt
        pt.colorbar()
        pt.show()

    assert linf_err < 1e-3
Example #53
def test_off_surface_eval_vs_direct(ctx_getter, do_plot=False):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    nelements = 300
    target_order = 8
    qbx_order = 3

    mesh = make_curve_mesh(WobblyCircle.random(8, seed=30),
                np.linspace(0, 1, nelements+1),
                target_order)

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    pre_density_discr = Discretization(
            cl_ctx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))
    direct_qbx, _ = QBXLayerPotentialSource(
            pre_density_discr, 4*target_order, qbx_order,
            fmm_order=False,
            target_association_tolerance=0.05,
            ).with_refinement()
    fmm_qbx, _ = QBXLayerPotentialSource(
            pre_density_discr, 4*target_order, qbx_order,
            fmm_order=qbx_order + 3,
            _expansions_in_tree_have_extent=True,
            target_association_tolerance=0.05,
            ).with_refinement()

    fplot = FieldPlotter(np.zeros(2), extent=5, npoints=1000)
    from pytential.target import PointsTarget
    ptarget = PointsTarget(fplot.points)
    from sumpy.kernel import LaplaceKernel

    op = sym.D(LaplaceKernel(2), sym.var("sigma"), qbx_forced_limit=None)

    from pytential.qbx import QBXTargetAssociationFailedException
    try:
        direct_density_discr = direct_qbx.density_discr
        direct_sigma = direct_density_discr.zeros(queue) + 1
        direct_fld_in_vol = bind((direct_qbx, ptarget), op)(
                queue, sigma=direct_sigma)

    except QBXTargetAssociationFailedException as e:
        fplot.show_scalar_in_matplotlib(e.failed_target_flags.get(queue))
        import matplotlib.pyplot as pt
        pt.show()
        raise

    fmm_density_discr = fmm_qbx.density_discr
    fmm_sigma = fmm_density_discr.zeros(queue) + 1
    fmm_fld_in_vol = bind((fmm_qbx, ptarget), op)(queue, sigma=fmm_sigma)

    err = cl.clmath.fabs(fmm_fld_in_vol - direct_fld_in_vol)

    linf_err = cl.array.max(err).get()
    print("l_inf error:", linf_err)

    if do_plot:
        #fplot.show_scalar_in_mayavi(0.1*.get(queue))
        fplot.write_vtk_file("potential.vts", [
            ("fmm_fld_in_vol", fmm_fld_in_vol.get(queue)),
            ("direct_fld_in_vol", direct_fld_in_vol.get(queue))
            ])

    assert linf_err < 1e-3
Example #54
File: runtests.py Project: meo-meo/sympy
    def test_file(self, filename):
        clear_cache()

        import unittest
        from io import StringIO

        rel_name = filename[len(self._root_dir)+1:]
        dirname, file = os.path.split(filename)
        module = rel_name.replace(os.sep, '.')[:-3]

        if rel_name.startswith("examples"):
            # Example files do not have __init__.py files,
            # So we have to temporarily extend sys.path to import them
            sys.path.insert(0, dirname)
            module = file[:-3] # remove ".py"
        setup_pprint()
        try:
            module = pdoctest._normalize_module(module)
            tests = SymPyDocTestFinder().find(module)
        except:
            self._reporter.import_error(filename, sys.exc_info())
            return
        finally:
            if rel_name.startswith("examples"):
                del sys.path[0]

        tests = [test for test in tests if len(test.examples) > 0]
        # By default tests are sorted alphabetically by function name.
        # We sort by line number so one can edit the file sequentially from
        # bottom to top. However, if there are decorated functions, their line
        # numbers will be too large and for now one must just search for these
        # by text and function name.
        tests.sort(key=lambda x: -x.lineno)

        if not tests:
            return
        self._reporter.entering_filename(filename, len(tests))
        for test in tests:
            assert len(test.examples) != 0
            runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS | \
                    pdoctest.NORMALIZE_WHITESPACE | pdoctest.IGNORE_EXCEPTION_DETAIL)
            old = sys.stdout
            new = StringIO()
            sys.stdout = new
            # If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # they must run on their own; all imports must be explicit within
            # a function's docstring. Once imported that import will be
            # available to the rest of the tests in a given function's
            # docstring (unless clear_globs=True below).
            if not self._normal:
                test.globs = {}
                # if this is uncommented then all the test would get is what
                # comes by default with a "from sympy import *"
                #exec('from sympy import *') in test.globs
            try:
                f, t = runner.run(test, out=new.write, clear_globs=False)
            finally:
                sys.stdout = old
            if f > 0:
                self._reporter.doctest_fail(test.name, new.getvalue())
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()
Example #55
def calculate_dispersion(atom_list,N_atoms_uc,N_atoms,Jij,showEigs=False):
    Sabn=generate_sabn(N_atoms)       
#    print 'Sabn',Sabn 
    Sxyz=generate_sxyz(Sabn,atom_list)
#    print 'Sxyz', Sxyz
        
    if 1:
        #print len(translations)   
        J=sympy.Symbol('J',real=True)
        #Jij=[N.matrix([[J,0,0],[0,J,0],[0,0,J]])]
        #Hdef=generate_hdef(atom_list,Jij,Sabn,N_atoms_uc,N_atoms)
        Hdef=generate_hdef(atom_list,Jij,Sxyz,N_atoms_uc,N_atoms)
        print('Hdef')  # ,Hdef
#        file_pathname = os.path.abspath('')
#        N.save(os.path.join(file_pathname,r'oldHlin.txt'),[Hdef])
#        sys.exit()
        
        #pngview(Hdef)
        #print_matplotlib(latex(Hdef)) 
    #if 0:
        Hlin=holstein(Hdef)

        print('Hlin')  # ,Hlin
        kx=sympy.Symbol('kx',real=True)
        ky=sympy.Symbol('ky',real=True)
        kz=sympy.Symbol('kz',real=True)
        k=[kx,ky,kz]
        fourier_table_uc,fourier_table=gen_fourier_table(atom_list,N_atoms_uc,N_atoms)
        Hfou=fouriertransform(atom_list,Jij,Hlin,k,N_atoms_uc,N_atoms,fourier_table_uc,fourier_table)
        print('Hfou')  # ,Hfou
    if 1:
        Hcomm=Hfou#applycommutation(atom_list,Jij,Hfou,k,N_atoms_uc,N_atoms)
#        print 'Hcomm',Hcomm
        operator_table=gen_operator_table(atom_list,N_atoms_uc)
#        print 'optable',operator_table
        operator_table_dagger=gen_operator_table_dagger(atom_list,N_atoms_uc)
#        print 'optable_dagger',operator_table_dagger
        XdX,g=gen_XdX(atom_list,operator_table,operator_table_dagger,Hcomm,N_atoms_uc)
#        print 'XdX',XdX
#        print 'g',g
        TwogH2=g*XdX
#        TwogH2=2*g*XdX
#        print 'TwogH2',TwogH2
        print('trigifying')
        m,n=TwogH2.shape
        if 1:
                for i in range(m):
                    for j in range(n):
                        #print i,j
                        #print Ntwo[i,j]
                        #print 'matching'
                        #print 'kx',Ntwo[i,j].match(kx)
                        #print 'ky',Ntwo[i,j].match(ky)
                        #Ntwo[i,j]=sympy.re(Ntwo[i,j].evalf())
                        #Ntwo[i,j]=Ntwo[i,j].evalf()
                        TwogH2[i,j]=TwogH2[i,j].expand(complex=True, trig = True)#.subs(I,1.0j)
        print('trigified')
#        print 'trigified',TwogH2
#        print TwogH2[0,1]            
                        
        Hsave=TwogH2        
        if showEigs:
            #print 'calculating'
            x=sympy.Symbol('x')
            #eigspoly=TwogH2.berkowitz_charpoly(x)
            #print 'eigspoly'
            #print 'eigs poly',eigspoly
            if 0:
                print('shape', TwogH2.shape)
                print('recalculating\n\n')
                print(TwogH2, "\n\n")

            eigs=TwogH2.eigenvals()
            x=sympy.Symbol('x')
           # print_matplotlib(latex(TwogH2))
            #eigs=TwogH2.berkowitz_charpoly(x)
            print('eigs', eigs)
            keys=eigs.keys()
            #print 'key',keys[0]
            #print keys[0].expand(complex=True)
            #print 'charpoly',TwogH2.charpoly(x)
            #eigs=TwogH2.eigenvals()
            #print 'eigenvalues', sympy.simplify(eigs[1][0])
            return (Hsave, Hsave.charpoly(x), eigs)
        print('calc dispersion: complete')
        clear_cache()
        return Hsave
Example #56
def test_perf_data_gathering(ctx_getter, n_arms=5):
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    target_order = 8

    starfish_func = NArmedStarfish(n_arms, 0.8)
    mesh = make_curve_mesh(
            starfish_func,
            np.linspace(0, 1, n_arms * 30),
            target_order)

    sigma_sym = sym.var("sigma")

    # The kernel doesn't really matter here
    from sumpy.kernel import LaplaceKernel
    k_sym = LaplaceKernel(mesh.ambient_dim)

    sym_op = sym.S(k_sym, sigma_sym, qbx_forced_limit=+1)

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import (
            InterpolatoryQuadratureSimplexGroupFactory)
    pre_density_discr = Discretization(
            queue.context, mesh,
            InterpolatoryQuadratureSimplexGroupFactory(target_order))

    results = []

    def inspect_geo_data(insn, bound_expr, geo_data):
        from pytential.qbx.fmm import assemble_performance_data
        perf_data = assemble_performance_data(geo_data, uses_pde_expansions=True)
        results.append(perf_data)

        return False  # no need to do the actual FMM

    from pytential.qbx import QBXLayerPotentialSource
    lpot_source = QBXLayerPotentialSource(
            pre_density_discr, 4*target_order,
            # qbx order and fmm order don't really matter
            10, fmm_order=10,
            _expansions_in_tree_have_extent=True,
            _expansion_stick_out_factor=0.5,
            geometry_data_inspector=inspect_geo_data,
            target_association_tolerance=1e-10,
            )

    lpot_source, _ = lpot_source.with_refinement()

    density_discr = lpot_source.density_discr

    if 0:
        from meshmode.discretization.visualization import draw_curve
        draw_curve(density_discr)
        import matplotlib.pyplot as plt
        plt.show()

    nodes = density_discr.nodes().with_queue(queue)
    sigma = cl.clmath.sin(10 * nodes[0])

    bind(lpot_source, sym_op)(queue, sigma=sigma)
Example #57
0
File: runtests.py  Project: Ingwar/sympy
    def test_file(self, filename):
        clear_cache()
        name = "test%d" % self._count
        name = os.path.splitext(os.path.basename(filename))[0]
        self._count += 1
        gl = {'__file__':filename}
        random.seed(self._seed)
        try:
            execfile(filename, gl)
        except (ImportError, SyntaxError):
            self._reporter.import_error(filename, sys.exc_info())
            return
        pytestfile = ""
        if "XFAIL" in gl:
            pytestfile = inspect.getsourcefile(gl["XFAIL"])
        disabled = gl.get("disabled", False)
        if disabled:
            funcs = []
        else:
            # we need to filter only those functions that begin with 'test_'
            # and are defined either in the testing file or in the file
            # where the XFAIL decorator is defined
            funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
                                                 (inspect.isfunction(gl[f])
                                                    or inspect.ismethod(gl[f])) and
                                                 (inspect.getsourcefile(gl[f]) == filename or
                                                   inspect.getsourcefile(gl[f]) == pytestfile)]
            # Sorting of XFAILed functions isn't fixed yet :-(
            funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
            i = 0
            while i < len(funcs):
                if isgeneratorfunction(funcs[i]):
                    # some tests can be generators that yield the actual
                    # test functions; unpack them below:
                    f = funcs.pop(i)
                    for fg in f():
                        func = fg[0]
                        args = fg[1:]
                        # bind func and args as default arguments: a bare
                        # lambda would close over the loop variables, so
                        # every wrapper would run only the last test
                        fgw = lambda func=func, args=args: func(*args)
                        funcs.insert(i, fgw)
                        i += 1
                else:
                    i += 1
            # drop functions that are not selected with the keyword expression:
            funcs = [x for x in funcs if self.matches(x)]

        if not funcs:
            return
        self._reporter.entering_filename(filename, len(funcs))
        for f in funcs:
            self._reporter.entering_test(f)
            try:
                f()
            except KeyboardInterrupt:
                raise
            except:
                t, v, tr = sys.exc_info()
                if t is AssertionError:
                    self._reporter.test_fail((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
                elif t.__name__ == "Skipped":
                    self._reporter.test_skip(v)
                elif t.__name__ == "XFail":
                    self._reporter.test_xfail()
                elif t.__name__ == "XPass":
                    self._reporter.test_xpass(v)
                else:
                    self._reporter.test_exception((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()
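
The collection step above reduces to a small standalone idiom: execute the file into a fresh namespace, then keep the callables whose names start with 'test_'. A minimal sketch with reporting and XFAIL handling stripped out; collect_tests is an illustrative name, and exec/compile stands in for the Python 2 execfile used above:

import inspect

def collect_tests(filename):
    gl = {'__file__': filename}
    with open(filename) as f:
        # compiling with the real filename lets inspect trace functions back to it
        exec(compile(f.read(), filename, 'exec'), gl)
    funcs = [obj for name, obj in gl.items()
             if name.startswith('test_') and inspect.isfunction(obj)
             and inspect.getsourcefile(obj) == filename]
    # run the tests in source order
    funcs.sort(key=lambda fn: inspect.getsourcelines(fn)[1])
    return funcs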
Example #58
0
  def alt_downset(self):

    bottom_edge = PegPermSet()
    keyssofar = PegPermSet()
    unclean = dict()

    for PP in self:
      if PP.is_compact() and not PP.is_compact_and_clean():
        cleaned = PP.clean()
        if cleaned in keyssofar:
          unclean[cleaned].add(PP)
        else:
          unclean[cleaned] = PegPermSet([PP])
          keyssofar.add(cleaned)

    bottom_edge.update(self)
    n = len(bottom_edge)
    gf = self.sum_gfs_no_basis(bottom_edge, only_clean=True)

    while len(bottom_edge) > 0:
      n = len(bottom_edge)
      next_layer = PegPermSet()

      i = 0
      t = time.time()
      while len(bottom_edge) > 0:
        i += 1
        P = bottom_edge.pop()
        next_layer.update(P.shrink_by_one())
        del P
        if i % 100000 == 0:
          clear_cache()
          print('\t',i,'of',n,'. Now with',len(next_layer),'. Took',(time.time()-t),'seconds.')
          t = time.time()


      del bottom_edge
      clear_cache()
      n += len(next_layer)
      
      print('\t\tScanning permutations for cleanliness!')
      i = 0
      num_unclean = 0
      nll = len(next_layer)
      for PP in next_layer:
        i += 1
        if i % 200000 == 0:
          print('\t\t\tScanned',i,'of',nll,'.')
        if PP.is_compact() and not PP.is_compact_and_clean():
          cleaned = PP.clean()
          num_unclean += 1
          if cleaned in keyssofar:
            unclean[cleaned].add(PP)
          else:
            unclean[cleaned] = PegPermSet([PP])
            keyssofar.add(cleaned)

      print('\t\tScanning permutations for unnecessary uncleans!')
      i = 0
      nll = len(next_layer)
      for PP in next_layer:
        i += 1
        if i % 200000 == 0:
          print('\t\t\tScanned',i,'of',nll,'.')
        if unclean.get(PP):
          del unclean[PP]

      print('\tOut of',len(next_layer),'permutations in this layer,',num_unclean,'were unclean.')
      gf += self.sum_gfs_no_basis(next_layer, only_clean=True)

      
      bottom_edge = next_layer
      del next_layer
      clear_cache()
      print('\t\tDownset currently has', n, 'permutations.')

    return (gf,unclean)
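
The clear_cache() calls sprinkled through the loop above are the usual workaround for sympy's unbounded memoization cache growing during a long enumeration. A minimal sketch of that idiom in isolation; process_many, the per-item simplify call, and the flush interval are illustrative, not the project's API:

import sympy
from sympy.core.cache import clear_cache

def process_many(exprs, flush_every=100000):
    # simplify a long stream of expressions, flushing sympy's cache
    # periodically so memory use stays bounded
    results = []
    for i, e in enumerate(exprs, 1):
        results.append(sympy.simplify(e))
        if i % flush_every == 0:
            clear_cache()
    clear_cache()
    return results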
Example #59
0
    def __init__(self,sympy_function,sympy_variables,args={}):
        self.sympy_function = sympy_function.subs(args)
        self.sympy_variables = sympy_variables
        self.lambdified = lambdify(self.sympy_variables,self.sympy_function)
        clear_cache()
        # We need a unique way to identify each generated integrand library.
        # id(self) is only unique for the object's lifetime, so hash the
        # underlying sympy function instead. hash() can return negative
        # numbers, which break C identifiers and filenames, so reinterpret
        # the hash as a positive integer (idea from Stack Overflow):
        self.unique_id = ctypes.c_size_t(hash(self.sympy_function)).value
        # stackoverflow.com/questions/18766535/...
        #   positive-integer-from-python-hash-function
        filename_prefix = os.path.join(
            'integrand_libs','integrand'+str(self.unique_id) )
        libname_prefix = os.path.join(
            'integrand_libs',''+str(self.unique_id) )
        # Enable the use of ctypes objects in nquad, once multivariate ctypes
        # objects are appropriate arguments for the QUADPACK library functions.
        try:
            self.generated_code = codegen(
                ('integrand',self.sympy_function),'C',filename_prefix,
                argument_sequence=self.sympy_variables,to_files=True)
        except IOError:
            import pdb;pdb.set_trace()
        args_str = ",".join(["args["+str(ind)+"]" 
                             for ind in range(len(self.sympy_variables))])
        extra_c_code="".join([r"""
double integrand_wrapper(int n, double args[n])
{
   return integrand(""",args_str,""");

}
"""])
        extra_h_code="""

double integrand_wrapper(int n, double args[n]);

"""
        
        with open(filename_prefix+".c", 'a') as f:
            f.write(extra_c_code)
        with open(filename_prefix+".h", 'a') as f:
            f.write(extra_h_code)
        cmd = ("gcc -dynamiclib -O3 -I. "+filename_prefix+".c -o "
               +filename_prefix+".dylib")
        subprocess.call(cmd,shell=True)
        self.ctypeslib = ctypes.CDLL(filename_prefix+'.dylib')
        self.ctypesified = self.ctypeslib.integrand_wrapper
        self.ctypesified.restype = ctypes.c_double
        # Sanity-check the ctypesified function against the lambdified one.
        # The range(0) bound means the check is currently disabled; raise it
        # (e.g. to range(100)) to re-enable.
        self.ctypesified.argtypes = (ctypes.c_int,
                                     len(self.sympy_variables)*ctypes.c_double)
        test = []
        for indx in range(0):
            randargs = [random.random() for item in self.sympy_variables]
            temp = (self.lambdified(*randargs)-
                    self.ctypesified(
                    ctypes.c_int(len(self.sympy_variables)),
                    (len(self.sympy_variables)*ctypes.c_double)(*randargs)))
            if temp**2 > 1e-8:
                test.append(temp)
        if test:
            raise IntegrationError("Ctypesified and lambdified do not match!")
        self.ctypesified.argtypes = ctypes.c_int, ctypes.c_double
        return None
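
The positive-integer trick above is worth isolating: hash() may return a negative value, while the generated filenames and C symbols need a non-negative number, so the hash is reinterpreted as an unsigned machine word. A minimal demonstration; the expression is an arbitrary stand-in:

import ctypes
import sympy

x, y = sympy.symbols('x y')
expr = sympy.sin(x) * sympy.exp(-y**2)

signed = hash(expr)                       # may be negative
unsigned = ctypes.c_size_t(signed).value  # reinterpreted as non-negative
assert unsigned >= 0
print('integrand%d' % unsigned)           # safe for a filename or C symbol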
Example #60
0
    def tearDown(self):
        clear_cache()