示例#1
0
def Shell(user_session):
    """Build and run an interactive Rekall IPython shell for *user_session*.

    Configures IPython autocall/prompt behaviour, installs the Rekall
    completer, optionally pre-runs a user-supplied script, then blocks
    inside the embedded shell until the user exits.

    Returns:
        bool: True when the shell session ends.
    """
    # This should bring back the old autocall behaviour. e.g.:
    # In [1]: pslist
    cfg = Config()
    cfg.InteractiveShellEmbed.autocall = 2
    cfg.TerminalInteractiveShell.prompts_class = RekallPrompt
    cfg.InteractiveShell.separate_in = ''
    cfg.InteractiveShell.separate_out = ''
    cfg.InteractiveShell.separate_out2 = ''

    shell = RekallShell(config=cfg, user_ns=user_session.locals)

    shell.Completer.merge_completions = False
    shell.exit_msg = constants.GetQuote()
    shell.set_custom_completer(RekallCompleter, 0)

    # Do we need to pre-run something?
    # FIX: use identity comparison with None (PEP 8) instead of ``!= None``.
    if user_session.run is not None:
        execfile(user_session.run, user_session.locals)

    user_session.shell = shell

    # Set known delimiters for the completer. This varies by OS so we need to
    # set it to ensure consistency.
    readline.set_completer_delims(' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?')

    for magic in REGISTERED_MAGICS:
        shell.register_magics(magic)

    shell(module=user_session.locals, )

    return True
示例#2
0
文件: rb.py 项目: hubitor/progs
def execpyfile(fname, searchdir):
    """Resolve *fname* (as an absolute path, or relative to *searchdir*),
    print a run header, and execute the file in this module's globals.

    Raises:
        Exception: if the file is found in neither location.
    """
    # BUG FIX: ``testname`` was previously left unbound when neither
    # location existed, so the intended "file not found" error surfaced
    # as a NameError instead.  Initialise it explicitly.
    testname = None
    if os.path.isfile(os.path.abspath(fname)):
        testname = os.path.abspath(fname)
    elif os.path.isfile(os.path.join(searchdir, fname)):
        testname = os.path.join(searchdir, fname)
    if not testname:
        raise Exception("file not found: %s" % fname)

    #setupwdir(testname)

    # split streams
    #tee = Tee('stdout.txt')

    # start test
    import time, platform
    print('-' * 79)
    print("starting test", testname)
    print("time:", time.strftime("%c"))
    print("hostname:", platform.node())
    print('-' * 79)

    env = globals()
    # expose the resolved script path to the executed code
    env['__file__'] = testname

    execfile(testname, env)
示例#3
0
    def run(self):
        # This is only called when installing, I think.
        print('P4_install_data.run() here, sitting in for install_data.run()')
        # data_files is a list of one string, defined below in setup(), ['share/doc/p4-0.xx']
        print('data_files = %s' % self.data_files)
        print('install_dir = %s' % self.install_dir)  # eg /usr
        fList = self.copy_tree(
            'share', os.path.join(self.install_dir, self.data_files[0]))
        # If we need to ignore some files, we can use this below instead of self.copy_tree ...
        # shutil.copytree('share',
        #                 os.path.join(self.install_dir, self.data_files[0]),
        #                 ignore=my_ignore)

        mySharePath = os.path.join(self.install_dir, self.data_files[0])
        mySphinxIndexPath = os.path.join(mySharePath,
                                         'sphinxdoc/_build/html/index.html')
        myExamplesPath = os.path.join(mySharePath, 'Examples')

        try:
            loc = {}
            execfile("%s" % instFileName, {}, loc)  # get the p4_lib_dir
            p4_lib_dir = loc['p4LibDir']
            # NOTE(review): ``file()`` and ``execfile()`` are Python 2
            # only — this installer apparently must run under Python 2.
            instFile = file(instFileName, 'a')
            # Append install-derived paths so the installed package can
            # locate its docs and examples at runtime.
            instFile.write("p4DocDir = '%s'\n" % mySharePath)
            instFile.write("p4SphinxIndexPath = '%s'\n" % mySphinxIndexPath)
            instFile.write("p4ExamplesDir = '%s'\n" % myExamplesPath)
            instFile.close()
            # Copy the updated file into the library dir, byte-compile it
            # there, then remove the working copy.
            os.system("cp %s %s" % (instFileName, p4_lib_dir))
            from py_compile import compile
            compile(os.path.join(p4_lib_dir, instFileName))
            os.system("rm -f %s" % instFileName)
        except IOError:
            print("The file '%s' cannot be found." % instFileName)
示例#4
0
文件: main.py 项目: posita/modwalk
    def evalcallback(
            cls,
            value,
            ns,
    ):
        """Resolve *value* into a ``(callback, args, kwargs)`` triple.

        A value of the form ``@FILE:SYMBOL`` executes FILE in *ns* and
        takes SYMBOL from it.  Anything else is evaluated as a Python
        expression in *ns*; the result may itself be a bare callable, a
        ``(callback, args)`` pair, or a ``(callback, args, kwargs)``
        triple.
        """
        extra_args = ()
        extra_kw = {}

        if not value.startswith('@'):
            resolved = eval(value, ns, ns)

            # The evaluated expression may bundle its own arguments.
            try:
                resolved, extra_args, extra_kw = resolved
            except TypeError:
                pass
            except ValueError:
                try:
                    resolved, extra_args = resolved
                except ValueError:
                    pass
            return (resolved, extra_args, extra_kw)

        try:
            path, symbol = value[1:].rsplit(':', 1)
        except ValueError:
            raise ValueError('"{}" must be in the format @FILE:SYMBOL'.format(value))

        # Run the referenced file and pick the named symbol out of ns.
        path = os.path.realpath(path)
        execfile(path, ns, ns)
        return (ns[symbol], extra_args, extra_kw)
示例#5
0
    def _getScriptFuncs(self, isFirst=None):
        """Return a dictionary containing either scriptClass
        or one or more of initFunc, runFunc, endFunc;
        it may also contain HelpURL.
        """
        #       print "_getScriptFuncs(%s)" % isFirst
        # Execute the script with __file__ preset so it can locate
        # resources relative to itself; its top-level names end up in
        # scriptLocals.  (execfile is Python 2 only.)
        scriptLocals = {"__file__": self.fullPath}
        execfile(self.filename, scriptLocals)

        retDict = {}
        helpURL = scriptLocals.get("HelpURL")
        if helpURL:
            retDict["HelpURL"] = helpURL

        # A ScriptClass takes precedence over standalone functions.
        scriptClass = scriptLocals.get("ScriptClass")
        if scriptClass:
            retDict["scriptClass"] = scriptClass
            return retDict

        for attrName in ("run", "init", "end"):
            attr = scriptLocals.get(attrName)
            if attr:
                retDict["%sFunc" % attrName] = attr
            elif attrName == "run":
                # "run" is mandatory when no ScriptClass is supplied.
                raise RuntimeError("%r has no %s function" %
                                   (self.filename, attrName))

        return retDict
示例#6
0
def Shell(user_session):
    """Build and run an interactive Rekall IPython shell for *user_session*.

    Configures IPython autocall/prompt behaviour, installs the Rekall
    completer, optionally pre-runs a user-supplied script, then blocks
    inside the embedded shell until the user exits.

    Returns:
        bool: True when the shell session ends.
    """
    # This should bring back the old autocall behaviour. e.g.:
    # In [1]: pslist
    cfg = Config()
    cfg.InteractiveShellEmbed.autocall = 2
    cfg.TerminalInteractiveShell.prompts_class = RekallPrompt
    cfg.InteractiveShell.separate_in = ''
    cfg.InteractiveShell.separate_out = ''
    cfg.InteractiveShell.separate_out2 = ''

    shell = RekallShell(config=cfg, user_ns=user_session.locals)

    shell.Completer.merge_completions = False
    shell.exit_msg = constants.GetQuote()
    shell.set_custom_completer(RekallCompleter, 0)

    # Do we need to pre-run something?
    # FIX: use identity comparison with None (PEP 8) instead of ``!= None``.
    if user_session.run is not None:
        execfile(user_session.run, user_session.locals)

    user_session.shell = shell

    # Set known delimiters for the completer. This varies by OS so we need to
    # set it to ensure consistency.
    readline.set_completer_delims(' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?')

    for magic in REGISTERED_MAGICS:
        shell.register_magics(magic)

    shell(module=user_session.locals, )

    return True
示例#7
0
def load_apps(APPS_DIR):
    """Import every active Cartoview app found under *APPS_DIR*.

    Makes APPS_DIR importable, reads apps.yml, imports each active app
    module and executes its settings.py (if any) in the current
    namespace.  Failures for individual apps are logged and skipped.

    Returns:
        tuple: the names of the successfully loaded apps.
    """
    create_apps_dir(APPS_DIR)
    if APPS_DIR not in sys.path:
        sys.path.append(APPS_DIR)
    apps_file_path = os.path.join(APPS_DIR, "apps.yml")
    apps_config = AppsConfig(apps_file_path)
    CARTOVIEW_APPS = ()
    for app_config in apps_config:
        if app_config.active:
            try:
                # ensure that the folder is python module
                app_module = importlib.import_module(app_config.name)
                app_dir = os.path.dirname(app_module.__file__)
                app_settings_file = os.path.join(app_dir, 'settings.py')
                if os.path.exists(app_settings_file):
                    # By doing this instead of import, app/settings.py can
                    # refer to local variables from settings.py without
                    # circular imports.
                    execfile(app_settings_file)
                if app_config.name not in CARTOVIEW_APPS:
                    # app_config.name.__str__() because Django don't like
                    # unicode_literals
                    CARTOVIEW_APPS += (app_config.name.__str__(), )
            except Exception as e:
                # BUG FIX: ``Exception.message`` was removed in Python 3;
                # str(e) works on both Python 2 and 3.
                print(str(e))
                logger.error(str(e))

    return CARTOVIEW_APPS
示例#8
0
    def test_loopBlocks(self):
        """An experiment file with made-up params and routines to see whether
        future versions of experiments will get loaded.
        """
        #load the test experiment (with a stims loop, trials loop and blocks loop)
        expfile = path.join(self.exp.prefsPaths['tests'], 'data', 'testLoopsBlocks.psyexp')
        self.exp.loadFromXML(expfile) # reload the edited file
        #alter the settings so the data goes to our tmp dir
        datafileBase = os.path.join(self.tmp_dir, 'testLoopsBlocks')
        datafileBaseRel = os.path.relpath(datafileBase,expfile)
        self.exp.settings.params['Data filename'].val = repr(datafileBaseRel)
        #write the script from the experiment
        script = self.exp.writeScript(expPath=expfile)
        py_file = os.path.join(self.tmp_dir, 'testLoopBlocks.py')

        # save it
        # Neutralise core.quit() so the generated script doesn't kill this
        # process, and delete thisExp so data files get auto-saved.
        with codecs.open(py_file, 'w', 'utf-8-sig') as f:
            f.write(script.replace("core.quit()", "pass"))
            f.write("del thisExp\n") #garbage collect the experiment so files are auto-saved

        #run the file (and make sure we return to this location afterwards)
        # NOTE(review): execfile is Python 2 only.
        wd = os.getcwd()
        execfile(py_file)
        os.chdir(wd)
        #load the data
        print("searching..." +datafileBase)
        print(glob.glob(datafileBase+'*'))
        f = open(datafileBase+".csv", 'rb')
        dat = numpy.recfromcsv(f, case_sensitive=True)
        f.close()
        assert len(dat)==8 # because 4 'blocks' with 2 trials each (3 stims per trial)
示例#9
0
    def _getScriptFuncs(self, isFirst=None):
        """Return a dictionary containing either scriptClass
        or one or more of initFunc, runFunc, endFunc;
        it may also contain HelpURL.
        """
#       print "_getScriptFuncs(%s)" % isFirst
        # Execute the script with __file__ preset; its top-level names
        # land in scriptLocals.  (execfile is Python 2 only.)
        scriptLocals = {"__file__": self.fullPath}
        execfile(self.filename, scriptLocals)

        retDict = {}
        helpURL = scriptLocals.get("HelpURL")
        if helpURL:
            retDict["HelpURL"] = helpURL

        # A ScriptClass takes precedence over standalone functions.
        scriptClass = scriptLocals.get("ScriptClass")
        if scriptClass:
            retDict["scriptClass"] = scriptClass
            return retDict

        for attrName in ("run", "init", "end"):
            attr = scriptLocals.get(attrName)
            if attr:
                retDict["%sFunc" % attrName] = attr
            elif attrName == "run":
                # "run" is mandatory when no ScriptClass is supplied.
                raise RuntimeError("%r has no %s function" % (self.filename, attrName))

        return retDict
示例#10
0
    def PythonRuntimeInit(self):
        """Build the Python runtime namespace for the PLC.

        Installs the PLCGlobals proxy and related entries into
        python_runtime_vars, executes every RUNTIME*.PY file found in the
        working directory, collects the per-phase hook functions those
        files define, then invokes the "init" phase.
        """
        MethodNames = ["init", "start", "stop", "cleanup"]
        self.python_runtime_vars = globals().copy()
        self.python_runtime_vars.update(self.pyruntimevars)
        parent = self

        class PLCSafeGlobals(object):
            # Attribute-style proxy over PLC shared globals: reads and
            # writes go through the _PySafeGetPLCGlob_/_PySafeSetPLCGlob_
            # entry points using the ctypes type registered per variable
            # under "_<name>_ctype" in python_runtime_vars.
            def __getattr__(self, name):
                try:
                    t = parent.python_runtime_vars["_" + name + "_ctype"]
                except KeyError:
                    raise KeyError(
                        "Try to get unknown shared global variable : %s" %
                        name)
                v = t()
                parent.python_runtime_vars["_PySafeGetPLCGlob_" + name](
                    ctypes.byref(v))
                return parent.python_runtime_vars["_" + name + "_unpack"](v)

            def __setattr__(self, name, value):
                try:
                    t = parent.python_runtime_vars["_" + name + "_ctype"]
                except KeyError:
                    raise KeyError(
                        "Try to set unknown shared global variable : %s" %
                        name)
                v = parent.python_runtime_vars["_" + name + "_pack"](t, value)
                parent.python_runtime_vars["_PySafeSetPLCGlob_" + name](
                    ctypes.byref(v))

        self.python_runtime_vars.update({
            "PLCGlobals": PLCSafeGlobals(),
            "WorkingDir": self.workingdir,
            "PLCObject": self,
            "PLCBinary": self.PLClibraryHandle,
            "PLCGlobalsDesc": []
        })

        # One hook list per phase; the runtime files populate these below.
        for methodname in MethodNames:
            self.python_runtime_vars["_runtime_%s" % methodname] = []

        try:
            # Execute RUNTIME*.PY files in sorted order for determinism.
            filenames = os.listdir(self.workingdir)
            filenames.sort()
            for filename in filenames:
                name, ext = os.path.splitext(filename)
                if name.upper().startswith("RUNTIME") and ext.upper() == ".PY":
                    execfile(os.path.join(self.workingdir, filename),
                             self.python_runtime_vars)
                    # Register any _<name>_<phase> hooks the file defined.
                    for methodname in MethodNames:
                        method = self.python_runtime_vars.get(
                            "_%s_%s" % (name, methodname), None)
                        if method is not None:
                            self.python_runtime_vars["_runtime_%s" %
                                                     methodname].append(method)
        except Exception:
            self.LogMessage(0, traceback.format_exc())
            raise

        self.PythonRuntimeCall("init")
示例#11
0
 def test_loopBlocks(self):
     """An experiment file with made-up params and routines to see whether
     future versions of experiments will get loaded.
     """
     #load the test experiment (with a stims loop, trials loop and blocks loop)
     expfile = path.join(self.exp.prefsPaths['tests'], 'data',
                         'testLoopsBlocks.psyexp')
     self.exp.loadFromXML(expfile)  # reload the edited file
     #alter the settings so the data goes to our tmp dir
     datafileBase = os.path.join(self.tmp_dir, 'testLoopsBlocks')
     datafileBaseRel = os.path.relpath(datafileBase, expfile)
     self.exp.settings.params['Data filename'].val = repr(datafileBaseRel)
     #write the script from the experiment
     script = self.exp.writeScript(expPath=expfile)
     py_file = os.path.join(self.tmp_dir, 'testLoopBlocks.py')
     # save it
     # Neutralise core.quit() so the generated script doesn't kill this
     # process.
     f = codecs.open(py_file, 'w', 'utf-8')
     f.write(script.getvalue().replace("core.quit()", "pass"))
     f.write("del thisExp\n"
             )  #garbage collect the experiment so files are auto-saved
     f.close()
     #run the file (and make sure we return to this location afterwards)
     # NOTE(review): execfile is Python 2 only.
     wd = os.getcwd()
     execfile(py_file)
     os.chdir(wd)
     #load the data
     print("searching..." + datafileBase)
     print(glob.glob(datafileBase + '*'))
     f = open(datafileBase + ".csv", 'rb')
     dat = numpy.recfromcsv(f, case_sensitive=True)
     f.close()
     assert len(
         dat
     ) == 8  # because 4 'blocks' with 2 trials each (3 stims per trial)
示例#12
0
    def test_check_content_with_mocked_http_server(self, httpserver):
        # Serve a canned EQSL HTML page (ISO-8859-1) from the local mock
        # HTTP server fixture.
        httpserver.serve_content(
            open(os.path.join(fix_dir, 'eqsl_data.html'), 'rb').read(),
            headers={'content-type': 'text/plain; charset=ISO-8859-1'})

        namespace = {}
        # eqsl_data.py defines ``eqsl_fixture``, the expected user list.
        execfile(os.path.join(fix_dir, "eqsl_data.py"), namespace)
        assert get_eqsl_users(url=httpserver.url) == namespace['eqsl_fixture']
示例#13
0
 def test_examples(self):
     """Execute each GW example script; the test passes if none raise."""
     example_scripts = (
         'examples/gw_examples/injection_examples/fast_tutorial.py',
         'examples/gw_examples/data_examples/GW150914.py',
     )
     for script_path in example_scripts:
         print("Testing {}".format(script_path))
         execfile(script_path)
示例#14
0
 def test_examples(self):
     """Execute each core example script; the test passes if none raise."""
     example_scripts = (
         'examples/core_examples/linear_regression.py',
         'examples/core_examples/linear_regression_unknown_noise.py',
     )
     for script_path in example_scripts:
         print("Testing {}".format(script_path))
         execfile(script_path)
示例#15
0
文件: run.py 项目: sandeez/lino
 def handle(self, *args, **options):
     # Django management command: run an arbitrary Python script as if it
     # had been invoked directly with ``python <script>``.
     if len(args) == 0:
         raise CommandError("I need at least one argument.")
     fn = args[0]
     # Drop "manage.py run" so the script sees its own argv.
     sys.argv = sys.argv[2:]
     # Masquerade as __main__ so ``if __name__ == '__main__'`` blocks fire.
     globals()['__name__'] = '__main__'
     globals()['__file__'] = fn
     execfile(fn, globals())
示例#16
0
 def on_run(self):
     """Execute the current editor tab's file in __main__'s namespace."""
     # FIX: the original declared local variables named ``globals`` and
     # ``locals`` (shadowing the builtins) that were always None, making
     # both "is None" branches dead weight.  The effective behaviour is
     # simply to execute the file with __main__'s dict as both globals
     # and locals, which is what we do directly here.
     import __main__
     main_ns = __main__.__dict__
     execfile(self.tab_widget.currentWidget().filename, main_ns, main_ns)
示例#17
0
def load_pytest_conf(path, parser):
    """loads a ``pytestconf.py`` file and update default parser
    and / or tester.
    """
    conf_ns = {}
    execfile(path, conf_ns)
    # The conf file may provide a hook that adds options to the parser.
    if "update_parser" in conf_ns:
        conf_ns["update_parser"](parser)
    # It may also provide a custom tester class; default to PyTester.
    return conf_ns.get("CustomPyTester", PyTester)
示例#18
0
 def LoadExtensions(self):
     # Execute each registered extension script in this object's global
     # namespace, after making its folder importable and registering its
     # locale catalog and bitmap folder.
     for extfilename in self.extensions:
         from util.TranslationCatalogs import AddCatalog
         from util.BitmapLibrary import AddBitmapFolder
         extension_folder = os.path.split(os.path.realpath(extfilename))[0]
         sys.path.append(extension_folder)
         AddCatalog(os.path.join(extension_folder, "locale"))
         AddBitmapFolder(os.path.join(extension_folder, "images"))
         execfile(extfilename, self.globals())
示例#19
0
def load_pytest_conf(path, parser):
    """loads a ``pytestconf.py`` file and update default parser
    and / or tester.
    """
    namespace = {}
    execfile(path, namespace)
    # the conf file may provide a hook that adds options to the parser
    if 'update_parser' in namespace:
        namespace['update_parser'](parser)
    # and/or a custom tester class; fall back to the default PyTester
    return namespace.get('CustomPyTester', PyTester)
示例#20
0
def configure_env_test():
    """Run configure_env.py with test path settings and check that PATH
    is populated afterwards."""
    config_env = os.path.join(os.path.dirname(utilities.__file__), 'configure_env.py')
    for p in range(10):
        file_template = '/my/test/path/{0}'.format(p)

    # BUG FIX: ``append_os_path`` was passed twice to dict(), which is a
    # SyntaxError ("keyword argument repeated") — merge both path sets
    # into a single list.
    execfile(config_env, dict(__file__=__file__,
                              append_os_path=['/my/test/path/1:/my/test/path/2',
                                              '/my/test/path/3:/my/test/path/4']))
    assert os.environ['PATH'].split(os.pathsep)[0]
示例#21
0
def run():
    # Read the parameter file named on the command line by executing it;
    # its top-level assignments become keys in ``parameters``.
    parameter_file = sys.argv[1]
    parameters = {}
    execfile(parameter_file, parameters)  # this way of reading parameters
    # is not necessarily recommended
    numpy.random.seed(parameters["seed"])
    # look up the requested distribution by name, e.g. "normal"
    distr = getattr(numpy.random, parameters["distr"])
    data = distr(size=parameters["n"])

    numpy.savetxt("Data/example2.dat", data)
示例#22
0
文件: main.py 项目: Felix11H/sumatra
def run():
    # Read the parameter file named on the command line by executing it;
    # its top-level assignments become keys in ``parameters``.
    parameter_file = sys.argv[1]
    parameters = {}
    execfile(parameter_file, parameters) # this way of reading parameters
                                         # is not necessarily recommended
    numpy.random.seed(parameters["seed"])
    # look up the requested distribution by name, e.g. "normal"
    distr = getattr(numpy.random, parameters["distr"])
    data = distr(size=parameters["n"])

    numpy.savetxt("Data/example2.dat", data)
示例#23
0
def parse_config_file(filename):
    """Execute *filename* as a config script and return its namespace.

    The file runs as though imported under the module name
    ``__config__``; every top-level name it defines ends up in the
    returned dict.
    """
    config_ns = dict(
        __builtins__=__builtins__,
        __name__="__config__",
        __file__=filename,
        __doc__=None,
        __package__=None,
    )
    execfile(filename, config_ns, config_ns)
    return config_ns
示例#24
0
 def test_tutorial_examples(self):
     """Runs all tutorial examples. If run without errors, passes test"""
     example_script = 'tutorial_ex%d.py'
     for example_num in range(1, 7):
         # Example 3 isn't meant to work in parallel
         if not (_parallel.is_distributed() and example_num != 3):
             #printing(False)
             # synchronise all ranks around each example run
             _parallel.barrier()
             execfile(join(examples_dir, example_script%example_num))
             _parallel.barrier()
示例#25
0
 def test_examples(self):
     """Execute each listed example script; the test passes if none raise."""
     example_scripts = (
         'examples/injection_examples/basic_tutorial.py',
         'examples/injection_examples/marginalized_likelihood.py',
         'examples/open_data_examples/GW150914.py',
     )
     for script_path in example_scripts:
         print("Testing {}".format(script_path))
         execfile(script_path)
示例#26
0
def parse_config_file(filename):
    # Build a minimal module-like namespace so the config file executes
    # as though it were imported as module "__config__".
    module = {
        "__builtins__": __builtins__,
        "__name__": "__config__",
        "__file__": filename,
        "__doc__": None,
        "__package__": None
    }
    execfile(filename, module, module)
    # the returned dict holds every top-level name the config defined
    return module
示例#27
0
def readfile(configFile, outDict=None):
    """ execute a python file, and return the final environment.

    Args:
        configFile: path of the Python file to execute.
        outDict: optional dict that is additionally updated with the
            resulting environment.
    """
    gdict = {}
    # FIX: removed the unused ``ldict`` local — execfile is called with a
    # single dict, so all top-level names land in ``gdict``.
    execfile(configFile, gdict)  # , ldict)
    if outDict:
        outDict.update(gdict)
    return gdict
示例#28
0
def readfile(configFile, outDict=None):
    """ execute a python file, and return the final environment. """

    gdict = {}
    ldict = {}  # NOTE(review): unused — execfile below receives only gdict

    execfile(configFile, gdict)  # , ldict)
    if outDict:
        outDict.update(gdict)
    return gdict
示例#29
0
 def test_tutorial_examples(self):
     """Runs all tutorial examples. If run without errors, passes test"""
     example_script = 'tutorial_ex%d.py'
     for example_num in range(1, 7):
         # Example 3 isn't meant to work in parallel
         if not (_parallel.is_distributed() and example_num != 3):
             #printing(False)
             # synchronise all ranks around each example run
             _parallel.barrier()
             execfile(join(examples_dir, example_script % example_num))
             _parallel.barrier()
示例#30
0
def exec_file(filename):
    """Execute *filename* under a trace hook that prints each executed
    line number of that file (emitted as "$$lineno: N")."""
    cur_file = os.path.basename(os.path.abspath(__file__))
    def _trace(frame, event, arg_unused):
        basename = frame.f_code.co_filename
        # Only report lines belonging to the target file, and only when
        # this module is running as trace_python3.py — presumably to
        # disable tracing when imported under another name (TODO confirm).
        if cur_file == "trace_python3.py" and basename[-len(filename):] == filename:
            print("$$lineno: %s" % (frame.f_lineno))
        return _trace
    sys.settrace(_trace)
    execfile(filename)
    # clear the trace hook once the script finishes
    sys.settrace(None)
示例#31
0
    def from_file(self, filename):
        """
        Load settings from a Python file

        Args:
            filename (str): Absolute path to a Python file where settings
                variables are defined.
        """
        # Execute the file inside a fresh anonymous module so its
        # top-level names become module attributes, then delegate to the
        # object-based loader.
        settings_module = imp.new_module('config')
        execfile(filename, settings_module.__dict__)
        return self.from_object(settings_module)
示例#32
0
def do(n):
    """Run simulation number *n*: execute its script, then wire up
    snapshot output and open the view."""
    global script, title
    script, title, freq = simulations[n]
    O.reset()
    plot.reset()
    # run the simulation script in this module's global namespace
    execfile(script, globals())
    # add output engines
    snapshot_engine = qt.SnapshotEngine(
        fileBase=O.tmpFilename(), iterPeriod=freq, plot='snapshot')
    O.engines = O.engines + [snapshot_engine]
    # add the snapshot subplot
    plot.plots.update({'snapshot': None})
    # open the view so that it can be set up
    qt.View()
示例#33
0
    def from_file(self, filename):
        """
        Load settings from a Python file

        Args:
            filename (str): Absolute path to a Python file where settings
                variables are defined.
        """
        # Execute the file inside a fresh anonymous module so its
        # top-level names become module attributes, then reuse the
        # object-based loader.
        config_mod = imp.new_module('config')
        execfile(filename, config_mod.__dict__)
        return self.from_object(config_mod)
示例#34
0
def createTweet(message):
    """Post *message* as a status update, using credentials read from
    Twitter/config.py (access_key/secret, consumer_key/secret)."""
    config = {}
    execfile("Twitter/config.py", config)

    # -----------------------------------------------------------------------
    # create twitter API object
    # -----------------------------------------------------------------------
    twitter = Twitter(
        auth=OAuth(config["access_key"], config["access_secret"],
                   config["consumer_key"], config["consumer_secret"]))
    twitter.statuses.update(status=message)
示例#35
0
def configure_env_test():
    """Run configure_env.py with test path settings and check that PATH
    is populated afterwards."""
    config_env = os.path.join(os.path.dirname(utilities.__file__),
                              'configure_env.py')
    for p in range(10):
        file_template = '/my/test/path/{0}'.format(p)

    # BUG FIX: ``append_os_path`` was passed twice to dict(), which is a
    # SyntaxError ("keyword argument repeated") — merge both path sets
    # into a single list.
    execfile(
        config_env,
        dict(__file__=__file__,
             append_os_path=['/my/test/path/1:/my/test/path/2',
                             '/my/test/path/3:/my/test/path/4']))
    assert os.environ['PATH'].split(os.pathsep)[0]
示例#36
0
 def handle(self, *args, **options):
     # Django management command: execute a script file as __main__.
     if True:  # Django 1.10
         fn = options['filename'][0]
     else:
         if len(args) == 0:
             raise CommandError("I need at least one argument.")
         fn = args[0]
     # fn = filename[0]
     # Drop "manage.py run" so the script sees its own argv.
     sys.argv = sys.argv[2:]
     # Masquerade as __main__ so ``if __name__ == '__main__'`` blocks fire.
     globals()['__name__'] = '__main__'
     globals()['__file__'] = fn
     execfile(fn, globals())
示例#37
0
 def handle(self, *args, **options):
     # Django management command: execute a script file as __main__.
     if True:  # Django 1.10
         fn = options['filename'][0]
     else:
         if len(args) == 0:
             raise CommandError("I need at least one argument.")
         fn = args[0]
     # fn = filename[0]
     # Drop "manage.py run" so the script sees its own argv.
     sys.argv = sys.argv[2:]
     # Masquerade as __main__ so ``if __name__ == '__main__'`` blocks fire.
     globals()['__name__'] = '__main__'
     globals()['__file__'] = fn
     execfile(fn, globals())
示例#38
0
    def readConfFile(confFile):
        """Execute *confFile* and return the locals it defined, or False
        if the file is missing or fails to execute."""
        conf_globals = {}
        conf_locals = {}

        try:
            if not os.path.exists(confFile):
                return False
            execfile(confFile, conf_globals, conf_locals)
            return conf_locals
        except Exception:
            # best-effort: any failure reading/executing yields False
            return False
示例#39
0
def load_cfg_file(fname):
    """return the dictionary of components and their properties
    """
    comps_db = {}
    try:
        # Preferred path: the file is a shelve database keyed 'all-cfgs'.
        import shelve
        comps_db = shelve.open(fname, 'r')
        return comps_db['all-cfgs']
    except Exception:
        # Fallback: treat the file as a Python script that defines ``d``.
        from past.builtins import execfile
        execfile(fname, comps_db)
        return comps_db['d']
示例#40
0
文件: commands.py 项目: zlangley/clr
def get_command(which):
    """Return the command object registered under *which*.

    'system' maps to the built-in System() commands; any other name is
    resolved via the config to a module file, which must define COMMANDS.
    """
    if which == 'system':
        obj = System()
    else:
        path = path_of_module(clr.config.commands()[which])
        d = {}
        # execute the module file and pick up its COMMANDS object
        execfile(path, d)
        obj = d['COMMANDS']

    # Backfill namespace.
    obj.ns = which

    return obj
示例#41
0
    def LoadXMLParams(self, CTNName=None):
        """Load this confnode's parameters from its XML files, after
        optionally executing a sibling methods.py script.

        XML schema errors are logged as warnings; load failures are
        logged as errors (neither is raised).
        """
        # Optional per-confnode hook script executed in this frame.
        methode_name = os.path.join(self.CTNPath(CTNName), "methods.py")
        if os.path.isfile(methode_name):
            execfile(methode_name)

        ConfNodeName = CTNName if CTNName is not None else self.CTNName()

        # Get the base xml tree
        if self.MandatoryParams:
            try:
                basexmlfile = open(self.ConfNodeBaseXmlFilePath(CTNName), 'r')
                self.BaseParams, error = _BaseParamsParser.LoadXMLString(
                    basexmlfile.read())
                if error is not None:
                    (fname, lnum,
                     src) = ((ConfNodeName + " BaseParams", ) + error)
                    self.GetCTRoot().logger.write_warning(
                        XSDSchemaErrorMessage.format(a1=fname, a2=lnum,
                                                     a3=src))
                self.MandatoryParams = ("BaseParams", self.BaseParams)
                basexmlfile.close()
            except Exception as exc:
                msg = _("Couldn't load confnode base parameters {a1} :\n {a2}"
                        ).format(a1=ConfNodeName, a2=str(exc))
                self.GetCTRoot().logger.write_error(msg)
                print(traceback.format_exc())

        # Get the xml tree
        if self.CTNParams:
            try:
                xmlfile = open(self.ConfNodeXmlFilePath(CTNName),
                               'r',
                               encoding='utf-8')
                obj, error = self.Parser.LoadXMLString(xmlfile.read())
                if error is not None:
                    (fname, lnum, src) = ((ConfNodeName, ) + error)
                    self.GetCTRoot().logger.write_warning(
                        XSDSchemaErrorMessage.format(a1=fname, a2=lnum,
                                                     a3=src))
                # Expose the parsed tree as an attribute named after its
                # local XML tag.
                name = obj.getLocalTag()
                setattr(self, name, obj)
                self.CTNParams = (name, obj)
                xmlfile.close()
            except Exception as exc:
                msg = _(
                    "Couldn't load confnode parameters {a1} :\n {a2}").format(
                        a1=ConfNodeName, a2=str(exc))
                self.GetCTRoot().logger.write_error(msg)
                print(traceback.format_exc())
示例#42
0
def do(n):
    """Run simulation number *n*: execute its script, then wire up
    snapshot output and open the view."""
    global script, title
    script, title, freq = simulations[n]
    O.reset()
    plot.reset()
    # run the simulation script in this module's global namespace
    execfile(script, globals())
    # add output engines
    O.engines = O.engines + [
        qt.SnapshotEngine(
            fileBase=O.tmpFilename(), iterPeriod=freq, plot='snapshot')
    ]
    # add the snapshot subplot
    plot.plots.update({'snapshot': None})
    # open the view so that it can be set up
    qt.View()
示例#43
0
def include( filename ):
    """execute the given filename in a given environment."""
    # honour the exclusion list — silently skip excluded job options
    if filename in ExcludedFiles:
        return
    if os.path.isabs( filename ):
        full = filename
    else:
        full = find_joboptions( filename )
        if not full:
            raise JobOptionsNotFoundError( filename )
    # reuse a shared environment stored on the function object itself so
    # that successive include() calls can accumulate state
    try:
        _e = include.env
    except AttributeError:
        _e = {}
    execfile( full, _e )
示例#44
0
def get_setup_info(root_dir):
    """Execute *root_dir*/setup.py (without triggering its __main__
    behaviour) and return its SETUP_INFO dict, or None if undefined."""
    if not root_dir.child('setup.py').exists():
        raise RuntimeError(
            "You must call 'fab' from a project's root directory.")
    # sys.path.insert(0, root_dir)
    # setup_module = __import__('setup')
    # print 20140610, root_dir
    # del sys.path[0]
    # return getattr(setup_module, 'SETUP_INFO', None)
    g = dict()
    # __name__ != '__main__' keeps setup() itself from running
    g['__name__'] = 'not_main'
    cwd = Path().resolve()
    # run from the project root, then restore the original cwd
    root_dir.chdir()
    execfile(root_dir.child('setup.py'), g)
    cwd.chdir()
    return g.get('SETUP_INFO')
示例#45
0
def _loadSpace(space):
    """ Load a configuration file into the cache.

    Args:
        space    - a namespace to load from cfgPath/space + ".py"

    Returns:
        dict: the locals defined by the file (also stored in cfgCache).

    Raises:
        ICCError: on a syntax error or any other failure reading the file.
    """

    gdict = {}
    ldict = {}

    filename = os.path.join(cfgPath, "%s.py" % (space))
    try:
        execfile(filename, gdict, ldict)
    except SyntaxError as e:
        # ICCError handling should be improved to handle multi-line errors,
        # so we could use the SyntaxError's .text and .offset, and spit out a proper
        # backtrace.
        raise ICCError("syntax error at or before line %d (%s) of the configuration file %s" % (e.lineno, e.text, filename))
    except Exception as e:
        raise ICCError("failed to read the configuration file %s: %s" % (filename, e))

    # cache the result so later lookups skip re-execution
    cfgCache[space] = ldict
    return ldict
示例#46
0
    def add_from_file(self, filename, handler_decorator=None):
        """
        Wrapper around add() that reads the handlers from the
        file with the given name. The file is a Python script containing
        a list named 'commands' of tuples that map command names to
        handlers.

        :type  filename: str
        :param filename: The name of the file containing the tuples.
        :type  handler_decorator: function
        :param handler_decorator: A function that is used to decorate
               each of the handlers in the file.
        """
        args = {}
        # Run the script and collect its top-level names (including
        # 'commands') into ``args``.  (execfile is Python 2 only.)
        execfile(filename, args)
        commands = args.get('commands')
        if commands is None:
            raise Exception(filename + ' has no variable named "commands"')
        elif not hasattr(commands, '__iter__'):
            raise Exception(filename + ': "commands" is not iterable')
        for key, handler in commands:
            if handler_decorator:
                handler = handler_decorator(handler)
            self.add(key, handler)
示例#47
0
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
]

ALLOWED_HOSTS = ['*']

# Filesystem layout for the migasfree server installation.
MIGASFREE_PUBLIC_DIR = '/var/migasfree/repo'
MIGASFREE_KEYS_DIR = '/usr/share/migasfree-server/keys'

STATIC_ROOT = '/var/migasfree/static'
MEDIA_ROOT = MIGASFREE_PUBLIC_DIR

SECRET_KEY = secret_key(MIGASFREE_KEYS_DIR)

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'migasfree',
        'USER': '******',
        'PASSWORD': '******',
        'HOST': '',
        'PORT': '',
    }
}

# Allow a site-local settings file to override anything above; a missing
# override file is deliberately not an error.
try:
    execfile(MIGASFREE_SETTINGS_OVERRIDE, globals(), locals())
except IOError:
    pass
示例#48
0
"""
This script simply executes the python file provided as the first argument.  
It basically provides the same functionality as the normal python interpreter, but without many of the options python provides.

It is useful as a means to execute arbitrary scripts in the mac binary.  
This file is passed to py2app via the --extra-scripts option, and then arbitrary scripts can be launched with the same environment that the ilastik app uses.
(Note that the interpreter located in ilastik.app/Contents/MacOS/python does NOT properly set up the environment.)
"""
from past.builtins import execfile
if __name__ == "__main__":
    import os
    import sys

    # Read the file to execute
    f = sys.argv[1]
    
    # Remove this file from the list, so that sys.argv[0] is the name of the file we're executing.
    sys.argv.pop(0)
    __file__ = f
    
    # By default, this script is always launched with a CWD of ilastik.app/Contents/Resources.
    # But that's confusing, so switch back to the CWD of the terminal when the script was launched.
    original_cwd = os.environ['PWD']
    os.chdir(original_cwd)
    
    # Execute the script with execfile.  This way, if __name__ == '__main__' sections should work.
    execfile(f)
示例#49
0
文件: setup.py 项目: MrKriss/sodapy
#!/usr/bin/env python

# Provides Python 2/3 compatibility
from past.builtins import execfile
from builtins import str

from setuptools import setup
execfile("sodapy/version.py")

try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
    long_description = open('README').read()


kwargs = {
    "name": "sodapy",
    "version": str(__version__),
    "packages": ["sodapy"],
    "description": "Python bindings for the Socrata Open Data API",
    "long_description": long_description,
    "author": "Cristina Munoz",
    "maintainer": "Cristina Munoz",
    "author_email": "*****@*****.**",
    "maintainer_email": "*****@*****.**",
    "license": "MIT",
    "install_requires": ["py", "pytest", "requests", "requests-mock", "six"],
    "url": "https://github.com/xmunoz/sodapy",
    "download_url": "https://github.com/xmunoz/sodapy/archive/master.tar.gz",
    "keywords": "soda socrata opendata api",
示例#50
0
    def __init__(self, config=None, connect=True):
        """Build the configuration and optionally connect to the database.

        :param config: settings source -- a mapping, an object whose
            uppercase attributes are settings, or a path (string) to a
            Python file that is executed to obtain them.
        :param connect: when True and ``DATABASE_URI`` is configured,
            call :meth:`connect` immediately.
        """
        #: A :class:`dict` of various settings. By convention, all keys are
        #: uppercase. These are used to create :attr:`engine` and
        #: :attr:`session`.
        self.config = {}

        #: The :class:`~sqlalchemy.engine.Engine` that underlies
        #: the :attr:`session`.
        self.engine = None

        #: A :class:`~sqlalchemy.orm.session.Session` class.
        self.session = None

        # A string is treated as a path to a config file: execute it inside
        # a fresh module namespace so plain ``NAME = value`` files work.
        if isinstance(config, (six.text_type, six.string_types)):
            filepath = config
            config = imp.new_module('config')
            config.__file__ = filepath
            try:
                execfile(filepath, config.__dict__)
            except IOError as e:
                # Make the error message say which file failed to load.
                e.strerror = 'Cannot load config file: %s' % e.strerror
                raise
​
        # Copy only UPPERCASE keys into self.config. A mapping is iterated
        # directly; iterating a non-mapping (e.g. the module built above)
        # raises TypeError, and the fallback inspects dir()/getattr instead.
        try:
            config = config or {}
            for key in config:
                if key.isupper():
                    self.config[key] = config[key]
        except TypeError:
            for key in dir(config):
                if key.isupper():
                    self.config[key] = getattr(config, key)

        # Set a default data-file location (under DATA_PATH) for *name*
        # unless the caller already configured it explicitly.
        def default(name, *args):
            path = os.path.join(self.config['DATA_PATH'], *args)
            self.config.setdefault(name, path)

        default('COMPOUNDED_NOMINAL_ENDINGS', 'nominal-endings-compounded.csv')
        default('ENUMS', 'enums.csv')
        default('GERUNDS', 'gerunds.csv')
        default('INDECLINABLES', 'indeclinables.csv')
        default('INFINITIVES', 'infinitives.csv')
        default('INFLECTED_NOMINAL_ENDINGS', 'nominal-endings-inflected.csv')
        default('IRREGULAR_ADJECTIVES', 'irregular-adjectives.csv')
        default('IRREGULAR_NOUNS', 'irregular-nouns.csv')
        default('MODIFIED_ROOTS', 'modified-roots.csv')
        default('NOMINAL_STEMS', 'nominal-stems.csv')
        default('PARTICIPLE_STEMS', 'participle-stems.csv')
        default('PREFIXED_ROOTS', 'prefixed-roots.csv')
        default('PREFIX_GROUPS', 'prefix-groups.csv')
        default('PRONOUNS', 'pronouns.csv')
        default('SANDHI_RULES', 'sandhi-rules.csv')
        default('UNPREFIXED_ROOTS', 'unprefixed-roots.csv')
        default('VERBAL_INDECLINABLES', 'verbal-indeclinables.csv')
        default('VERBS', 'verbs.csv')
        default('VERB_ENDINGS', 'verb-endings.csv')
        default('VERB_PREFIXES', 'verb-prefixes.csv')
        default('VERB_STEMS', 'verb-stems.csv')

        # Connect eagerly only when asked to and a URI is actually present.
        if connect and 'DATABASE_URI' in self.config:
            self.connect()
示例#51
0
    def test_check_content_with_mocked_http_server(self, httpserver):
        """get_lotw_users parses the served CSV into the fixture's value."""
        # Serve the recorded LoTW user-activity CSV from a local HTTP server.
        httpserver.serve_content(open(os.path.join(fix_dir, 'lotw-user-activity.csv')).read())

        # The expected result lives in a fixture .py file; execfile it into a
        # scratch namespace to obtain the ``lotw_fixture`` object.
        namespace = {}
        execfile(os.path.join(fix_dir,"lotw_fixture.py"), namespace)
        assert get_lotw_users(url=httpserver.url) == namespace['lotw_fixture']
示例#52
0
# we would prefer a symbolic link, but it does not work on windows
from past.builtins import execfile
import os
# Execute opentuner's adddeps.py as if it lived at its own location:
# passing __file__ in the globals lets it resolve paths relative to itself.
target = os.path.join(os.path.dirname(__file__),
                      '../../opentuner/utils/adddeps.py')
execfile(target, dict(__file__=target))

示例#53
0
    with open('allitems.txt') as allitems:
        for item in allitems:
            kind, chapter_c, number_c = itemparser.match(item).groups()

            number = int(number_c)

            if chapter_c.isdigit():
                chapter = int(chapter_c)
                mask = '{}_{:02d}_{:02d}.py'
            else:
                chapter = chapter_c
                mask = '{}_{}_{}.py'

            filename = mask.format(kind, chapter, number)
            try:
                execfile(filename)
                status = SUCCESS
            except IOError:
                status = NOTIMPLEMENTED
            except Exception as err:
                status = FAILED
                message = traceback.format_exc()

            statuscounter[status] += 1

            if status != NOTIMPLEMENTED:
                print(kind, chapter, number, status)

            if status == FAILED:
                faillist.append([kind, chapter, number])
                print(message)
示例#54
0
from past.builtins import execfile
import sys
from os.path import normpath, realpath, dirname, join, isfile

# Repository root: two levels up from this file.
project_root = normpath(join(dirname(realpath(__file__)), '../..'))

# Activate the project virtualenv unless one already appears on sys.path.
already_in_venv = 'venv' in ','.join(sys.path)
if not already_in_venv:
  venv_activate = join(project_root, 'venv/bin/activate_this.py')
  if isfile(venv_activate):
    # activate_this.py expects __file__ to point at itself.
    execfile(venv_activate, dict(__file__=venv_activate))

# Make project modules importable ahead of anything else.
sys.path.insert(0, project_root)

示例#55
0
def MasterProcessingController(argv=None):
    """Drive a mini version of BRAINSAutoWorkup over one or more subjects.

    Parses the command line, reads the experiment .ini file, prepares the
    processing environment (sys.path, PATH, optional virtualenv, atlas
    cache), then processes each requested subject either serially (for
    'local' wfrun plugins) or through a multiprocessing pool.

    :param argv: argument list to parse; defaults to ``sys.argv``.
    :return: 0 on completion.
    """
    import argparse
    import configparser
    import csv
    import string

    if argv is None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    args = parser.parse_args()

    # BUG FIX: the parser was previously bound to ``config``, which was then
    # shadowed by ``from nipype import config`` below, while later code
    # referenced the undefined names ``expConfig``/``input_arguments``.
    # Binding it as ``expConfig`` (and using ``args``) resolves those
    # NameErrors without changing what ``config`` means after the import.
    expConfig = configparser.ConfigParser(allow_no_value=True)
    expConfig.read(args.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE = setDataSinkRewriteValue(args.rewrite_datasinks,
                                                       expConfig.getboolean('NIPYPE', 'GLOBAL_DATA_SINK_REWRITE'))
    experiment = get_experiment_settings(expConfig)
    # Platform specific information
    environment = get_environment_settings(expConfig)
    if environment['cluster']:
        cluster = get_cluster_settings(expConfig)
    # BUG FIX: ``environment`` is a mapping, not a callable -- the original
    # ``environment('PYTHONPATH')`` raised TypeError (compare the PATH line).
    sys.path = environment['PYTHONPATH']
    os.environ['PATH'] = ':'.join(environment['PATH'])
    # Virtualenv: activate_this.py-style activation -- execute the activator
    # with __file__ pointing at itself.
    if environment['virtualenv_dir'] is not None:
        print("Loading virtualenv_dir...")
        execfile(environment['virtualenv_dir'], dict(__file__=environment['virtualenv_dir']))
    ###### Now ensure that all the required packages can be read in from this custom path
    ## Check to ensure that SimpleITK can be found
    import SimpleITK as sitk
    from nipype import config  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode()  # NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    # config.enable_provenance()

    ##############################################################################
    from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
    from nipype.interfaces.base import traits, isdefined, BaseInterface
    from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
    import nipype.interfaces.io as nio  # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    from nipype.interfaces.freesurfer import ReconAll

    from nipype.utils.misc import package_check
    # package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
    package_check('numpy', '1.3', 'tutorial1')
    package_check('scipy', '0.7', 'tutorial1')
    package_check('networkx', '1.0', 'tutorial1')
    package_check('IPython', '0.10', 'tutorial1')

    # Raises EnvironmentError on a polluted FreeSurfer environment; the old
    # try/re-raise wrapper around this call was a no-op and has been removed.
    verify_empty_freesurfer_env()

    # Define platform specific output write paths
    if not os.path.exists(experiment['output_cache']):
        os.makedirs(experiment['output_cache'])
    if not os.path.exists(experiment['output_results']):
        os.makedirs(experiment['output_results'])
    if 'input_results' in experiment:
        assert os.path.exists(
            experiment['input_results']), "The previous experiment directory does not exist: {0}".format(
            experiment['input_results'])

    #    Define platform specific output write paths
    # NOTE(review): this section duplicates the directory setup above and
    # appears to come from an older revision of this function.
    # ``ExperimentName``, ``PreviousExperimentName``, ``MODULES`` and the
    # CLUSTER_QUEUE*/QSTAT_* names used below are not defined locally and
    # must be supplied as module-level globals -- confirm.
    mountPrefix = expConfig.get(args.processingEnvironment, 'MOUNTPREFIX')
    BASEOUTPUTDIR = expConfig.get(args.processingEnvironment, 'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, ExperimentName))
    ExperimentBaseDirectoryCache = ExperimentBaseDirectoryPrefix + "_CACHE"
    ExperimentBaseDirectoryResults = ExperimentBaseDirectoryPrefix + "_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)
    if PreviousExperimentName is not None:
        PreviousBaseDirectoryPrefix = os.path.realpath(os.path.join(BASEOUTPUTDIR, PreviousExperimentName))
        PreviousBaseDirectoryResults = PreviousBaseDirectoryPrefix + "_Results"
        assert os.path.exists(
            PreviousBaseDirectoryResults), "The previous experiment directory does not exist: {0}".format(
            PreviousBaseDirectoryResults)
    else:
        PreviousBaseDirectoryResults = None
    # Define workup common reference data sets
    #    The ATLAS needs to be copied to the ExperimentBaseDirectoryPrefix
    #    The ATLAS pathing must stay constant
    ATLASPATH = expConfig.get(args.processingEnvironment, 'ATLASPATH')
    if not os.path.exists(ATLASPATH):
        print("ERROR:  Invalid Path for Atlas: {0}".format(ATLASPATH))
        sys.exit(-1)
    CACHE_ATLASPATH = os.path.realpath(os.path.join(ExperimentBaseDirectoryCache, 'Atlas'))
    from distutils.dir_util import copy_tree
    if not os.path.exists(CACHE_ATLASPATH):
        print("Copying a reference of the atlas to the experiment cache directory:\n    from: {0}\n    to: {1}".format(
            ATLASPATH, CACHE_ATLASPATH))
        copy_tree(ATLASPATH, CACHE_ATLASPATH, preserve_mode=1, preserve_times=1)
        ## Now generate the xml file with the correct pathing
        file_replace(os.path.join(ATLASPATH, 'ExtendedAtlasDefinition.xml.in'),
                     os.path.join(CACHE_ATLASPATH, 'ExtendedAtlasDefinition.xml'), "@ATLAS_INSTALL_DIRECTORY@",
                     CACHE_ATLASPATH)
    else:
        print("Atlas already exists in experiment cache directory: {0}".format(CACHE_ATLASPATH))

    ## Set custom environmental variables so that subproceses work properly (i.e. for FreeSurfer)
    # NOTE(review): eval() of config text is acceptable only because the
    # experiment config is trusted local input.
    CUSTOM_ENVIRONMENT = eval(environment['misc'])
    for key, value in list(CUSTOM_ENVIRONMENT.items()):
        os.putenv(key, value)
        os.environ[key] = value

    WORKFLOW_COMPONENTS = experiment['components']
    if 'FREESURFER' in WORKFLOW_COMPONENTS:
        check_freesurfer_environment()

    cluster = setup_cpu(args.wfrun, config)  # None unless wfrun is 'helium*' or 'ipl_OSX', then dict()

    print("Configuring Pipeline")
    ## Ensure that entire db is built and cached before parallel section starts.
    _ignoreme = OpenSubjectDatabase(experiment['output_cache'], ["all"], environment['prefix'],
                                    environment['subject_data_file'])
    to_do_subjects = args.subject.split(',')
    if to_do_subjects[0] == "all":
        to_do_subjects = _ignoreme.getAllSubjects()
    _ignoreme = None

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.
    JOB_SCRIPT = get_global_sge_script(sys.path, os.environ['PATH'], CUSTOM_ENVIRONMENT, MODULES)
    print(JOB_SCRIPT)

    # Randomly shuffle to_do_subjects to spread the start-up load.
    import random
    random.shuffle(to_do_subjects)

    ## Make a list of all the arguments to be processed
    sp_args_list = list()
    start_time = time.time()
    subj_index = 1
    for subjectid in to_do_subjects:
        # Stagger subject start times to avoid simultaneous start-up contention.
        delay = 2.5 * subj_index
        subj_index += 1
        print("START DELAY: {0}".format(delay))
        # BUG FIX: last tuple element was the undefined name
        # ``PreviousBaseDirectoryResult`` (missing trailing 's').
        sp_args = (CACHE_ATLASPATH, CLUSTER_QUEUE, CLUSTER_QUEUE_LONG, QSTAT_IMMEDIATE_EXE, QSTAT_CACHED_EXE,
                   experiment['output_cache'], experiment['output_results'], environment['subject_data_file'],
                   GLOBAL_DATA_SINK_REWRITE, JOB_SCRIPT, WORKFLOW_COMPONENTS, args,
                   mountPrefix, start_time + delay, subjectid, PreviousBaseDirectoryResults)
        sp_args_list.append(sp_args)
    if 'local' in args.wfrun:
        print("RUNNING WITHOUT POOL BUILDING")
        for sp_args in sp_args_list:
            DoSingleSubjectProcessing(sp_args)
    else:
        ## Make a pool of workers to submit simultaneously
        from multiprocessing import Pool
        myPool = Pool(processes=64, maxtasksperchild=1)
        # .get() with an effectively infinite timeout (idiom that keeps the
        # blocking call interruptible).
        all_results = myPool.map_async(DoSingleSubjectProcessing, sp_args_list).get(1e100)

        for indx, result in enumerate(all_results):
            if result == False:
                print("FAILED for {0}".format(sp_args_list[indx][-1]))

    print("THIS RUN OF BAW FOR SUBJS {0} HAS COMPLETED".format(to_do_subjects))
    return 0
示例#56
0
# J. Duriez ([email protected])

# to import with yade/python/ipython solveLaplace_uc.py, or with execfile('solveLaplace_uc.py',globals()) once inside a yade/python/ipython session

from past.builtins import execfile

from builtins import range
execfile('solveLiqBridge.py',globals())

def solveLaplace_uc(theta,rRatio,uStar,delta1Cons,deltaZ,save):
    # Solves Laplace-Young equation for given r=rRatio, uc*=uStar and theta
    #   Making recursively use of solveLiqBridge function, the present function
    #   identifies all possible bridges configurations for given r, uc*, and
    #   theta

    # Input attributes (see also solveLiqBridge function):
    # theta: contact angle (deg)
    # rRatio: rMax / rMin for the considered particles pair
    # uStar: dimensionless capillary pressure
    # delta1Cons: numpy.array of delta1 (see solveLiqBridge) values to consider
    # deltaZ: see solveLiqBridge
    # save: 0/1 to save text files including the capillary bridges data (if 1)

    # Returns a 2D numpy.array of capillary bridges data (see sol_u below),
    # possibly (in case save=1) also written in text files.



    # Removal of possible d1=0 since such bridge is physically impossible, and
    # d1 = 0 might make trouble if theta = 0 (leads to inf. initial rhoPrime):
    delta1Cons = delta1Cons[delta1Cons!=0]
示例#57
0
文件: checkList.py 项目: yade/trunk
from past.builtins import execfile
import yade,math,os,sys

# NOTE(review): ``checksPath`` and the Omega object ``O`` must already be
# defined when this script runs (yade session globals) -- confirm.
scriptsToRun=os.listdir(checksPath)
resultStatus = 0
nFailed=0
failedScripts=list()

# Scripts that must never be executed as checks (including this one).
skipScripts = ['checkList.py']

for script in scriptsToRun:
	if (script[len(script)-3:]==".py" and script not in skipScripts):
		try:
			print("###################################")
			print("running: ",script)
			# The check script runs in this namespace; presumably it
			# increments ``resultStatus`` on failure -- hence the
			# comparison against the previous count below.
			execfile(checksPath+"/"+script)
			if (resultStatus>nFailed):
				print('\033[91m'+"Status: FAILURE!!!"+'\033[0m')
				nFailed=resultStatus
				failedScripts.append(script)
			else:
				print("Status: success")
			print("___________________________________")
		except Exception as e:
			# A crash inside the check script counts as a failure too.
			resultStatus+=1
			nFailed=resultStatus
			failedScripts.append(script)
			print('\033[91m',script," failure, caught exception: ",e,'\033[0m')
		# Reset the simulation state between checks.
		O.reset()
	elif (script in skipScripts):
		print("###################################")
示例#58
0
import os
from matplotlib.pyplot import *
import matplotlib.gridspec as gridspec

#######
# INPUT
timeStepSave = 0.1	# Data saved every timeStepSave seconds in the simulation
startFile = 0		# File where the post-processing begins
endFile = 400		# File where the post-processing ends

# NOTE(review): ``sys`` and ``np`` (numpy) are used below but not imported
# here -- presumably provided by the surrounding yade session; confirm.
scriptPath = os.path.abspath(os.path.dirname(sys.argv[-1])) #Path where the script is stored
if os.path.exists(scriptPath +'/data/')==False:	#If the data folder does not exist, no data to extract, exit.
	print('\n There is no data to extract in this folder ! Please first run sedimentTransportExample_1DRANSCoupling.py !\n')
	exit()
else:	#Else, extract the first file in order to get the size of the vectors, ndimz
	# Executing data/0.py defines phiPartPY (among others) in this namespace.
	execfile(scriptPath +'/data/0.py')
	ndimz = len(phiPartPY)	#Size of the vectors, mesh parameter.

# Initialization of the variables to extract and plot.
[qs,time,phiPart,vxPart,vxFluid] = [[],[],np.zeros(ndimz),np.zeros(ndimz),np.zeros(ndimz)]

########
# LOOP over time to EXTRACT the data
########
for i in range(startFile,endFile):
	nameFile = scriptPath +'/data/' + str(i)+'.py'	#Name of the file at the considered time step
	if  os.path.exists(nameFile)==False:	#Check if the file exist
		print('\nThe file {0} does not exist, stop the post processing at this stage !\n'.format(nameFile))
		endFile = i-1	#Last file processed
		break
	#Extract the data from at the time (file) considered. Assign the vectors to qsMean, phiPartPY, vxPartPY, vxFluidPY, and zAxis see the structure of a saved file X.py. 
示例#59
0
    raise OSError('This must be run from doc/examples directory')

# Run the conversion from .py to rst file
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples')
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples/frontiers_paper')

# Make the index.rst file
# NOTE(review): the index-building step below is disabled -- it is wrapped
# in a bare string literal; remove the quotes to re-enable it.
"""
index = open('index.rst', 'w')
index.write(examples_header)
for name in [os.path.splitext(f)[0] for f in glob('*.rst')]:
    #Don't add the index in there to avoid sphinx errors and don't add the
    #note_about examples again (because it was added at the top):
    if name not in(['index','note_about_examples']):
        index.write('   %s\n' % name)
index.close()
"""

# Execute each python script in the directory.
if '--no-exec' in sys.argv:
    pass
else:
    if not os.path.isdir('fig'):
        os.mkdir('fig')

    for script in glob('*.py'):
        # figure_basename is presumably read by the executed script to save
        # its figures under fig/ -- confirm against the example scripts.
        figure_basename = pjoin('fig', os.path.splitext(script)[0])
        execfile(script)
        plt.close('all')

示例#60
0
# Build a two-triangle GTS surface from the vertices/edges defined above.
f1 = gts.Face(e1,e2,e3)

e4 = gts.Edge(v4,v3)
e5 = gts.Edge(v3,v2)
f2 = gts.Face(e2,e4,e5)

s1 = gts.Surface()
s1.add(f1)
s1.add(f2)

# Convert the surface into solid yade facet bodies (wire=False) and add them.
facet = gtsSurface2Facets(s1,wire = False,material=mat)
O.bodies.append(facet)


# --- Identification of spheres onJoint, and so on:
# identifBis.py is executed in this namespace.
execfile('identifBis.py')


# --- Engines definition
O.engines=[
	ForceResetter(),
	InsertionSortCollider([Bo1_Sphere_Aabb()]),
	InteractionLoop(
		[Ig2_Sphere_Sphere_ScGeom()],
		[Ip2_JCFpmMat_JCFpmMat_JCFpmPhys(cohesiveTresholdIteration=1)],
		[Law2_ScGeom_JCFpmPhys_JointedCohesiveFrictionalPM(smoothJoint=True)]),
	GlobalStiffnessTimeStepper(timestepSafetyCoefficient=0.8),
        NewtonIntegrator(damping=0.2),
        PyRunner(command='afficheIt()',initRun=True,iterPeriod=1000),
]
def afficheIt():