Code example #1
 def store_clicked(b):
     l=line.value
     v=varName.value
     name=self.__getObjectName(self)
     store=self.__getStorageName()
     if mode.value=="Time":
         t=time.value
         print_("Storing fields at t=",t,"on line",l,"from",directoryName,"in",self.path,"to variable",v)
         cmdBase="%s.sampleTime('%s','%s','%s')" % (name,directoryName,l,t)
         if store:
             sname,sval=store
             cmd="%s=%s('%s',lambda:%s)" % (v,sname,v,cmdBase)
             val=sval(v,lambda:self.sampleTime(directoryName,l,t))
         else:
             cmd="%s=%s" % (v,cmdBase)
             val=self.sampleTime(directoryName,l,t)
     elif mode.value=="Field":
         f=field.value
         print_("Storing fields",f," at all times on line",l,"from",directoryName,"in",self.path,"to variable",v)
         cmdBase="%s.sampleField('%s','%s','%s')" % (name,directoryName,l,f)
         if store:
             sname,sval=store
             cmd="%s=%s('%s',lambda:%s)" % (v,sname,v,cmdBase)
             val=sval(v,lambda:self.sampleField(directoryName,l,f))
         else:
             cmd="%s=%s" % (v,cmdBase)
             val=self.sampleField(directoryName,l,f)
     else:
         print_("Unknown mode",mode)
         return
     create_code_cell(cmd,"below")
     get_ipython().push({v:val})
     varName.value=""
Code example #2
File: yamlmagic.py  Project: bollwyvl/yamlmagic
    def yaml(self, line, cell):
        line = line.strip()
        args = magic_arguments.parse_argstring(self.yaml, line)

        display(Javascript(
            """
            require(
                [
                    "notebook/js/codecell",
                    "codemirror/mode/yaml/yaml"
                ],
                function(cc){
                    cc.CodeCell.options_default.highlight_modes.magic_yaml = {
                        reg: ["^%%yaml"]
                    }
                }
            );
            """))

        loader = get_ipython().user_global_ns.get(args.loader, None)
        if loader is None:
            loader = import_item(args.loader)

        try:
            val = yaml.load(cell, Loader=loader)
        except yaml.YAMLError as err:
            print(err)
            return

        if args.var_name is not None:
            get_ipython().user_ns[args.var_name] = val
        else:
            return val
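The core of the magic above is just PyYAML loading with a selectable loader; as a minimal, self-contained sketch of that step (yaml.SafeLoader is chosen here only for illustration and is not necessarily the extension's default):

import yaml

cell = "host: localhost\nport: 8080\n"   # stands in for the body of a %%yaml cell
val = yaml.load(cell, Loader=yaml.SafeLoader)
print(val)  # {'host': 'localhost', 'port': 8080}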
Code example #3
File: highlighter.py  Project: wilywampa/vimconfig
 def __init__(self, *args, **kwargs):
     super(HighlightTextFormatter, self).__init__(*args, **kwargs)
     self.color = False
     for event in available_events:
         if event != 'pre_run_cell':
             get_ipython().events.register(event, self.disable_color)
     get_ipython().events.register('pre_run_cell', self.enable_color)
Code example #4
File: bench_blas.py  Project: Titan-C/helpful_scripts
def run_bench(threads=None):
    if threads is not None:
        num_threads(threads)
    for N in [1000, 2000, 4000]:
        a = np.random.rand(N, N)
        print("Matrix multiplication N=" + str(N))
        get_ipython().magic('time c = np.dot(a, a)')
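A hypothetical call of this benchmark (num_threads comes from the surrounding project and is not shown in the snippet):

# Pin the BLAS backend to two threads, then time the three matrix products.
run_bench(threads=2)
# Or keep the library's default threading behaviour.
run_bench()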
Code example #5
File: _demo.py  Project: robwalton/diffcalc
 def echorun_magiccmd(self, magic_cmd):
     if IPYTHON:
         from IPython import get_ipython
         echo(magic_cmd)
         get_ipython().magic(magic_cmd) 
     else:  # Python
         # python's help is interactive. Handle specially 
         if magic_cmd == 'help ub':
             
             echo("help ub")
             exec("print ub.__doc__", self.namespace)
             return
         if magic_cmd == 'help hkl':
             echo("help(hkl)")
             exec("print hkl.__doc__", self.namespace)
             return  
         
         # Echo the Python version of the magic command   
         tokens = diffcmd.ipython.tokenify(magic_cmd)
         if not tokens:
             return
         python_cmd = tokens.pop(0) + '(' + ', '.join(tokens) + ')'
         python_cmd = python_cmd.replace('[, ', '[')
         python_cmd = python_cmd.replace(',]', ']')
         python_cmd = python_cmd.replace(', ]', ']')
         echo(python_cmd)
         
         # Run the Python version of the magic command
         elements = diffcmd.ipython.parse(magic_cmd, self.namespace)
         func = elements.pop(0)
         result = func(*elements)
         if result:
             print result
Code example #6
File: lambda_filter.py  Project: pbiczo/vimconfig
    def transform(self, line, continue_prompt):
        # Don't modify multi-line statements
        if continue_prompt:
            return line
        try:
            list(self.tokens(line))
        except tokenize.TokenError:
            return line

        # Split line on semicolons
        cols = [col for ttype, token, (_, col), _, _
                in self.tokens(line) if ttype == tokenize.OP and token == ';']

        parts = [line[s + 1:e]
                 for s, e in zip([-1] + cols, cols + [len(line)])]
        try:
            parts = [self.parens(part) for part in parts]
        except IndexError:
            return line
        newline = ';'.join(parts)

        # Replace '\' characters with 'lambda '
        cols = [col for _, token, (_, col), _, _
                in self.tokens(newline) if token == '\\' and
                newline[col + 1:].strip() != '']
        for col in reversed(cols):
            newline = newline[:col] + 'lambda ' + newline[col + 1:]

        if newline.strip() != line.strip():
            get_ipython().auto_rewrite_input(newline)

        return newline
Code example #7
def test_install_editor():
    called = []
    def fake_popen(*args, **kwargs):
        called.append({
            'args': args,
            'kwargs': kwargs,
        })
        return mock.MagicMock(**{'wait.return_value': 0})
    editorhooks.install_editor('foo -l {line} -f {filename}', wait=False)
    
    with mock.patch('subprocess.Popen', fake_popen):
        get_ipython().hooks.editor('the file', 64)
    
    nt.assert_equal(len(called), 1)
    args = called[0]['args']
    kwargs = called[0]['kwargs']
    
    nt.assert_equal(kwargs, {'shell': True})
    
    if sys.platform.startswith('win'):
        expected = ['foo', '-l', '64', '-f', 'the file']
    else:
        expected = "foo -l 64 -f 'the file'"
    cmd = args[0]
    nt.assert_equal(cmd, expected)
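For comparison, a hedged real-world use of the same hook outside the test (the VS Code command-line flags are an assumption, not part of this snippet):

from IPython.lib import editorhooks

# Open files from %edit in VS Code, jumping straight to the requested line.
editorhooks.install_editor('code --goto {filename}:{line}', wait=False)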
Code example #8
File: ui.py  Project: aliyun/aliyun-odps-python-sdk
    def register_retry_magic():
        from .runner import get_retry_mode, set_retry_mode
        try:
            if in_ipython_frontend():
                from IPython import get_ipython
                from IPython.core.magic import register_line_magic

                @register_line_magic
                def retry(_):
                    global retry_via_magic

                    if not get_retry_mode():
                        retry_via_magic = True
                        set_retry_mode(True)
                    return ''

                del retry

                def auto_cancel_retry():
                    global retry_via_magic
                    if retry_via_magic:
                        retry_via_magic = False
                        set_retry_mode(False)

                get_ipython().events.register('post_execute', auto_cancel_retry)
        except ImportError:
            pass
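A usage sketch, assuming register_retry_magic() has already run inside a live IPython session:

from IPython import get_ipython

# Equivalent to typing "%retry" in a cell; the 'post_execute' hook registered
# above switches retry mode back off once the next execution finishes.
get_ipython().run_line_magic('retry', '')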
Code example #9
File: ipython.py  Project: amshenoy/nengo_gui
    def start_server(cls, cfg, model):
        # Make sure only one server is writing the same config.
        server_thread = cls.threads.get(cfg, None)
        server = cls.servers.get(cfg, None)
        existent = server_thread is not None and server is not None
        if existent and server_thread.is_alive():
            warnings.warn(ConfigReuseWarning(
                "Reusing config. Only the most recent visualization will "
                "update the config."))
            for page in server.gui.pages:
                page.save_config(force=True)
                page.filename_cfg = get_ipython().mktempfile()
                cls.servers[page.filename_cfg] = server
                cls.threads[page.filename_cfg] = server_thread

        name = model.label
        gui = nengo_gui.GUI(
            name, cfg=cfg, model=model, locals=get_ipython().user_ns,
            interactive=False, allow_file_change=False)
        server = gui.prepare_server(port=0, browser=False)
        server_thread = threading.Thread(
            target=gui.begin_lifecycle,
            kwargs={'server': server})
        server_thread.start()
        cls.servers[cfg] = server
        cls.threads[cfg] = server_thread
        cls.configs.add(cfg)
        return server_thread, server
Code example #10
File: make_manual.py  Project: robwalton/diffcalc
def _capture_magic_command_output(magic_cmd, lineno, filepath):
    orig_stdout = sys.stdout
    result = StringIO()
    sys.stdout = result
    
    def log_error():
        msg = "Error on line %i of %s evaluating '%s'" % (lineno, filepath, magic_cmd)
        sys.stderr.write('\n' + '=' * 79 + '\n' + msg + '\n' +'v' * 79 + '\n')
        return msg
    
    try:
        line_magics = get_ipython().magics_manager.magics['line']
        magic = magic_cmd.split(' ')[0]
        if magic not in line_magics:
            msg = log_error()
            raise Exception(msg + " ('%s' is not a magic command)" % magic)           
        get_ipython().magic(magic_cmd)
    except:
        log_error()
        raise
    finally:
        sys.stdout = orig_stdout

    result_lines = result.getvalue().split('\n')
        
    # trim trailing lines which are whitespace only
    while result_lines and (result_lines[-1].isspace() or not result_lines[-1]):
        result_lines.pop()
        
    return result_lines
Code example #11
File: __init__.py  Project: karies/ROOTaaS
def iPythonize():
    utils.setStyle()
    for capture in utils.captures: capture.register()
    ExtensionManager(get_ipython()).load_extension("ROOTaaS.iPyROOT.cppmagic")
    ExtensionManager(get_ipython()).load_extension("ROOTaaS.iPyROOT.dclmagic")
    ROOT.toCpp = toCpp
    welcomeMsg()
Code example #12
File: sparse_problem.py  Project: stephane-caron/oqp
def time_sparse_solvers():
    instructions = {
        solver: "u = solve_qp(P, q, G, h, solver='%s')" % solver
        for solver in sparse_solvers}
    print "\nSparse solvers",
    print "\n--------------"
    for solver, instr in instructions.iteritems():
        print "%s: " % solver,
        get_ipython().magic(u'timeit %s' % instr)
Code example #13
File: util.py  Project: brynpickering/calliope
def type_of_script():
    if get_ipython() is not None:
        ipy_str = str(type(get_ipython()))
        if 'zmqshell' in ipy_str:
            return 'jupyter'
        if 'terminal' in ipy_str:
            return 'ipython'
    else:
        return 'terminal'
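A typical use of such a check is picking a front-end-appropriate progress bar; this sketch assumes tqdm is installed and is not part of the original module:

if type_of_script() == 'jupyter':
    from tqdm.notebook import tqdm   # widget-based progress bar
else:
    from tqdm import tqdm            # plain-text progress bar

for _ in tqdm(range(3)):
    pass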
Code example #14
File: utils.py  Project: dpiparo/ROOTaaS
def toCpp():
    '''
    Change the mode of the notebook to CPP. It is preferred to use cell magic,
    but this option is handy to set up servers and for debugging purposes.
    '''
    cpptransformer.load_ipython_extension(get_ipython())
    cppcompleter.load_ipython_extension(get_ipython())
    # Change highlight mode
    IPython.display.display_javascript(jsDefaultHighlight.format(mimeType = cppMIME), raw=True)
    print "Notebook is in Cpp mode"
Code example #15
File: nbdoc.py  Project: SiddharthTiwari/fastai
def doc(elt):
    "Show `show_doc` info in preview window along with link to full docs."
    global use_relative_links
    use_relative_links = False
    elt = getattr(elt, '__func__', elt)
    md = show_doc(elt, markdown=False)
    if is_fastai_class(elt):
        md += f'\n\n<a href="{get_fn_link(elt)}" target="_blank" rel="noreferrer noopener">Show in docs</a>'
    output = md2html(md)
    use_relative_links = True
    if IS_IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output)
    else:
        try: page.page({'text/html': output})
        except: display(Markdown(md))
Code example #16
File: kernel.py  Project: keflavich/specview
def connected_kernel(**kwargs):
    """Connect to another kernel running in
       the current process

    This only works on IPython v1.0 and above

    Parameters
    ----------
    kwargs : Extra variables to put into the namespace
    """
    kernel_info = {}

    shell = get_ipython()
    if shell is None:
        raise RuntimeError("There is no IPython kernel in this process")

    try:
        client = QtKernelClient(connection_file=get_connection_file())
        client.load_connection_file()
        client.start_channels()
        kernel_info['client'] = client
        kernel_info['shell'] = shell
    except Exception:
        print ('Detected running from an ipython interpreter.\n'
               'The GUI console will be disabled.')
        kernel_info['client'] = None
        kernel_info['shell'] = None

    return kernel_info
Code example #17
File: test_magic.py  Project: jakevdp/ipython
def test_script_bg_out_err():
    ip = get_ipython()
    ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
    nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
    nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
    ip.user_ns['output'].close()
    ip.user_ns['error'].close()
Code example #18
File: utils.py  Project: xpdAcq/xpdAcq
def import_sample_info(saf_num=None, bt=None):
    """ import sample metadata based on a spreadsheet

    this function expects a pre-populated '<SAF_number>_sample.xls' file
    located under the `xpdUser/import` directory. Corresponding Sample objects
    will be created after the stored information has been parsed. Please go to
    http://xpdacq.github.io for parser rules.

    Parameters
    ----------
    saf_num : int
        Safety Approval Form number of beamtime.
    bt : xpdacq.beamtime.Beamtime
        beamtime object that is going to be linked with these samples
    """

    if bt is None:
        error_msg = "WARNING: Beamtime object does not exist in current "\
                    "ipython session. Please make sure:\n"\
                    "1. a beamtime has been started\n"\
                    "2. double check 'bt_bt.yml' exists under "\
                    "xpdUser/config_base/yml directory.\n"\
                    "\n"\
                    "If any of these checks fails or problem "\
                    "persists, please contact beamline staff immediately"
        _check_obj('bt', error_msg)  # raise NameError if bt is not alive
        ips = get_ipython()
        bt = ips.ns_table['user_global']['bt']

    # pass to core function
    _import_sample_info(saf_num=saf_num, bt=bt)
Code example #19
File: findspark.py  Project: ay27/findspark
def edit_ipython_profile(spark_home, spark_python, py4j):
    """Adds a startup file to the current IPython profile to import pyspark.

    The startup file sets the required environment variables and imports pyspark.

    Parameters
    ----------
    spark_home : str
        Path to Spark installation.
    spark_python : str
        Path to python subdirectory of Spark installation.
    py4j : str
        Path to py4j library.
    """

    ip = get_ipython()

    if ip:
        profile_dir = ip.profile_dir.location
    else:
        from IPython.utils.path import locate_profile
        profile_dir = locate_profile()

    startup_file_loc = os.path.join(profile_dir, "startup", "findspark.py")

    with open(startup_file_loc, 'w') as startup_file:
        #Lines of code to be run when IPython starts
        startup_file.write("import sys, os\n")
        startup_file.write("os.environ['SPARK_HOME'] = '" + spark_home + "'\n")
        startup_file.write("sys.path[:0] = " + str([spark_python, py4j]) + "\n")
        startup_file.write("import pyspark\n")
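A hypothetical invocation; all three paths below are placeholders rather than values from the original project:

edit_ipython_profile(
    spark_home='/opt/spark',
    spark_python='/opt/spark/python',
    py4j='/opt/spark/python/lib/py4j-0.10.9-src.zip',
)
# The profile's startup/ directory now contains findspark.py, so every new
# IPython session sets SPARK_HOME, extends sys.path, and imports pyspark.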
Code example #20
File: runtime.py  Project: twosigma/beaker-notebook
def get_context_session():
    kernel = get_ipython().kernel
    # if subkernel get session from extra start parameters
    if len(kernel.parent.argv) == 3:
        context_json = base64.b64decode(kernel.parent.argv[2]).decode('UTF-8')
        return json.loads(context_json)['contextId']
    return kernel.session.session
Code example #21
    def __enter__(self):
        """Called upon entering output widget context manager."""
        self._flush()
        kernel = get_ipython().kernel
        session = kernel.session
        send = session.send
        self._original_send = send
        self._session = session

        def send_hook(stream, msg_or_type, content=None, parent=None, ident=None,
             buffers=None, track=False, header=None, metadata=None):

            # Handle both prebuilt messages and unbuilt messages.
            if isinstance(msg_or_type, (Message, dict)):
                msg_type = msg_or_type['msg_type']
                msg = dict(msg_or_type)
            else:
                msg_type = msg_or_type
                msg = session.msg(msg_type, content=content, parent=parent,
                    header=header, metadata=metadata)

            # If this is a message type that we want to forward, forward it.
            if stream is kernel.iopub_socket and msg_type in ['clear_output', 'stream', 'display_data']:
                self.send(msg)
            else:
                send(stream, msg, ident=ident, buffers=buffers, track=track)

        session.send = send_hook
Code example #22
 def find_module(self, fullname, path=None):
     if self._called:
         return
     # return
     if fullname not in ('pylab', 'matplotlib.pyplot'):
         return
     # Don't call me again
     self._called = True
     try:
         sys.meta_path.remove(self)
     except ValueError:
         pass
     
     ip = get_ipython()
     if ip is None:
         return
     
     if ip.pylab_gui_select:
         return
     
     # default to inline in kernel environments
     if hasattr(ip, 'kernel'):
         print('enabling inline matplotlib')
         ip.enable_matplotlib('inline')
     else:
         print('enabling matplotlib')
         ip.enable_matplotlib()
Code example #23
 def __enter__(self):
     """Called upon entering output widget context manager."""
     self._flush()
     ip = get_ipython()
     if ip and hasattr(ip, 'kernel') and hasattr(ip.kernel, '_parent_header'):
         self.msg_id = ip.kernel._parent_header['header']['msg_id']
         self.__counter += 1
Code example #24
File: utils.py  Project: dpiparo/ROOTaaS
 def __init__(self, stream, ip=get_ipython()):
     streamsFileNo={sys.stderr:2,sys.stdout:1}
     self.pipe_out = None
     self.pipe_in = None
     self.sysStreamFile = stream
     self.sysStreamFileNo = streamsFileNo[stream]
     self.shell = ip
Code example #25
File: bill_test.py  Project: cbun/narrative
def _try_perl(meth):
    """Run a Perl command. Maybe. 
    
    :return: A string
    :rtype: kbtypes.Unicode
    """
    meth.stages = 2  # for reporting progress
    meth.advance("Starting...")

    from IPython import get_ipython
    ipy = get_ipython()

    meth.advance("Running")
    ipy.run_cell_magic('perl', 
                       '--out perl_lines', 
                       'use JSON;'
                       'my $token = $ENV{"KB_AUTH_TOKEN"};'
                       'my @arr = ("foo", "bar", "baz");'
                       'my $foo;'
                       '$foo->{"what"}=\@arr;'
                       '$foo->{"token"}=$token;'
                       'print encode_json($foo);')
    
    res = ipy.user_variables(['perl_lines'])['perl_lines']['data']['text/plain'][1:-1]
    res = json.loads(res)
    return json.dumps({'lines' : res})
Code example #26
def register_magics():
    """
    register magics function, can be called from a notebook
    """
    from IPython import get_ipython
    ip = get_ipython()
    ip.register_magics(CustomMagics)
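CustomMagics itself is not shown in this snippet; a minimal sketch of such a class (the %hello magic is purely illustrative) could look like this:

from IPython.core.magic import Magics, magics_class, line_magic

@magics_class
class CustomMagics(Magics):
    @line_magic
    def hello(self, line):
        """Trivial line magic: %hello <name> echoes a greeting."""
        return "hello " + line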
Code example #27
File: test_completer.py  Project: marcosptf/fedora
def test_line_magics():
    ip = get_ipython()
    c = ip.Completer
    s, matches = c.complete(None, 'lsmag')
    nt.assert_in('%lsmagic', matches)
    s, matches = c.complete(None, '%lsmag')
    nt.assert_in('%lsmagic', matches)
Code example #28
File: test_completer.py  Project: marcosptf/fedora
def test_dict_key_completion_bytes():
    """Test handling of bytes in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {'abc': None, b'abd': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("'abc'", matches)
    nt.assert_in("b'abd'", matches)

    if False:  # not currently implemented
        _, matches = complete(line_buffer="d[b")
        nt.assert_in("b'abd'", matches)
        nt.assert_not_in("b'abc'", matches)

        _, matches = complete(line_buffer="d[b'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d[B'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d['")
        nt.assert_in("abc", matches)
        nt.assert_not_in("abd", matches)
Code example #29
File: test_completer.py  Project: marcosptf/fedora
def test_magic_completion_order():

    ip = get_ipython()
    c = ip.Completer

    # Test ordering of magics and non-magics with the same name
    # We want the non-magic first

    # Before importing matplotlib, there should only be one option:

    text, matches = c.complete('mat')
    nt.assert_equal(matches, ["%matplotlib"])


    ip.run_cell("matplotlib = 1")  # introduce name into namespace

    # After the import, there should be two options, ordered like this:
    text, matches = c.complete('mat')
    nt.assert_equal(matches, ["matplotlib", "%matplotlib"])


    ip.run_cell("timeit = 1")  # define a user variable called 'timeit'

    # Order of user variable and line and cell magics with same name:
    text, matches = c.complete('timeit')
    nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
Code example #30
File: test_completer.py  Project: marcosptf/fedora
def test_omit__names():
    # also happens to test IPCompleter as a configurable
    ip = get_ipython()
    ip._hidden_attr = 1
    ip._x = {}
    c = ip.Completer
    ip.ex('ip=get_ipython()')
    cfg = Config()
    cfg.IPCompleter.omit__names = 0
    c.update_config(cfg)
    s,matches = c.complete('ip.')
    nt.assert_in('ip.__str__', matches)
    nt.assert_in('ip._hidden_attr', matches)
    cfg = Config()
    cfg.IPCompleter.omit__names = 1
    c.update_config(cfg)
    s,matches = c.complete('ip.')
    nt.assert_not_in('ip.__str__', matches)
    nt.assert_in('ip._hidden_attr', matches)
    cfg = Config()
    cfg.IPCompleter.omit__names = 2
    c.update_config(cfg)
    s,matches = c.complete('ip.')
    nt.assert_not_in('ip.__str__', matches)
    nt.assert_not_in('ip._hidden_attr', matches)
    s,matches = c.complete('ip._x.')
    nt.assert_in('ip._x.keys', matches)
    del ip._hidden_attr
Code example #31
File: desc_vis.py  Project: yccai/scikit-chem
 def initialize_ipython():
     ipython = get_ipython()
     try:
         ipython.magic('matplotlib inline')
     except:
         pass
Code example #32
File: test_magic.py  Project: reibenz/ipython
def test_script_bg_out_err():
    ip = get_ipython()
    ip.run_cell_magic("script", "--bg --out output --err error sh",
                      "echo 'hi'\necho 'hello' >&2")
    nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
    nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
Code example #33
"""
Created on Tue Nov 19 14:10:50 2019

@author: cijzendoornvan
"""
##################################
####          PACKAGES        ####
##################################
import json
import numpy as np
import pandas as pd
import os.path
import pickle
import matplotlib.pyplot as plt
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib',
                             'auto')  ## %matplotlib auto TO GET WINDOW FIGURE


#################################
####        FUNCTIONS        ####
#################################
def plot_relation(Dir_variables, variables, title, x_name, y_name, Dir,
                  file_name, area, xlimits, ylimits):
    pickle_file1 = Dir_variables + variables[0] + '_dataframe.pickle'
    pickle_file2 = Dir_variables + variables[1] + '_dataframe.pickle'

    if os.path.exists(pickle_file1) and os.path.exists(pickle_file2):
        Variable_values1 = pickle.load(open(pickle_file1,
                                            'rb'))  #load pickle of dimension
        Variable_values2 = pickle.load(open(pickle_file2,
                                            'rb'))  #load pickle of dimension
Code example #34
    def __init__(self,
                 color_scheme=None,
                 completekey=None,
                 stdin=None,
                 stdout=None,
                 context=5,
                 **kwargs):
        """Create a new IPython debugger.

        Parameters
        ----------
        color_scheme : default None
            Deprecated, do not use.
        completekey : default None
            Passed to pdb.Pdb.
        stdin : default None
            Passed to pdb.Pdb.
        stdout : default None
            Passed to pdb.Pdb.
        context : int
            Number of lines of source code context to show when
            displaying stacktrace information.
        **kwargs
            Passed to pdb.Pdb.

        Notes
        -----
        The possibilities are python version dependent, see the python
        docs for more info.
        """

        # Parent constructor:
        try:
            self.context = int(context)
            if self.context <= 0:
                raise ValueError("Context must be a positive integer")
        except (TypeError, ValueError):
            raise ValueError("Context must be a positive integer")

        # `kwargs` ensures full compatibility with stdlib's `pdb.Pdb`.
        OldPdb.__init__(self, completekey, stdin, stdout, **kwargs)

        # IPython changes...
        self.shell = get_ipython()

        if self.shell is None:
            save_main = sys.modules['__main__']
            # No IPython instance running, we must create one
            from IPython.terminal.interactiveshell import \
                TerminalInteractiveShell
            self.shell = TerminalInteractiveShell.instance()
            # needed by any code which calls __import__("__main__") after
            # the debugger was entered. See also #9941.
            sys.modules['__main__'] = save_main

        if color_scheme is not None:
            warnings.warn(
                "The `color_scheme` argument is deprecated since version 5.1",
                DeprecationWarning,
                stacklevel=2)
        else:
            color_scheme = self.shell.colors

        self.aliases = {}

        # Create color table: we copy the default one from the traceback
        # module and add a few attributes needed for debugging
        self.color_scheme_table = exception_colors()

        # shorthands
        C = coloransi.TermColors
        cst = self.color_scheme_table

        cst['NoColor'].colors.prompt = C.NoColor
        cst['NoColor'].colors.breakpoint_enabled = C.NoColor
        cst['NoColor'].colors.breakpoint_disabled = C.NoColor

        cst['Linux'].colors.prompt = C.Green
        cst['Linux'].colors.breakpoint_enabled = C.LightRed
        cst['Linux'].colors.breakpoint_disabled = C.Red

        cst['LightBG'].colors.prompt = C.Blue
        cst['LightBG'].colors.breakpoint_enabled = C.LightRed
        cst['LightBG'].colors.breakpoint_disabled = C.Red

        cst['Neutral'].colors.prompt = C.Blue
        cst['Neutral'].colors.breakpoint_enabled = C.LightRed
        cst['Neutral'].colors.breakpoint_disabled = C.Red

        # Add a python parser so we can syntax highlight source while
        # debugging.
        self.parser = PyColorize.Parser(style=color_scheme)
        self.set_colors(color_scheme)

        # Set the prompt - the default prompt is '(Pdb)'
        self.prompt = prompt
        self.skip_hidden = True
        self.report_skipped = True

        # list of predicates we use to skip frames
        self._predicates = self.default_predicates
Code example #35
def test_forward_unicode_completion():
    ip = get_ipython()

    name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
    nt.assert_equal(len(matches), 1)
    nt.assert_equal(matches[0], 'Ⅴ')
Code example #36
#%%
import os
from pathlib import Path

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from IPython import get_ipython  # just to deceive flake8

import src.utils as utils

get_ipython().run_line_magic("autoreload", "2")

get_ipython().run_line_magic("matplotlib", "inline")
os.getcwd()

#%% [markdown]
# ### Choose experiment, print out configurations

#%%
base_path = "./maggot_models/models/runs/"
experiment = "drosophila-5-rdpg-sbm"
run = 4
config = utils.load_config(base_path, experiment, run)
sbm_df = utils.load_pickle(base_path, experiment, run, "sbm_master_df")
sbm_df = sbm_df.apply(pd.to_numeric)
#%% [markdown]
# ### Plot the noise observed in SBM model fitting

#%%
# Plotting setup
Code example #37
File: test_magic.py  Project: reibenz/ipython
def test_script_bg_out():
    ip = get_ipython()
    ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
    nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
Code example #38
    def train_network(self):
        if (os.path.exists('checkpoints-crnn') == False):
            get_ipython().system('mkdir checkpoints-crnn')

        validation_acc = []
        validation_loss = []
        train_acc = []
        train_loss = []

        with tf.Session(graph=self.graph) as sess:
            sess.run(tf.global_variables_initializer())
            iteration = 1

            for e in range(self.epochs):
                # Initialize
                state = sess.run(self.initial_state)

                # Loop over batches
                for x, y in get_batches(self.X_tr, self.y_tr, self.batch_size):

                    # Feed dictionary
                    feed = {self.inputs_: x, self.labels_: y, self.keep_prob_: 0.5,
                            self.initial_state: state, self.learning_rate_: self.learning_rate}

                    loss, _, state, acc = sess.run([self.cost, self.optimizer, self.final_state, self.accuracy],
                                                   feed_dict=feed)
                    train_acc.append(acc)
                    train_loss.append(loss)

                    # Print at each 5 iters
                    if (iteration % 5 == 0):
                        print("Epoch: {}/{}".format(e, self.epochs),
                              "Iteration: {:d}".format(iteration),
                              "Train loss: {:6f}".format(loss),
                              "Train acc: {:.6f}".format(acc))

                    # Compute validation loss at every 25 iterations
                    if (iteration % 25 == 0):

                        # Initiate for validation set
                        val_state = sess.run(self.cell.zero_state(self.batch_size, tf.float32))

                        val_acc_ = []
                        val_loss_ = []
                        for x_v, y_v in get_batches(self.X_vld, self.y_vld, self.batch_size):
                            # Feed
                            feed = {self.inputs_: x_v, self.labels_: y_v, self.keep_prob_: 1.0, self.initial_state: val_state}

                            # Loss
                            loss_v, state_v, acc_v = sess.run([self.cost, self.final_state, self.accuracy], feed_dict=feed)

                            val_acc_.append(acc_v)
                            val_loss_.append(loss_v)

                        # Print info
                        print("Epoch: {}/{}".format(e, self.epochs),
                              "Iteration: {:d}".format(iteration),
                              "Validation loss: {:6f}".format(np.mean(val_loss_)),
                              "Validation acc: {:.6f}".format(np.mean(val_acc_)))

                        # Store
                        validation_acc.append(np.mean(val_acc_))
                        validation_loss.append(np.mean(val_loss_))

                    # Iterate
                    iteration += 1

            self.saver.save(sess, "checkpoints-crnn/har.ckpt")

        # Plot training and test loss
        t = np.arange(iteration - 1)
        plt.figure(figsize=(6, 6))
        plt.plot(t, np.array(train_loss), 'r-', t[t % 25 == 0], np.array(validation_loss), 'b*')
        plt.xlabel("iteration")
        plt.ylabel("Loss")
        plt.legend(['train', 'validation'], loc='upper right')
        plt.show()

        # Plot Accuracies
        plt.figure(figsize=(6, 6))
        plt.plot(t, np.array(train_acc), 'r-', t[t % 25 == 0], validation_acc, 'b*')
        plt.xlabel("iteration")
        plt.ylabel("Accuracy")
        plt.legend(['train', 'validation'], loc='upper right')
        plt.show()
Code example #39
from bluesky.suspenders import SuspendFloor, SuspendBoolHigh, SuspendBoolLow
from IPython import get_ipython

user_ns = get_ipython().user_ns

#RE.clear_suspenders()
all_BMM_suspenders = list()

## ----------------------------------------------------------------------------------
## suspend when I0 drops below 0.1 nA (not in use)
suspender_I0 = SuspendFloor(user_ns['quadem1'].I0,
                            0.1,
                            resume_thresh=1,
                            sleep=5)
#all_BMM_suspenders.append(suspender_I0)

## ----------------------------------------------------------------------------------
## suspend upon beam dump, resume 30 seconds after hitting 90% of fill target
try:
    if user_ns['ring'].filltarget.get() > 20:
        suspender_ring_current = SuspendFloor(user_ns['ring'].current,
                                              10,
                                              resume_thresh=0.9 *
                                              user_ns['ring'].filltarget.get(),
                                              sleep=60)
        all_BMM_suspenders.append(suspender_ring_current)
except:
    pass

## ----------------------------------------------------------------------------------
## suspend if the BM photon shutter closes, resume 5 seconds after opening
Code example #40
def test_nested_import_module_completer():
    ip = get_ipython()
    _, matches = ip.complete(None, 'import IPython.co', 17)
    nt.assert_in('IPython.core', matches)
    nt.assert_not_in('import IPython.core', matches)
    nt.assert_not_in('IPython.display', matches)
Code example #41
    def __init__(self, colors=None):
        """
        DEPRECATED

        Create a local debugger instance.

        Parameters
        ----------

        colors : str, optional
            The name of the color scheme to use, it must be one of IPython's
            valid color schemes.  If not given, the function will default to
            the current IPython scheme when running inside IPython, and to
            'NoColor' otherwise.

        Examples
        --------
        ::

            from IPython.core.debugger import Tracer; debug_here = Tracer()

        Later in your code::

            debug_here()  # -> will open up the debugger at that point.

        Once the debugger activates, you can use all of its regular commands to
        step through code, set breakpoints, etc.  See the pdb documentation
        from the Python standard library for usage details.
        """
        warnings.warn(
            "`Tracer` is deprecated since version 5.1, directly use "
            "`IPython.core.debugger.Pdb.set_trace()`",
            DeprecationWarning,
            stacklevel=2)

        ip = get_ipython()
        if ip is None:
            # Outside of ipython, we set our own exception hook manually
            sys.excepthook = functools.partial(BdbQuit_excepthook,
                                               excepthook=sys.excepthook)
            def_colors = 'NoColor'
        else:
            # In ipython, we use its custom exception handler mechanism
            def_colors = ip.colors
            ip.set_custom_exc((bdb.BdbQuit, ), BdbQuit_IPython_excepthook)

        if colors is None:
            colors = def_colors

        # The stdlib debugger internally uses a modified repr from the `repr`
        # module, that limits the length of printed strings to a hardcoded
        # limit of 30 characters.  That much trimming is too aggressive, let's
        # at least raise that limit to 80 chars, which should be enough for
        # most interactive uses.
        try:
            from reprlib import aRepr
            aRepr.maxstring = 80
        except:
            # This is only a user-facing convenience, so any error we encounter
            # here can be warned about but can be otherwise ignored.  These
            # printouts will tell us about problems if this API changes
            import traceback
            traceback.print_exc()

        self.debugger = Pdb(colors)
Code example #42
def test_import_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('i', 'import i')
    nt.assert_in('io', matches)
    nt.assert_not_in('int', matches)
Code example #43
try:
    os.chdir(os.path.join(os.getcwd(), 'code'))
    print(os.getcwd())
except:
    pass
# %%
from IPython import get_ipython

# %%
import reader
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
from matplotlib import pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns

from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid

# %% [markdown]
# #### Loading the dataset

# %%
data = reader.get_all_data()
data.head()
Code example #44
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 23:48:36 2020

@author: websterkgd
"""

#clear environment
from IPython import get_ipython
get_ipython().magic('reset -sf')
from IPython import get_ipython

#import packages for data analysis
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt

#change directory to directory with data
os.chdir('D:\\a_Desktops_Git\\Current\\SpringBoard\\Capstone')

#import the data
duod = pd.read_csv('settles.acl16.learning_traces.13m.csv')  #takes 2 minutes

#pull out how many users
lu = duod.user_id.values
lu = list(set(lu))  # runs quickly len 115,222

#pulling about 60 user ~ 1.5 min (fine for prelim analysis)
# create a list of dictionaries
ldu_oh = {}
Code example #45
File: test_magic.py  Project: reibenz/ipython
def test_script_err():
    ip = get_ipython()
    ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
    nt.assert_equal(ip.user_ns['error'], 'hello\n')
Code example #46
File: util.py  Project: Bodo-inc/ipyparallel
def _execute(code):
    """helper method for implementing `client.execute` via `client.apply`"""
    user_ns = get_ipython().user_global_ns
    exec(code, user_ns)
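For context, a hedged sketch of how such a helper is typically exercised through ipyparallel; the cluster connection details are assumptions, not part of the snippet:

import ipyparallel as ipp

rc = ipp.Client()          # connect to an already-running cluster
view = rc[:]               # DirectView over all engines
view.apply_sync(_execute, "x = 1 + 1")   # run the code in each engine's user namespace
print(view['x'])           # one value per engine, e.g. [2, 2, 2, 2]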
Code example #47
File: uiButtons.py  Project: ULeth-Math-CS/Callysto
                        if (code_shown) {
                            $('div.input').hide('500');
                            $('#toggleButton').val('Show Code')
                        } else {
                            $('div.input').show('500');
                            $('#toggleButton').val('Hide Code')
                        }
                        code_shown = !code_shown
                    }

                    document.getElementById('init').onclick = function () {
                        runAll = document.getElementById('run_all_cells_below').childNodes[1]
                        console.log(runAll)
                        runAll.click()
                    }

                    $(document).ready(function () {
                        code_shown = true;
                        $('div.input').hide()
                    });
                </script>
                <input type="submit" id="toggleButton" value="Show Code">
                <input id="init" type="submit" value="Initialize">'''

        self.shell.run_cell(raw_code, store_history=False)


#define more custom magic function here as needed.

ip = get_ipython()
ip.register_magics(MyMagics)
Code example #48
File: model.py  Project: BaldrLector/hmr2.0
import os
import sys
import time

# to make run from console for module import
sys.path.append(os.path.abspath(".."))

# tf INFO and WARNING messages are not printed
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf

try:
    from IPython import get_ipython

    ipy_str = str(type(get_ipython()))
    if 'zmqshell' in ipy_str:
        from tqdm import tqdm_notebook as tqdm
    else:
        from tqdm import tqdm
except:
    from tqdm import tqdm

from main.config import Config
from main.dataset import Dataset
from main.discriminator import Discriminator
from main.generator import Generator
from main.model_util import batch_align_by_pelvis, batch_compute_similarity_transform, batch_rodrigues

import tensorflow.compat.v1.losses as v1_loss
Code example #49
# Package imports
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
import matplotlib

# Display plots inline and change default figure size
from IPython import get_ipython
get_ipython().magic(u'matplotlib inline')
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)

# Generate a dataset and plot it
np.random.seed(0)
X, y = sklearn.datasets.make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)

# Train the logistic regression classifier# Train
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)


# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates the contour plot below.
def plot_decision_boundary(pred_func):
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
Code example #50
File: hardware_utils.py  Project: robingong/fonduer
import codecs
import csv
import logging
from builtins import range

from fonduer.supervision.models import GoldLabel, GoldLabelKey

try:
    from IPython import get_ipython

    if "IPKernelApp" not in get_ipython().config:
        raise ImportError("console")
except (AttributeError, ImportError):
    from tqdm import tqdm
else:
    from tqdm import tqdm_notebook as tqdm


logger = logging.getLogger(__name__)

# Define labels
ABSTAIN = 0
FALSE = 1
TRUE = 2


def get_gold_dict(
    filename, doc_on=True, part_on=True, val_on=True, attribute=None, docs=None
):
    with codecs.open(filename, encoding="utf-8") as csvfile:
        gold_reader = csv.reader(csvfile)
Code example #51
    font = {'family': 'sans', 'weight': 'normal', 'size': 22}

    plt.rc('font', **font)

    plt.figure(figsize=(18, 14))

    #plt.subplot(1, 2, 2)
    plt.imshow(mdiff, cmap='Greens', origin='lower')
    plt.colorbar()
    plt.clim(0.7, 1)
    plt.savefig("unigram_jaccard_50_green_07")
    plt.show()


try:
    get_ipython()
    import plotly.offline as py
except Exception:
    #
    # Fall back to matplotlib if we're not in a notebook, or if plotly is
    # unavailable for whatever reason.
    #
    plot_difference = plot_difference_matplotlib
else:
    py.init_notebook_mode()
    plot_difference = plot_difference_plotly


def topic_distance():
    """Topicunähnlichkeit/Topicdistanz"""
    import pprint
Code example #52
try:
    from IPython import get_ipython

    get_ipython().run_line_magic("load_ext", "autoreload")
    get_ipython().run_line_magic("autoreload", "2")
    # get_ipython().run_line_magic('matplotlib', 'inline')
    print("Auto-reloading enabled.")
except AttributeError:
    pass

import sys

# XXX: is this necessary?
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm, colors
import sys, os
import errno
import re
import datetime
from icae.tools.config_loader import config
import box


def mkdir_p(path):
Code example #53
File: test_magic.py  Project: reibenz/ipython
def test_script_config():
    ip = get_ipython()
    ip.config.ScriptMagics.script_magics = ['whoda']
    sm = script.ScriptMagics(shell=ip)
    nt.assert_in('whoda', sm.magics['cell'])
Code example #54
import pandas as pd
import csv
import time
import datetime
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from IPython import get_ipython
ipy = get_ipython()
import math
from sklearn.svm import SVC
from sklearn.decomposition import TruncatedSVD
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
def AUC(labels_test,predictions):
    labels_true = labels_test.tolist()
    predictions = predictions.tolist()
    for i in range(len(labels_true)):
        labels_true[i]=int(labels_true[i])
    for i in range(len(predictions)):
        predictions[i]=int(predictions[i])
    fpr, tpr, thresholds = metrics.roc_curve(labels_true,predictions, pos_label=1)
    return metrics.auc(fpr, tpr)
    #print("AUC :" , end = '')
    #print(metrics.auc(fpr, tpr))
    #print("\n")

def adjusted_classes(y_scores, t):
Code example #55
def __reset__(): get_ipython().magic('reset -sf')

# import OpenSeesPy rendering module
from openseespy.postprocessing.Get_Rendering import *
Code example #56
def test_dict_key_completion_string():
    """Test dictionary key completion for string keys"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {'abc': None}

    # check completion at different stages
    _, matches = complete(line_buffer="d[")
    nt.assert_in("'abc'", matches)
    nt.assert_not_in("'abc']", matches)

    _, matches = complete(line_buffer="d['")
    nt.assert_in("abc", matches)
    nt.assert_not_in("abc']", matches)

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_not_in("abc']", matches)

    # check use of different quoting
    _, matches = complete(line_buffer="d[\"")
    nt.assert_in("abc", matches)
    nt.assert_not_in('abc\"]', matches)

    _, matches = complete(line_buffer="d[\"a")
    nt.assert_in("abc", matches)
    nt.assert_not_in('abc\"]', matches)

    # check sensitivity to following context
    _, matches = complete(line_buffer="d[]", cursor_pos=2)
    nt.assert_in("'abc'", matches)

    _, matches = complete(line_buffer="d['']", cursor_pos=3)
    nt.assert_in("abc", matches)
    nt.assert_not_in("abc'", matches)
    nt.assert_not_in("abc']", matches)

    # check multiple solutions are correctly returned and that noise is not
    ip.user_ns['d'] = {
        'abc': None,
        'abd': None,
        'bad': None,
        object(): None,
        5: None
    }

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_in("abd", matches)
    nt.assert_not_in("bad", matches)
    assert not any(m.endswith((']', '"', "'")) for m in matches), matches

    # check escaping and whitespace
    ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
    _, matches = complete(line_buffer="d['a")
    nt.assert_in("a\\nb", matches)
    nt.assert_in("a\\'b", matches)
    nt.assert_in("a\"b", matches)
    nt.assert_in("a word", matches)
    assert not any(m.endswith((']', '"', "'")) for m in matches), matches

    # - can complete on non-initial word of the string
    _, matches = complete(line_buffer="d['a w")
    nt.assert_in("word", matches)

    # - understands quote escaping
    _, matches = complete(line_buffer="d['a\\'")
    nt.assert_in("b", matches)

    # - default quoting should work like repr
    _, matches = complete(line_buffer="d[")
    nt.assert_in("\"a'b\"", matches)

    # - when opening quote with ", possible to match with unescaped apostrophe
    _, matches = complete(line_buffer="d[\"a'")
    nt.assert_in("b", matches)

    # need to not split at delims that readline won't split at
    if '-' not in ip.Completer.splitter.delims:
        ip.user_ns['d'] = {'before-after': None}
        _, matches = complete(line_buffer="d['before-af")
        nt.assert_in('before-after', matches)
Code example #57
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 15:07:34 2021

@author: Diogo
"""
"""
Clear the console and remove all variables present on the namespace. This is 
useful to prevent Python from consuming more RAM each time I run the code.
"""
try:
    from IPython import get_ipython
    get_ipython().magic('clear')
    get_ipython().magic('reset -f')
except:
    pass

from pathlib import Path
import pandas as pd
from datetime import datetime
import numpy as np
from tqdm import tqdm
"""
Underlying asset
"""
"""Create dataframe (df) for the data of the underlying from August to 
September 2018"""
underlying1 = pd.read_csv("Raw data/Underlying/SPX_August-September_2018.csv")

# Create df for the data of the underlying from July to August 2019
Code example #58
#     </ul>
# </div>
# <br>
# <hr>
# %% [markdown]
# ### Import libraries
# Lets first import the required libraries.
# Also run <b> %matplotlib inline </b> since we will be plotting in this section.

# %%
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
get_ipython().run_line_magic('matplotlib', 'inline')

# %% [markdown]
# <h1 id="random_generated_dataset">k-Means on a randomly generated dataset</h1>
# Lets create our own dataset for this lab!
#
# %% [markdown]
# First we need to set up a random seed. Use <b>numpy's random.seed()</b> function, where the seed will be set to <b>0</b>

# %%
np.random.seed(0)

# %% [markdown]
# Next we will be making <i> random clusters </i> of points by using the <b> make_blobs </b> class. The <b> make_blobs </b> class can take in many inputs, but we will be using these specific ones. <br> <br>
# <b> <u> Input </u> </b>
# <ul>
Code example #59
import ode_solver
import time
import numpy as np
import matplotlib.pyplot as plt
from IPython import get_ipython
ipython = get_ipython()
backends = ['%matplotlib','%matplotlib inline']
ipython.magic(backends[0])

start_time = time.time()

m2kft = 3.28084/1e3            # kft/m

ry0 = [2253.996, 575]               # initial conditions
sol = ode_solver.shoot(ry0, 0, 20)

t = sol[0]; y = sol[1]*m2kft
fig = plt.figure(1)
plt.xlabel('t (s)'); plt.ylabel('y (kft)'); plt.title('Altitude'); plt.grid();
yy = fig.add_subplot(111); line1, = yy.plot(t, y, 'b', label='y(t)') 
plt.show()
 
# %% 
plt.pause(0.5)
ry1 = [4572, 575-100]               # initial conditions
sol1 = ode_solver.shoot(ry1, 5, 20)
t1 = sol1[0]; y1 = sol1[1]*m2kft
plt.plot(t1,y1,'r')

#line1.set_ydata(y1)
#fig.canvas.draw()
Code example #60
def test_from_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('B', 'from io import B', 16)
    nt.assert_in('BytesIO', matches)
    nt.assert_not_in('BaseException', matches)