def refill_readline_hist(self):
    # Load the last 1000 lines from history
    self.readline.clear_history()
    stdin_encoding = sys.stdin.encoding or "utf-8"
    last_cell = u""
    for _, _, cell in self.history_manager.get_tail(self.history_load_length,
                                                    include_latest=True):
        # Ignore blank lines and consecutive duplicates
        cell = cell.rstrip()
        if cell and (cell != last_cell):
            try:
                if self.multiline_history:
                    self.readline.add_history(
                        py3compat.unicode_to_str(cell, stdin_encoding))
                else:
                    for line in cell.splitlines():
                        self.readline.add_history(
                            py3compat.unicode_to_str(line, stdin_encoding))
                last_cell = cell
            except (TypeError, ValueError) as e:
                # The history DB can get corrupted so it returns strings
                # containing null bytes, which readline objects to.
                warn(("Failed to add string to readline history.\n"
                      "Error: {}\n"
                      "Cell: {!r}").format(e, cell))

def test_replace_multiline_hist_replaces_twice(self):
    """Test that multiline entries are replaced twice"""
    ip = get_ipython()
    ip.multiline_history = True

    ip.readline.add_history(u'line0')
    #start cell
    hlen_b4_cell = ip.readline.get_current_history_length()
    ip.readline.add_history('l€ne1')
    ip.readline.add_history('line2')
    #replace cell with single line
    hlen_b4_cell = ip._replace_rlhist_multiline(u'l€ne1\nline2', hlen_b4_cell)
    ip.readline.add_history('l€ne3')
    ip.readline.add_history('line4')
    #replace cell with single line
    hlen_b4_cell = ip._replace_rlhist_multiline(u'l€ne3\nline4', hlen_b4_cell)

    self.assertEqual(ip.readline.get_current_history_length(), hlen_b4_cell)
    hist = self.rl_hist_entries(ip.readline, 3)
    expected = [u'line0', u'l€ne1\nline2', u'l€ne3\nline4']
    # perform encoding, in case of casting due to ASCII locale
    enc = sys.stdin.encoding or "utf-8"
    expected = [py3compat.unicode_to_str(e, enc) for e in expected]
    self.assertEqual(hist, expected)

def __init__(self, ipython_app, kernel_manager, notebook_manager,
             cluster_manager, log, base_project_url, settings_overrides):
    handlers = [
        (r"/", ProjectDashboardHandler),
        (r"/login", LoginHandler),
        (r"/logout", LogoutHandler),
        (r"/new", NewHandler),
        (r"/%s" % _notebook_id_regex, NamedNotebookHandler),
        (r"/%s/copy" % _notebook_id_regex, NotebookCopyHandler),
        (r"/%s/print" % _notebook_id_regex, PrintNotebookHandler),
        (r"/kernels", MainKernelHandler),
        (r"/kernels/%s" % _kernel_id_regex, KernelHandler),
        (r"/kernels/%s/%s" % (_kernel_id_regex, _kernel_action_regex), KernelActionHandler),
        (r"/kernels/%s/iopub" % _kernel_id_regex, IOPubHandler),
        (r"/kernels/%s/shell" % _kernel_id_regex, ShellHandler),
        (r"/notebooks", NotebookRootHandler),
        (r"/notebooks/%s" % _notebook_id_regex, NotebookHandler),
        (r"/rstservice/render", RSTHandler),
        (r"/files/(.*)", AuthenticatedFileHandler, {'path': notebook_manager.notebook_dir}),
        (r"/clusters", MainClusterHandler),
        (r"/clusters/%s/%s" % (_profile_regex, _cluster_action_regex), ClusterActionHandler),
        (r"/clusters/%s" % _profile_regex, ClusterProfileHandler),
    ]

    # Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
    # base_project_url will always be unicode, which will in turn
    # make the patterns unicode, and ultimately result in unicode
    # keys in kwargs to handler._execute(**kwargs) in tornado.
    # This enforces that base_project_url be ascii in that situation.
    #
    # Note that the URLs these patterns check against are escaped,
    # and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
    base_project_url = py3compat.unicode_to_str(base_project_url, 'ascii')

    settings = dict(
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        cookie_secret=os.urandom(1024),
        login_url="%s/login" % (base_project_url.rstrip('/')),
    )

    # allow custom overrides for the tornado web app.
    settings.update(settings_overrides)

    # prepend base_project_url onto the patterns that we match
    new_handlers = []
    for handler in handlers:
        pattern = url_path_join(base_project_url, handler[0])
        new_handler = tuple([pattern] + list(handler[1:]))
        new_handlers.append(new_handler)

    super(NotebookWebApplication, self).__init__(new_handlers, **settings)
    self.kernel_manager = kernel_manager
    self.notebook_manager = notebook_manager
    self.cluster_manager = cluster_manager
    self.ipython_app = ipython_app
    self.read_only = self.ipython_app.read_only
    self.log = log

def imaris_pull(self, line):
    '''Line-level magic that pulls objects from Imaris.
    You can pull: spots, surfaces, filaments or cells.

        In [6]: %imaris_pull spots
        Out[6]: ['Spots 1', 'Spots 1 Selection']

        In [7]: spots[_[0]]
        Out[7]: 36ffa5dc-2456-4c94-9ad3-e639acd67122 -t:tcp -h 192.168.1.11 -p 62088
    '''
    if line == "":
        print(self.imaris_pull.__doc__)
        return

    ret = []
    outputs = line.split(' ')
    for output in outputs:
        output = unicode_to_str(output).lower()
        if output in ["spots", "surfaces", "filaments", "cells"]:
            objs = BridgeLib.GetSurpassObjects(self.vImaris, output)
            self.shell.push({output: objs})
            ret.append(objs.keys())
        else:
            raise ImarisMagicError('No such object available')

    if len(outputs) == 1:
        return ret[0]
    else:
        return ret

def test_encode_images():
    # invalid data, but the header and footer are from real files
    pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82'
    jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9'
    pdfdata = b'%PDF-1.\ntrailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>]>>>>>>'

    fmt = {
        'image/png': pngdata,
        'image/jpeg': jpegdata,
        'application/pdf': pdfdata,
    }
    encoded = encode_images(fmt)
    for key, value in iteritems(fmt):
        # encoded has unicode, want bytes
        decoded = decodestring(encoded[key].encode('ascii'))
        nt.assert_equal(decoded, value)
    encoded2 = encode_images(encoded)
    nt.assert_equal(encoded, encoded2)

    b64_str = {}
    for key, encoded in iteritems(encoded):
        b64_str[key] = unicode_to_str(encoded)
    encoded3 = encode_images(b64_str)
    nt.assert_equal(encoded3, b64_str)
    for key, value in iteritems(fmt):
        # encoded3 has str, want bytes
        decoded = decodestring(str_to_bytes(encoded3[key]))
        nt.assert_equal(decoded, value)

def ferret_getdata(self, line):
    '''
    Line-level magic to get data from ferret.

        In [18]: %%ferret
           ....: use levitus_climatology
        In [19]: %ferret_getdata tempdict = temp
           ....: Message: tempdict is now available in python as a dictionary containing the variable's metadata and data array.
        In [20]: print tempdict.keys()
           ....: ['axis_coords', 'axis_types', 'data_unit', 'axis_units', 'title', 'axis_names', 'missing_value', 'data']
    '''
    args = parse_argstring(self.ferret_getdata, line)

    code = unicode_to_str(''.join(args.code))
    pythonvariable = code.split('=')[0]
    ferretvariable = code.split('=')[1]
    exec('%s = pyferret.getdata("%s", %s)' %
         (pythonvariable, ferretvariable, args.create_mask))
    self.shell.push("%s" % pythonvariable)
    publish_display_data('ferretMagic.ferret', {'text/html':
        '<pre style="background-color:#F2F5A9; border-radius: 4px 4px 4px 4px; font-size: smaller">' +
        'Message: ' + pythonvariable +
        " is now available in python as a dictionary containing the variable's metadata and data array."
        '</pre>'
    })

def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
    """Store multiple lines as a single entry in history"""

    # do nothing without readline or disabled multiline
    if not self.has_readline or not self.multiline_history:
        return hlen_before_cell

    # windows rl has no remove_history_item
    if not hasattr(self.readline, "remove_history_item"):
        return hlen_before_cell

    # skip empty cells
    if not source_raw.rstrip():
        return hlen_before_cell

    # nothing changed do nothing, e.g. when rl removes consecutive dups
    hlen = self.readline.get_current_history_length()
    if hlen == hlen_before_cell:
        return hlen_before_cell

    for i in range(hlen - hlen_before_cell):
        self.readline.remove_history_item(hlen - i - 1)
    stdin_encoding = get_stream_enc(sys.stdin, 'utf-8')
    self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(),
                                                       stdin_encoding))
    return self.readline.get_current_history_length()

def complete_request(self, text):
    line = str_to_unicode(readline.get_line_buffer())
    byte_cursor_pos = readline.get_endidx()

    # get_endidx is a byte offset
    # account for multi-byte characters to get correct cursor_pos
    bytes_before_cursor = cast_bytes(line)[:byte_cursor_pos]
    cursor_pos = len(cast_unicode(bytes_before_cursor))

    # send completion request to kernel
    # Give the kernel up to 5s to respond
    msg_id = self.client.complete(
        code=line,
        cursor_pos=cursor_pos,
    )

    msg = self.client.shell_channel.get_msg(timeout=self.timeout)
    if msg['parent_header']['msg_id'] == msg_id:
        content = msg['content']
        cursor_start = content['cursor_start']
        matches = [line[:cursor_start] + m for m in content['matches']]
        if content["cursor_end"] < cursor_pos:
            extra = line[content["cursor_end"]:cursor_pos]
            matches = [m + extra for m in matches]
        matches = [unicode_to_str(m) for m in matches]
        return matches
    return []

def eval(self, line):
    """Evaluate a line or block of code in R

    Parameters
    ----------
    line : str
        The code to execute

    Examples
    --------
    >>> r.eval('''
    ... x = 1:5
    ... df = data.frame(x=x, y=x^2)
    ... print(df)
    ... ''')
      x  y
    1 1  1
    2 2  4
    3 3  9
    4 4 16
    5 5 25
    """
    old_writeconsole = ri.get_writeconsole()
    ri.set_writeconsole(self._write_console)
    try:
        value = ri.baseenv['eval'](ri.parse(line))
    except (ri.RRuntimeError, ValueError) as exception:
        warning_or_other_msg = self._flush()  # otherwise next return seems to have copy of error
        raise RInterpreterError(line, str_to_unicode(str(exception)),
                                warning_or_other_msg)
    text_output = self._flush()
    ri.set_writeconsole(old_writeconsole)
    if text_output:
        sys.stdout.write(unicode_to_str(text_output, 'utf-8'))

def url_escape(path):
    """Escape special characters in a URL path

    Turns '/foo bar/' into '/foo%20bar/'
    """
    parts = py3compat.unicode_to_str(path, encoding='utf8').split('/')
    return u'/'.join([quote(p) for p in parts])

def warpscript(self, line, cell=None, local_ns=None):
    args = parse_argstring(self.warpscript, line)
    if args.url is not None:
        if len(args.url[0]) > 1:
            warnings.warn("\nOnly one url accepted", UserWarning)
        url = unicode_to_str(args.url[0][0])

    binary_code = code.encode('UTF-8')
    req = urllib2.Request(url, binary_code)
    rsp = urllib2.urlopen(req)
    content = rsp.read()

    if not silent:
        str_response = content.decode('UTF-8')
        obj = json.loads(str_response)
        stream_content = {'name': 'stdout', 'text': content.decode('UTF-8')}
        self.send_response(self.iopub_socket, 'stream', stream_content)

    return {'status': 'ok',
            # The base class increments the execution count
            'execution_count': self.execution_count,
            'payload': [],
            'user_expressions': {},
           }

def ferret_getdata(self, line):
    '''
    Line-level magic to get data from ferret.

        In [18]: %%ferret
           ....: use levitus_climatology
        In [19]: %ferret_getdata tempdict = temp
           ....: Message: tempdict is now available in python as a dictionary containing the variable's metadata and data array.
        In [20]: print(tempdict.keys())
           ....: ['axis_coords', 'axis_types', 'data_unit', 'axis_units', 'title', 'axis_names', 'missing_value', 'data']
    '''
    args = parse_argstring(self.ferret_getdata, line)

    code = unicode_to_str(''.join(args.code))
    pythonvariable = code.split('=')[0]
    ferretvariable = code.split('=')[1]
    exec('%s = pyferret.getdata("%s", %s)' %
         (pythonvariable, ferretvariable, args.create_mask))
    self.shell.push("%s" % pythonvariable)
    publish_display_data({'text/html':
        '<pre style="background-color:#F2F5A9; border-radius: 4px 4px 4px 4px; font-size: smaller">' +
        'Message: ' + pythonvariable +
        " is now available in python as a dictionary containing the variable's metadata and data array."
        '</pre>'
    })

def ferret_putdata(self, line):
    '''
    Line-level magic to put data to ferret.

        In [31]: import numpy as np
           ....: b = {}
           ....: b['name']='myvar'
           ....: b['name']='myvar'
           ....: x=np.linspace(-np.pi*4, np.pi*4, 500)
           ....: b['data']=np.sin(x)/x
           ....: b.keys()
        Out[31]: ['data', 'name']
        In [32]: %ferret_putdata --axis_pos (1,0,2,3,4,5) b
           ....: Message: b is now available in ferret as myvar
    '''
    args = parse_argstring(self.ferret_putdata, line)

    ferretvariable = unicode_to_str(args.code[0])
    if args.axis_pos:
        axis_pos_variable = eval(args.axis_pos)
    else:
        axis_pos_variable = None
    pyferret.putdata(self.shell.user_ns[ferretvariable], axis_pos=axis_pos_variable)
    publish_display_data('ferretMagic.ferret', {'text/html':
        '<pre style="background-color:#F2F5A9; border-radius: 4px 4px 4px 4px; font-size: smaller">' +
        'Message: ' + ferretvariable + ' is now available in ferret as ' +
        self.shell.user_ns[ferretvariable]['name'] +
        '</pre>'
    })

def ferret_putdata(self, line):
    '''
    Line-level magic to put data to ferret.

        In [31]: import numpy as np
           ....: b = {}
           ....: b['name']='myvar'
           ....: b['name']='myvar'
           ....: x=np.linspace(-np.pi*4, np.pi*4, 500)
           ....: b['data']=np.sin(x)/x
           ....: b.keys()
        Out[31]: ['data', 'name']
        In [32]: %ferret_putdata --axis_pos (1,0,2,3,4,5) b
           ....: Message: b is now available in ferret as myvar
    '''
    args = parse_argstring(self.ferret_putdata, line)

    ferretvariable = unicode_to_str(args.code[0])
    if args.axis_pos:
        axis_pos_variable = eval(args.axis_pos)
    else:
        axis_pos_variable = None
    pyferret.putdata(self.shell.user_ns[ferretvariable], axis_pos=axis_pos_variable)
    publish_display_data({'text/html':
        '<pre style="background-color:#F2F5A9; border-radius: 4px 4px 4px 4px; font-size: smaller">' +
        'Message: ' + ferretvariable + ' is now available in ferret as ' +
        self.shell.user_ns[ferretvariable]['name'] +
        '</pre>'
    })

def pprofile(self, line, cell=''):
    """PProfile IPython extension"""
    args = parse_argstring(self.pprofile, line)

    # set the output directory in which to store the profile output and create it
    output_dir = get_arg(args.name, unicode_to_str)
    output_dir = driver.create_output_dir(output_dir)

    # get the width and height
    width = get_arg(args.width, int)
    height = get_arg(args.height, int)

    # get the code
    command_line_code = ' '.join(args.code) + '\n' if len(args.code) > 0 else ''
    code = unicode_to_str(command_line_code + cell)

    # compile the code block and execute the code block
    ccode = driver.compile_code(code, output_dir)
    self.shell.run_code(ccode)

    # process the profile output and return an IFrame to display the profile information in
    uri = driver.process_profile(output_dir)
    display(IFrame(uri, width=width, height=height))

def shebang(line, cell):
    cmd = shlex.split(unicode_to_str(line))
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    out, err = p.communicate(cell)
    if err:
        print >> sys.stderr, err
    print out

def test_replace_multiline_hist_replaces_empty_line(self):
    """Test that multiline history skips empty line cells"""
    ip = get_ipython()
    ip.multiline_history = True

    ip.readline.add_history('line0')
    #start cell
    hlen_b4_cell = ip.readline.get_current_history_length()
    ip.readline.add_history('l€ne1')
    ip.readline.add_history('line2')
    hlen_b4_cell = ip._replace_rlhist_multiline('l€ne1\nline2', hlen_b4_cell)
    ip.readline.add_history('')
    hlen_b4_cell = ip._replace_rlhist_multiline('', hlen_b4_cell)
    ip.readline.add_history('l€ne3')
    hlen_b4_cell = ip._replace_rlhist_multiline('l€ne3', hlen_b4_cell)
    ip.readline.add_history(' ')
    hlen_b4_cell = ip._replace_rlhist_multiline(' ', hlen_b4_cell)
    ip.readline.add_history('\t')
    ip.readline.add_history('\t ')
    hlen_b4_cell = ip._replace_rlhist_multiline('\t', hlen_b4_cell)
    ip.readline.add_history('line4')
    hlen_b4_cell = ip._replace_rlhist_multiline('line4', hlen_b4_cell)

    self.assertEqual(ip.readline.get_current_history_length(), hlen_b4_cell)
    hist = self.rl_hist_entries(ip.readline, 4)
    # expect no empty cells in history
    expected = ['line0', 'l€ne1\nline2', 'l€ne3', 'line4']
    # perform encoding, in case of casting due to ASCII locale
    enc = sys.stdin.encoding or "utf-8"
    expected = [py3compat.unicode_to_str(e, enc) for e in expected]
    self.assertEqual(hist, expected)

def url_escape(path):
    """Escape special characters in a URL path

    Turns '/foo bar/' into '/foo%20bar/'
    """
    parts = py3compat.unicode_to_str(path).split("/")
    return u"/".join([quote(p) for p in parts])

def init_settings(self, ipython_app, kernel_manager, contents_manager,
                  cluster_manager, session_manager, kernel_spec_manager, log,
                  base_url, default_url, settings_overrides,
                  jinja_env_options=None):
    # Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
    # base_url will always be unicode, which will in turn
    # make the patterns unicode, and ultimately result in unicode
    # keys in kwargs to handler._execute(**kwargs) in tornado.
    # This enforces that base_url be ascii in that situation.
    #
    # Note that the URLs these patterns check against are escaped,
    # and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
    base_url = py3compat.unicode_to_str(base_url, 'ascii')
    template_path = settings_overrides.get(
        "template_path",
        os.path.join(os.path.dirname(__file__), "templates"))
    jenv_opt = jinja_env_options if jinja_env_options else {}
    env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
    settings = dict(
        # basics
        log_function=log_request,
        base_url=base_url,
        default_url=default_url,
        template_path=template_path,
        static_path=ipython_app.static_file_path,
        static_handler_class=FileFindHandler,
        static_url_prefix=url_path_join(base_url, '/static/'),

        # authentication
        cookie_secret=ipython_app.cookie_secret,
        login_url=url_path_join(base_url, '/login'),
        password=ipython_app.password,

        # managers
        kernel_manager=kernel_manager,
        contents_manager=contents_manager,
        cluster_manager=cluster_manager,
        session_manager=session_manager,
        kernel_spec_manager=kernel_spec_manager,

        # IPython stuff
        nbextensions_path=ipython_app.nbextensions_path,
        websocket_url=ipython_app.websocket_url,
        mathjax_url=ipython_app.mathjax_url,
        config=ipython_app.config,
        jinja2_env=env,
    )

    # allow custom overrides for the tornado web app.
    settings.update(settings_overrides)
    return settings

def compress_user(path):
    """Reverse of :func:`os.path.expanduser`
    """
    path = py3compat.unicode_to_str(path, sys.getfilesystemencoding())
    home = os.path.expanduser('~')
    if path.startswith(home):
        path = "~" + path[len(home):]
    return path

def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
    """Store multiple lines as a single entry in history"""
    if self.multiline_history and self.has_readline:
        hlen = self.readline.get_current_history_length()
        for i in range(hlen - hlen_before_cell):
            self.readline.remove_history_item(hlen - i - 1)
        stdin_encoding = sys.stdin.encoding or "utf-8"
        self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(),
                                                           stdin_encoding))

def url_unescape(path):
    """Unescape special characters in a URL path

    Turns '/foo%20bar/' into '/foo bar/'
    """
    return u'/'.join([
        py3compat.str_to_unicode(unquote(p), encoding='utf8')
        for p in py3compat.unicode_to_str(path, encoding='utf8').split('/')
    ])

def url_unescape(path):
    """Unescape special characters in a URL path

    Turns '/foo%20bar/' into '/foo bar/'
    """
    return u'/'.join([
        py3compat.str_to_unicode(unquote(p))
        for p in py3compat.unicode_to_str(path).split('/')
    ])

def pinfo(self, obj, oname='', formatter=None, info=None, detail_level=0):
    """Show detailed information about an object.

    Optional arguments:

    - oname: name of the variable pointing to the object.

    - formatter: special formatter for docstrings (see pdoc)

    - info: a structure with some information fields which may have been
      precomputed already.

    - detail_level: if set to 1, more information is given.
    """
    info = self.info(obj, oname=oname, formatter=formatter,
                     info=info, detail_level=detail_level)
    displayfields = []
    for title, key in self.pinfo_fields1:
        field = info[key]
        if field is not None:
            displayfields.append((title, field.rstrip()))

    # Source or docstring, depending on detail level and whether
    # source found.
    if detail_level > 0 and info['source'] is not None:
        displayfields.append(("Source",
                              self.format(py3compat.unicode_to_str(info['source']))))
    elif info['docstring'] is not None:
        displayfields.append(("Docstring", info["docstring"]))

    # Constructor info for classes
    if info['isclass']:
        if info['init_definition'] or info['init_docstring']:
            displayfields.append(("Constructor information", ""))
            if info['init_definition'] is not None:
                displayfields.append((" Definition",
                                      info['init_definition'].rstrip()))
            if info['init_docstring'] is not None:
                displayfields.append((" Docstring",
                                      indent(info['init_docstring'])))

    # Info for objects:
    else:
        for title, key in self.pinfo_fields_obj:
            field = info[key]
            if field is not None:
                displayfields.append((title, field.rstrip()))

    # Finally send to printer/pager:
    if displayfields:
        page.page(self._format_fields(displayfields))

def psource(self, obj, oname=''):
    """Print the source code for an object."""
    # Flush the source cache because inspect can return out-of-date source
    linecache.checkcache()
    try:
        src = getsource(obj)
    except:
        self.noinfo('source', oname)
    else:
        page.page(self.format(py3compat.unicode_to_str(src)))

def _getdef(self, obj, oname=''):
    """Return the definition header for any callable object.

    If any exception is generated, None is returned instead and the
    exception is suppressed."""
    try:
        # We need a plain string here, NOT unicode!
        hdef = oname + inspect.formatargspec(*getargspec(obj))
        return py3compat.unicode_to_str(hdef, 'ascii')
    except:
        return None

def lpy_rule(self, line):
    '''
    TODO : Update the doc string
    Line-level magic that pulls a variable from Lpy.

        In [1]: %lpy_axiom 'A'

        In [2]: %lpy_rule 'A --> F A'

        In [3]: %%lpy -n 10
    '''
    rule = unicode_to_str(line)
    self._lsys.addRule(rule)

def test_unicode_repr():
    u = u"üniçodé"
    ustr = unicode_to_str(u)

    class C(object):
        def __repr__(self):
            return ustr

    c = C()
    p = pretty.pretty(c)
    nt.assert_equal(p, u)
    p = pretty.pretty([c])
    nt.assert_equal(p, u'[%s]' % u)

def _run_cmd(self):
    # On Windows, use os.system instead of subprocess.call, because I
    # was having problems with subprocess and I just don't know enough
    # about win32 to debug this reliably.  Os.system may be the 'old
    # fashioned' way to do it, but it works just fine.  If someone
    # later can clean this up that's fine, as long as the tests run
    # reliably in win32.
    # What types of problems are you having. They may be related to
    # running Python in unbuffered mode.  BG.
    for ndx, arg in enumerate(self.call_args):
        # Enclose in quotes if necessary and legal
        if ' ' in arg and os.path.isfile(arg) and arg[0] != '"':
            self.call_args[ndx] = '"%s"' % arg
    call_args = [py3compat.cast_unicode(x) for x in self.call_args]
    cmd = py3compat.unicode_to_str(u' '.join(call_args))
    return os.system(cmd)

def eval(self, line):
    '''
    Parse and evaluate a line with rpy2.
    Returns the output to R's stdout() connection
    and the value of eval(parse(line)).
    '''
    old_writeconsole = ri.get_writeconsole()
    ri.set_writeconsole(self.write_console)
    try:
        value = ri.baseenv['eval'](ri.parse(line))
    except (ri.RRuntimeError, ValueError) as exception:
        warning_or_other_msg = self.flush()  # otherwise next return seems to have copy of error
        raise RMagicError(unicode_to_str(
            'parsing and evaluating line "%s".\nR error message: "%s"\n R stdout:"%s"\n' %
            (line, str_to_unicode(exception.message, 'utf-8'),
             warning_or_other_msg)))
    text_output = self.flush()
    ri.set_writeconsole(old_writeconsole)
    return text_output, value

def init_settings(self, ipython_app, kernel_manager, notebook_manager,
                  cluster_manager, log,
                  base_project_url, settings_overrides):
    # Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
    # base_project_url will always be unicode, which will in turn
    # make the patterns unicode, and ultimately result in unicode
    # keys in kwargs to handler._execute(**kwargs) in tornado.
    # This enforces that base_project_url be ascii in that situation.
    #
    # Note that the URLs these patterns check against are escaped,
    # and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
    base_project_url = py3compat.unicode_to_str(base_project_url, 'ascii')
    template_path = os.path.join(os.path.dirname(__file__), "templates")
    settings = dict(
        # basics
        base_project_url=base_project_url,
        base_kernel_url=ipython_app.base_kernel_url,
        template_path=template_path,
        static_path=ipython_app.static_file_path,
        static_handler_class=FileFindHandler,
        static_url_prefix=url_path_join(base_project_url, '/static/'),

        # authentication
        cookie_secret=os.urandom(1024),
        login_url=url_path_join(base_project_url, '/login'),
        cookie_name='username-%s' % uuid.uuid4(),
        read_only=ipython_app.read_only,
        password=ipython_app.password,

        # managers
        kernel_manager=kernel_manager,
        notebook_manager=notebook_manager,
        cluster_manager=cluster_manager,

        # IPython stuff
        mathjax_url=ipython_app.mathjax_url,
        max_msg_size=ipython_app.max_msg_size,
        config=ipython_app.config,
        use_less=ipython_app.use_less,
        jinja2_env=Environment(loader=FileSystemLoader(template_path)),
    )

    # allow custom overrides for the tornado web app.
    settings.update(settings_overrides)
    return settings

def __call__(self, obj):
    """Compute the pretty representation of the object."""
    if not self.pprint:
        return pretty._safe_repr(obj)
    else:
        # This uses StringIO, as cStringIO doesn't handle unicode.
        stream = StringIO()
        # self.newline.encode() is a quick fix for issue gh-597. We need to
        # ensure that stream does not get a mix of unicode and bytestrings,
        # or it will cause trouble.
        printer = pretty.RepresentationPrinter(stream, self.verbose,
            self.max_width, unicode_to_str(self.newline),
            singleton_pprinters=self.singleton_printers,
            type_pprinters=self.type_printers,
            deferred_pprinters=self.deferred_printers)
        printer.pretty(obj)
        printer.flush()
        return stream.getvalue()

def lpy_axiom(self, line):
    '''
    Line-level magic that defines the L-system axiom for Lpy.

    `line` should be made up of a string or an AxialTree
    available in the IPython namespace::

        In [1]: X = 'F(10)[(+30)F(1)]A'

        In [10]: %lpy_axiom X

        In [11]: %%lpy -n 10
        Out[11]: 2.0
    '''
    axiom = line
    axiom = unicode_to_str(axiom)
    self._lsys.axiom = axiom

def ghci_push(self, line):
    '''
    Line-level magic that pushes a variable to ghci.

    `line` should be made up of whitespace separated variable names
    in the IPython namespace::

        In [7]: x = 1

        In [8]: %ghci_push x

        In [11]: %ghci x + 2
        Out[11]: 3
    '''
    inputs = line.split(' ')
    for input in inputs:
        input = unicode_to_str(input)
        self._ghci.put(input, self.shell.user_ns[input])

def _raw_input(self, prompt, ident, parent):
    # Flush output before making the request.
    sys.stderr.flush()
    sys.stdout.flush()
    # flush the stdin socket, to purge stale replies
    while True:
        try:
            self.stdin_socket.recv_multipart(zmq.NOBLOCK)
        except zmq.ZMQError as e:
            if e.errno == zmq.EAGAIN:
                break
            else:
                raise

    # Send the input request.
    content = json_clean(dict(prompt=prompt))
    self.session.send(self.stdin_socket, u'input_request', content, parent,
                      ident=ident)

    # Await a response.
    while True:
        try:
            ident, reply = self.session.recv(self.stdin_socket, 0)
        except Exception:
            self.log.warn("Invalid Message:", exc_info=True)
        except KeyboardInterrupt:
            # re-raise KeyboardInterrupt, to truncate traceback
            raise KeyboardInterrupt
        else:
            break
    try:
        value = py3compat.unicode_to_str(reply['content']['value'])
    except:
        self.log.error("Got bad raw_input reply: ")
        self.log.error("%s", parent)
        value = ''
    if value == '\x04':
        # EOF
        raise EOFError
    return value

def __call__(self, obj):
    """
    Compute the pretty representation of the object.

    Adapted from ``IPython.core.formatters.PlainTextPrettyPrint``.

    INPUT:

    - ``obj`` -- anything.

    OUTPUT:

    String. The plain text representation.

    EXAMPLES::

        sage: from sage.repl.interpreter import get_test_shell
        sage: shell = get_test_shell()
        sage: fmt = shell.display_formatter.formatters['text/plain']
        sage: fmt
        <sage.repl.display.formatter.SagePlainTextFormatter object at 0x...>
        sage: shell.displayhook.compute_format_data(2)
        ({u'text/plain': '2'}, {})
        sage: a = identity_matrix(ZZ, 2)
        sage: shell.displayhook.compute_format_data([a,a])
        ({u'text/plain': '[\n[1 0] [1 0]\n[0 1], [0 1]\n]'}, {})
        sage: fmt.set_display('ascii_art')
        sage: shell.displayhook.compute_format_data([a,a])
        ({u'text/plain': '[ [1 0] [1 0] ]\n[ [0 1], [0 1] ]'}, {})
        sage: i = var('i')
        sage: shell.displayhook.compute_format_data(sum(i*x^i, i, 0, 10))
        ({u'text/plain': ' 10 9 8 7 6 5 4 3 2 \n10*x + 9*x + 8*x + 7*x + 6*x + 5*x + 4*x + 3*x + 2*x + x'}, {})
        sage: fmt.set_display('simple')
    """
    import StringIO
    stream = StringIO.StringIO()
    printer = self._pretty_printer_class(
        stream, self.max_width, unicode_to_str(self.newline))
    printer.pretty(obj)
    printer.flush()
    return stream.getvalue()

def idl_pull(self, line):
    '''
    Line-level magic that pulls a variable from IDL.

        In [18]: %idl x = [1, 2, 3, 4] & y = 'hello'

        In [19]: %idl_pull x y

        In [20]: x
        Out[20]: array([ 1, 2, 3, 4], dtype=int16)

        In [21]: y
        Out[21]: array('hello', dtype='|S5')
    '''
    outputs = line.split(' ')
    for output in outputs:
        output = unicode_to_str(output)
        self.shell.push({output: self._idl.ev(output)})

def octave_pull(self, line):
    '''
    Line-level magic that pulls a variable from Octave.

        In [18]: _ = %octave x = [1 2; 3 4]; y = 'hello'

        In [19]: %octave_pull x y

        In [20]: x
        Out[20]: array([[ 1., 2.],
                        [ 3., 4.]])

        In [21]: y
        Out[21]: 'hello'
    '''
    outputs = line.split(' ')
    for output in outputs:
        output = unicode_to_str(output)
        self.shell.push({output: self._oct.get(output)})

def mathematica_pull(self, line):
    '''
    Line-level magic that pulls a variable from Mathematica.

        In [18]: _ = %mathematica x = {{1,2}, {3,4}}; y = 'hello';

        In [19]: %mathematica_pull x y

        In [20]: x
        Out[20]: array([[ 1., 2.],
                        [ 3., 4.]])

        In [21]: y
        Out[21]: 'hello'
    '''
    outputs = line.split(' ')
    for output in outputs:
        output = unicode_to_str(output)
        self.shell.push({output: self._mathematica.pull(output)})