def graphlog(ui, repo, path=None, **opts):
    """show revision history alongside an ASCII revision graph

    Print a revision history alongside a revision graph drawn with
    ASCII characters. Nodes printed as an @ character are parents of
    the working directory.
    """
    check_unsupported_flags(opts)
    limit = cmdutil.loglimit(opts)
    start, stop = get_revs(repo, opts["rev"])
    # clamp the revision window to at most `limit` entries
    stop = max(stop, start - limit + 1)
    if start == nullrev:
        return
    if path:
        path = util.canonpath(repo.root, os.getcwd(), path)
    if path:  # could be reset in canonpath
        revdag = graphmod.filerevs(repo, path, start, stop)
    else:
        revdag = graphmod.revisions(repo, start, stop)
    # NOTE: `ascii` here is the project-local graph renderer, not the builtin
    fmtdag = asciiformat(ui, repo, revdag, opts)
    ascii(ui, asciiedges(fmtdag))
def test_doctest_main_issue4197(self):
    """Issue 4197: doctest tracebacks must report correct file/line info
    both for a plain script and for the same script zipped as __main__."""
    test_src = textwrap.dedent("""\
        class Test:
            ">>> 'line 2'"
            pass
        import doctest
        doctest.testmod()
        """)
    pattern = 'File "%s", line 2, in %s'
    with test.support.temp_dir() as d:
        # run the script directly and check the reported location
        script_name = make_script(d, 'script', test_src)
        rc, out, err = assert_python_ok(script_name)
        expected = pattern % (script_name, "__main__.Test")
        if verbose:
            print ("Expected line", expected)
            print ("Got stdout:")
            print (ascii(out))
        self.assertIn(expected.encode('utf-8'), out)
        # re-run the same script packaged in a zip archive as __main__.py
        zip_name, run_name = make_zip_script(d, "test_zip", script_name, '__main__.py')
        rc, out, err = assert_python_ok(zip_name)
        expected = pattern % (run_name, "__main__.Test")
        if verbose:
            print ("Expected line", expected)
            print ("Got stdout:")
            print (ascii(out))
        self.assertIn(expected.encode('utf-8'), out)
def _pc_dumpchannicks(self, ctx, chan):
    """Dump the sorted nick list of channel `chan` to the partyline
    context: first the count, then the ascii()-escaped list."""
    chn = self.bnc.nc.conn.channels[chan]
    nicks = list(chn.users.keys())
    nicks.sort()
    # ascii() of an int is just its decimal string; encoded for output
    ctx.output(ascii(len(nicks)).encode('ascii'))
    text = ascii(nicks).encode('ascii')
    ctx.output(text)
def goutgoing(ui, repo, dest=None, **opts):
    """show the outgoing changesets alongside an ASCII revision graph

    Print the outgoing changesets alongside a revision graph drawn with
    ASCII characters. Nodes printed as an @ character are parents of the
    working directory.
    """
    check_unsupported_flags(opts)
    dest, revs, checkout = hg.parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'),
        opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.repository(cmdutil.remoteui(ui, opts), dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    o = repo.findoutgoing(other, force=opts.get('force'))
    if not o:
        ui.status(_("no changes found\n"))
        return
    o = repo.changelog.nodesbetween(o, revs)[0]
    # NOTE: `ascii` here is the project-local graph renderer, not the builtin
    revdag = graphrevs(repo, o, opts)
    fmtdag = asciiformat(ui, repo, revdag, opts)
    ascii(ui, asciiedges(fmtdag))
def compilecomparison(self, comp):
    """Emit a Python class stub for one AETE comparison definition.

    `comp` is a (name, code, comment) triple. The identifier derived from
    `name` is registered with the name mapper; when an output file is
    open, a class with an ascii()-escaped docstring is written to it.
    """
    name, code, comment = comp
    iname = identify(name)
    self.namemappers[0].addnamecode('comparison', iname, code)
    if self.fp:
        self.fp.write('class %s(aetools.NComparison):\n' % iname)
        # ascii() keeps non-ASCII resource text printable in the output
        self.fp.write(' """%s - %s """\n' % (ascii(name), ascii(comment)))
def check_hdf5_file(filename, ref_all_paths, ref_deps):
    """Validate the structure, datasets, and metadata attributes of a
    generated ActivePapers HDF5 file against reference values."""
    h5file = h5py.File(filename, "r")
    all_paths = []
    h5file.visit(all_paths.append)
    all_paths.sort()
    assert all_paths == ref_all_paths
    assert_valid_paper(h5file)
    # numeric datasets must match the analytic reference signals
    assert_almost_equal(h5file["data/frequency"][...], 0.2, 1.e-15)
    assert_almost_equal(h5file["data/time"][...], 0.1*np.arange(100), 1.e-15)
    assert_almost_equal(h5file["data/sine"][...],
                        np.sin(0.04*np.pi*np.arange(100)), 1.e-10)
    for path in ['data/frequency', 'data/sine', 'data/time']:
        assert h5file[path].attrs['ACTIVE_PAPER_DATATYPE'] == "data"
        assert h5file[path].attrs['ACTIVE_PAPER_TIMESTAMP'] > 1.e9
    for path in ['code/calc_sine']:
        assert h5file[path].attrs['ACTIVE_PAPER_DATATYPE'] == "calclet"
    deps = h5file["data/sine"].attrs['ACTIVE_PAPER_DEPENDENCIES']
    # compare ascii() representations so bytes/str differences don't matter
    assert list(ascii(p) for p in deps) \
        == [ascii(p) for p in ref_deps]
    assert h5file["data/sine"].attrs['ACTIVE_PAPER_GENERATING_CODELET'] \
        == "/code/calc_sine"
    h5file.close()
def check_exit_message(code, expected, env=None):
    """Run `code` in a child interpreter and assert it exits with status 1
    and stderr starting with `expected`."""
    # NOTE(review): uses `self` from an enclosing test method that is not
    # visible in this chunk — this is a nested helper, not a free function.
    process = subprocess.Popen([sys.executable, "-c", code],
                               stderr=subprocess.PIPE, env=env)
    stdout, stderr = process.communicate()
    self.assertEqual(process.returncode, 1)
    # ascii() makes undecodable stderr bytes printable in the failure message
    self.assertTrue(stderr.startswith(expected),
                    "%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
def test_issue3297(self):
    """Issue 3297: a literal astral-plane character and its \\U escape
    sequence must compile to equal strings of equal length."""
    c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec")
    d = {}
    exec(c, d)
    self.assertEqual(d['a'], d['b'])
    self.assertEqual(len(d['a']), len(d['b']))
    self.assertEqual(ascii(d['a']), ascii(d['b']))
def log_trace(self, tag, original, loc, tokens=None, extra=None):
    """Formats and displays a trace if tracing.

    `tag` labels the event, `original`/`loc` identify the source position,
    `tokens` is either a parse result or an Exception, and `extra` is an
    optional provenance note appended to the output.
    """
    if self.tracing:
        tag, original, loc = displayable(tag), displayable(original), int(loc)
        # tags containing "{" are format placeholders, not real trace tags
        if "{" not in tag:
            out = ["[" + tag + "]"]
            add_line_col = True
            if tokens is not None:
                if isinstance(tokens, Exception):
                    msg = displayable(str(tokens))
                    if "{" in msg:
                        # collapse any brace-delimited payload in the message
                        head, middle = msg.split("{", 1)
                        middle, tail = middle.rsplit("}", 1)
                        msg = head + "{...}" + tail
                    out.append(msg)
                    # exception messages already carry location info
                    add_line_col = False
                elif len(tokens) == 1 and isinstance(tokens[0], str):
                    out.append(ascii(tokens[0]))
                else:
                    out.append(ascii(tokens))
            if add_line_col:
                out.append("(line:" + str(lineno(loc, original)) + ", col:" + str(col(loc, original)) + ")")
            if extra is not None:
                out.append("from " + ascii(extra))
            printerr(*out)
def test_undecodable_env(self):
    """Environment variables containing surrogate characters must
    round-trip through a child process via os.getenv / os.getenvb.

    Fix: `assertEquals` is a long-deprecated alias (removed in Python
    3.12); replaced with `assertEqual`.
    """
    for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
        # test str with surrogates
        script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = value
        # Use C locale to get ascii for the locale encoding to force
        # surrogate-escaping of \xFF in the child process; otherwise it can
        # be decoded as-is if the default locale is latin-1.
        env['LC_ALL'] = 'C'
        stdout = subprocess.check_output(
            [sys.executable, "-c", script], env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(value))

        # test bytes
        key = key.encode("ascii", "surrogateescape")
        value = value.encode("ascii", "surrogateescape")
        script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script], env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(value))
def _store_property(self, path, property):
    """Store a property's data array as an HDF5 dataset at `path`.

    The property's universe must already have been stored (its reference
    is attached as a dataset attribute). Raises IOError otherwise.
    """
    universe = property.universe
    universe_path = self._get_path(universe)
    if universe_path is None:
        raise IOError("universe must be stored first")
    # per-element shape beyond the leading (item) axis, if any
    element_shape = property.data.shape[1:]
    if element_shape:
        dtype = N.dtype((property.data.dtype, element_shape))
    else:
        dtype = property.data.dtype
    arr = N.ascontiguousarray(property.data)
    ds = self.root.require_dataset(path, shape=(len(arr),), dtype=dtype)
    self._stamp(ds, 'property')
    ds.attrs['universe'] = self.root[universe_path].ref
    # ascii() coerces the metadata strings for HDF5 attribute storage
    ds.attrs['name'] = ascii(property.name)
    ds.attrs['units'] = ascii(property.units)
    ds.attrs['property_type'] = ascii(property.type)
    # There doesn't seem to be any way to write this array
    # using high-level operations, so we use the low-level access.
    mtype = h5py.h5t.py_create(ds.id.dtype)
    mspace = h5py.h5s.create_simple(ds.shape)
    fspace = ds.id.get_space()
    ds.id.write(mspace, fspace, arr, mtype)
def test_ascii(self):
    """Exercise builtin ascii() on simple values, recursive containers,
    and Unicode edge cases (unprintables, lone surrogates, astral chars)."""
    self.assertEqual(ascii(''), '\'\'')
    self.assertEqual(ascii(0), '0')
    self.assertEqual(ascii(0), '0')
    self.assertEqual(ascii(()), '()')
    self.assertEqual(ascii([]), '[]')
    self.assertEqual(ascii({}), '{}')
    # self-referential containers must render as '...'
    a = []
    a.append(a)
    self.assertEqual(ascii(a), '[[...]]')
    a = {}
    a[0] = a
    self.assertEqual(ascii(a), '{0: {...}}')
    # Advanced checks for unicode strings
    def _check_uni(s):
        # for pure-ASCII content ascii() and repr() must agree exactly
        self.assertEqual(ascii(s), repr(s))
    _check_uni("'")
    _check_uni('"')
    _check_uni('"\'')
    _check_uni('\0')
    _check_uni('\r\n\t .')
    # Unprintable non-ASCII characters
    _check_uni('\x85')
    _check_uni('\u1fff')
    _check_uni('\U00012fff')
    # Lone surrogates
    _check_uni('\ud800')
    _check_uni('\udfff')
    # Issue #9804: surrogates should be joined even for printable
    # wide characters (UCS-2 builds).
    self.assertEqual(ascii('\U0001d121'), "'\\U0001d121'")
    # All together
    s = "'\0\"\n\r\t abcd\x85é\U00012fff\uD800\U0001D121xxx."
    self.assertEqual(ascii(s),
        r"""'\'\x00"\n\r\t abcd\x85\xe9\U00012fff\ud800\U0001d121xxx.'""")
def scan_number(self):
    """Scan a signed integer or real number from the input stream.

    Returns an IntegerToken when no decimal point was seen, otherwise a
    RealToken. Raises ScannerException for a number with no digits or
    with more than one decimal point.
    """
    found_a_digit = False
    found_a_decimal_point = False
    whole_part = 0
    fraction_part = 0.0
    fraction_multiplier = 0.1
    sign = 1
    # optional leading sign
    if self.next_char == '+':
        self.advance_cursor()
    elif self.next_char == '-':
        sign = -1
        self.advance_cursor()
    while not self.end_of_expr and ((self.next_char == '.') or
                                    self.next_char.isdigit()):
        if self.next_char == '.':
            if found_a_decimal_point:
                ScannerException.message = "badly formed number - multiple decimal points"
                raise ScannerException
            found_a_decimal_point = True
        else:
            found_a_digit = True
            # int() on a single digit character already yields its value.
            # The original expression subtracted int(ascii(0)) == int('0')
            # == 0 — a confusing no-op — so it has been dropped.
            digit = int(self.next_char)
            if not found_a_decimal_point:
                whole_part = whole_part * 10 + digit
            else:
                fraction_part += digit * fraction_multiplier
                fraction_multiplier /= 10
        self.advance_cursor()
    if not found_a_digit:
        ScannerException.message = "badly formed number - no digits"
        raise ScannerException
    if found_a_decimal_point:
        return RealToken(sign * (whole_part + fraction_part))
    else:
        return IntegerToken(sign * whole_part)
def scan(self, cursor=0, pattern=None, count=None):
    """Incrementally iterate the set of keys in the currently selected
    Redis database (the ``SCAN`` command).

    :meth:`~tredis.RedisClient.scan` is a cursor based iterator: start
    with ``cursor=0``; every call returns an updated cursor to pass to
    the next call, and iteration terminates when the server returns a
    cursor of ``0``. The closely related :meth:`~tredis.RedisClient.sscan`,
    :meth:`~tredis.RedisClient.hscan` and :meth:`~tredis.RedisClient.zscan`
    iterate Sets, Hashes and Sorted Sets respectively. For more detail see
    the `Redis docs on scan <http://redis.io/commands/scan>`_.

    .. note:: **Time complexity**: ``O(1)`` for every call; ``O(N)`` for
       a complete iteration over a collection of ``N`` elements.

    :param int cursor: The server specified cursor value or ``0``
    :param pattern: An optional pattern to apply for key matching
    :type pattern: :class:`str`, :class:`bytes`
    :param int count: An optional amount of work to perform in the scan
    :rtype: int, list
    :returns: A tuple containing the cursor and the list of keys
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    def format_response(value):
        """Split the raw redis reply into a (cursor, keys) tuple."""
        next_cursor, keys = value[0], value[1]
        return int(next_cursor), keys

    command = [b'SCAN', ascii(cursor).encode('ascii')]
    if pattern:
        command.extend([b'MATCH', pattern])
    if count:
        command.extend([b'COUNT', ascii(count).encode('ascii')])
    return self._execute(command, format_callback=format_response)
def generate(ui, dag, displayer, showparents, edgefn):
    """Render each revision in `dag` through `displayer` and draw the
    accompanying ASCII graph edges alongside it."""
    seen, state = [], asciistate()
    for rev, type, ctx, parents in dag:
        # '@' marks working-directory parents, 'o' all other nodes
        char = ctx.node() in showparents and '@' or 'o'
        displayer.show(ctx)
        # drop the trailing empty string produced by the final newline
        lines = displayer.hunk.pop(rev).split('\n')[:-1]
        # `ascii` here is the project-local graph drawing function
        ascii(ui, state, type, char, lines, edgefn(seen, rev, parents))
def removeSomeReviews(c):
    """Trim each unfiltered user's review list and mark the user filtered.

    Reviews are sorted by rating (descending); everything from the first
    rating below 3 onward is dropped, the remainder is capped at 500, and
    the stored model objects are rebuilt with ascii()-escaped text fields.

    Fix: the local variable `list` shadowed the builtin `list`; renamed
    to `reviews` (no behavior change).
    """
    rows = c.getSession().execute('SELECT id FROM prs.users where reviews_filtered=false')
    for row in rows:
        logging.info("Getting reviews for user %s", row['id'])
        user = models.Users.get(id=row['id'])
        reviews = user.list_reviews
        if reviews is None:
            # nothing to trim — just mark the user as processed
            user.reviews_filtered = True
            user.save()
            continue
        reviews.sort(key=lambda r: r.rating, reverse=True)
        index = 0
        for i, r in enumerate(reviews):
            if r.rating < 3:
                index = i
                break
        if index > 0:
            # drop everything from the first low-rated review onward
            reviews[index:] = []
        # hard cap at 500 reviews
        reviews[500:] = []
        user.list_reviews = []
        for r in reviews:
            ac = models.user(id=r.actor.id, gid=r.actor.gid, name=r.actor.name,
                             friends_count=r.actor.friends_count,
                             reviews_count=r.actor.reviews_count,
                             age=r.actor.age, gender=r.actor.gender,
                             small_user=r.actor.small_user,
                             private=r.actor.private)
            # ascii() escapes non-ASCII titles/text for storage
            bk = models.book(id=r.book.id, gid=r.book.gid,
                             title=ascii(r.book.title), isbn=r.book.isbn,
                             isbn13=r.book.isbn13,
                             publication_date=r.book.publication_date,
                             average_rating=r.book.average_rating,
                             ratings_count=r.book.ratings_count,
                             small_book=r.book.small_book,
                             author=r.book.author)
            user.list_reviews.append(models.review(id=r.id, actor=ac, book=bk,
                                                   gid=r.gid, rating=r.rating,
                                                   text=ascii(r.text)))
        user.reviews_count = len(user.list_reviews)
        user.reviews_filtered = True
        user.save()
        logging.info("Finish removing some reviews for user %s", row['id'])
def test_issue3297(self): c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec") d = {} exec(c, d) assert d['a'] == d['b'] assert len(d['a']) == len(d['b']) assert ascii(d['a']) == ascii(d['b'])
def wait(self, num_slaves, timeout=0):
    """This command blocks the current client until all the previous write
    commands are successfully transferred and acknowledged by at least the
    specified number of slaves. If the timeout, specified in milliseconds,
    is reached, the command returns even if the specified number of slaves
    were not yet reached.

    The command will always return the number of slaves that acknowledged
    the write commands sent before the :meth:`~tredis.RedisClient.wait`
    command, both in the case where the specified number of slaves are
    reached, or when the timeout is reached.

    .. note:: **Time complexity**: ``O(1)``

    :param int num_slaves: Number of slaves to acknowledge previous writes
    :param int timeout: Timeout in milliseconds
    :rtype: int
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    # ascii() of an int is its decimal string; encoded for the wire
    command = [b'WAIT', ascii(num_slaves).encode('ascii'),
               ascii(timeout).encode('ascii')]
    return self._execute(command)
def nor_unicode():
    """Demonstrate Unicode normalization forms (NFC, NFD, NFKC, NFKD)
    and diacritic removal with unicodedata."""
    jalapeno_composed = 'Spicy Jalape\u00f1o'
    jalapeno_combining = 'Spicy Jalapen\u0303o'
    print(jalapeno_composed, jalapeno_combining)
    print(jalapeno_composed == jalapeno_combining)
    print(len(jalapeno_composed), len(jalapeno_combining))
    # Normalize both to the composed (NFC) form before comparing.
    nfc_a = unicodedata.normalize('NFC', jalapeno_composed)
    nfc_b = unicodedata.normalize('NFC', jalapeno_combining)
    print(nfc_a == nfc_b)
    print(ascii(nfc_a))
    # And to the fully decomposed (NFD) form.
    nfd_a = unicodedata.normalize('NFD', jalapeno_composed)
    nfd_b = unicodedata.normalize('NFD', jalapeno_combining)
    print(nfd_a == nfd_b)
    print(ascii(nfd_a))
    # NFKC/NFKD additionally decompose compatibility characters,
    # such as the single-character 'fi' ligature below.
    ligature = '\ufb01'  # A single character
    print(ligature, len(ligature))
    print(unicodedata.normalize('NFD', ligature), len(unicodedata.normalize('NFD', ligature)))
    print(unicodedata.normalize('NFKC', ligature), len(unicodedata.normalize('NFKC', ligature)))
    print(unicodedata.normalize('NFKD', ligature), len(unicodedata.normalize('NFKD', ligature)))
    # Strip diacritics: decompose, then drop the combining marks.
    decomposed = unicodedata.normalize('NFD', jalapeno_composed)
    print(''.join(ch for ch in decomposed if not unicodedata.combining(ch)))
def get_feature(smokey_json, feature_name):
    """Find the entry matching `feature_name` in the parsed smokey JSON
    and build a Feature from it; returns None when no URI matches."""
    for feature_json in smokey_json:
        if feature_json['uri'] == get_feature_uri(feature_name):
            return Feature(
                ascii(feature_json['name']),
                ascii(feature_json['uri']),
                # NOTE(review): map() is lazy on Python 3 — confirm Feature
                # accepts an iterator here, or wrap it in list()
                map(get_scenario, find_scenarios(feature_json)))
def getrange(self, key, start, end):
    """Return the substring of the string value stored at ``key``,
    determined by the offsets ``start`` and ``end`` (both inclusive).
    Negative offsets can be used to index from the end of the string,
    where ``-1`` is the last character. Non-existent keys are treated as
    empty strings.

    (The previous docstring described bit-level GETBIT semantics; this
    command is GETRANGE, which operates on substrings.)

    .. versionadded:: 0.2.0

    .. note:: **Time complexity**: ``O(N)`` where ``N`` is the length of
       the returned string. The complexity is ultimately determined by the
       returned length, but because creating a substring from an existing
       string is very cheap, it can be considered ``O(1)`` for small
       strings.

    :param key: The key to get the substring from
    :type key: :class:`str`, :class:`bytes`
    :param int start: The start position to evaluate in the string
    :param int end: The end position to evaluate in the string
    :rtype: bytes|None
    :raises: :exc:`~tredis.exceptions.RedisError`

    """
    return self._execute([b'GETRANGE', key, ascii(start), ascii(end)])
def set(self, key, value, ex=None, px=None, nx=False, xx=False):
    """Set ``key`` to hold the string ``value``.

    Any existing value is overwritten regardless of its type, and any
    previous time-to-live on the key is discarded on a successful set.
    If the value is not one of :class:`str`, :class:`bytes`, or
    :class:`int`, a :exc:`ValueError` will be raised.

    .. note:: **Time complexity**: ``O(1)``

    :param key: The key to remove
    :type key: :class:`str`, :class:`bytes`
    :param value: The value to set
    :type value: :class:`str`, :class:`bytes`, :class:`int`
    :param int ex: Set the specified expire time, in seconds
    :param int px: Set the specified expire time, in milliseconds
    :param bool nx: Only set the key if it does not already exist
    :param bool xx: Only set the key if it already exist
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`
    :raises: :exc:`ValueError`

    """
    command = [b'SET', key, value]
    # optional expiry modifiers
    if ex:
        command.extend([b'EX', ascii(ex).encode('ascii')])
    if px:
        command.extend([b'PX', ascii(px).encode('ascii')])
    # existence-conditional modifiers
    if nx:
        command.append(b'NX')
    if xx:
        command.append(b'XX')
    return self._execute(command, b'OK')
def bitcount(self, key, start=None, end=None):
    """Count the number of set bits (population counting) in a string.

    By default all the bytes contained in the string are examined. It is
    possible to restrict the count to an interval with the additional
    ``start`` and ``end`` arguments; like for the
    :meth:`~tredis.RedisClient.getrange` command they may be negative to
    index bytes from the end of the string, where ``-1`` is the last
    byte, ``-2`` the penultimate, and so forth. Non-existent keys are
    treated as empty strings, so the command will return zero.

    Fix: the error raised when only ``end`` was supplied previously
    repeated the "start without an end" message; it now correctly reads
    "end without a start".

    .. versionadded:: 0.2.0

    .. note:: **Time complexity**: ``O(N)``

    :param key: The key to get
    :type key: :class:`str`, :class:`bytes`
    :param int start: The start position to evaluate in the string
    :param int end: The end position to evaluate in the string
    :rtype: int
    :raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError`

    """
    command = [b'BITCOUNT', key]
    # Redis requires start and end to be supplied together or not at all
    if start is not None and end is None:
        raise ValueError('Can not specify start without an end')
    elif start is None and end is not None:
        raise ValueError('Can not specify end without a start')
    elif start is not None and end is not None:
        command += [ascii(start), ascii(end)]
    return self._execute(command)
def setShowInfo(self):
    """Update the on-screen info labels (502-505) and channel logo (506)
    for the show at the current info offset on the active channel."""
    self.log('setShowInfo')
    # label 502 indicates whether we're looking ahead, behind, or at "now"
    if self.infoOffset > 0:
        self.getControl(502).setLabel(REAL_SETTINGS.getLocalizedString(30041))
    elif self.infoOffset < 0:
        self.getControl(502).setLabel(REAL_SETTINGS.getLocalizedString(30042))
    elif self.infoOffset == 0:
        self.getControl(502).setLabel(REAL_SETTINGS.getLocalizedString(30043))
    if self.hideShortItems and self.infoOffset != 0:
        position = xbmc.PlayList(xbmc.PLAYLIST_MUSIC).getposition()
        curoffset = 0
        modifier = 1
        if self.infoOffset < 0:
            modifier = -1
        # walk the playlist, counting only items long enough to display
        while curoffset != abs(self.infoOffset):
            position = self.channels[self.currentChannel - 1].fixPlaylistIndex(position + modifier)
            if self.channels[self.currentChannel - 1].getItemDuration(position) >= self.shortItemLength:
                curoffset += 1
    else:
        position = xbmc.PlayList(xbmc.PLAYLIST_MUSIC).getposition() + self.infoOffset
    self.getControl(503).setLabel(self.channels[self.currentChannel - 1].getItemTitle(position))
    self.getControl(504).setLabel(self.channels[self.currentChannel - 1].getItemEpisodeTitle(position))
    self.getControl(505).setLabel(self.channels[self.currentChannel - 1].getItemDescription(position))
    # ascii() sanitizes the channel name for use as a logo filename
    self.getControl(506).setImage(self.channelLogos + ascii(self.channels[self.currentChannel - 1].name) + '.png')
    if not FileAccess.exists(self.channelLogos + ascii(self.channels[self.currentChannel - 1].name) + '.png'):
        self.getControl(506).setImage(IMAGES_LOC + 'Default.png')
    self.log('setShowInfo return')
def comparecur(path_o, path_b):
    """One-way sync of directory `path_o` (original) into `path_b` (backup).

    New and changed entries are copied across; entries missing from the
    original are renamed with a "DEL<date>__" prefix, and such renamed
    entries are removed for good once older than the module-level
    `monthago` cutoff. Recurses into subdirectories.
    """
    list_o = os.listdir(path_o)
    list_b = os.listdir(path_b)
    for item_o in list_o:
        # look for new files
        if(not item_o in list_b):
            print("new file: " + ascii(path_o + item_o))
            if(os.path.isdir(path_o+item_o)):
                shutil.copytree(path_o + item_o , path_b + item_o)
            else:
                shutil.copy2(path_o + item_o, path_b + item_o)
        if(os.path.isdir(path_o + item_o)):
            # recurse into the subdirectory
            comparecur(path_o + item_o + "/", path_b + item_o + "/" )
        else:
            # copy files whose original is newer than the backup
            if(os.path.getmtime(path_o+item_o)>os.path.getmtime(path_b+item_o)):
                print("changed: " + ascii(path_o + item_o))
                shutil.copy2(path_o+item_o, path_b+item_o)
    for item_b in list_b:
        if(not item_b in list_o):
            if(item_b[:3]=="DEL"):
                # date-of-deletion is encoded in the name as DELyyyymmdd
                dod = int(item_b[3:11])
                # NOTE: `monthago` and `today` are module-level globals
                if(dod < int(monthago)):
                    print("finally deleted: " + ascii(path_b + item_b))
                    if (os.path.isdir(path_b+item_b)):
                        shutil.rmtree(path_b+item_b)
                    else:
                        os.remove(path_b + item_b)
            else:
                print("deleted: " + ascii(path_b + item_b))
                newname_b = "DEL" + today + "__" + item_b
                os.rename(path_b+item_b, path_b+newname_b)
def ctcp_stringify(messages):
    """
    :type messages: ``list``
    :param messages: a list of extended messages. An extended message is
        a ``(tag, data)`` tuple, where 'data' may be ``None``, a
        ``string``, or a ``list`` of strings to be joined with
        white-space.

    :rtype: ``str``
    :returns: *Stringified* message
    """
    coded_messages = []
    for (tag, data) in messages:
        if data:
            # NOTE(review): types.StringType exists only on Python 2 —
            # this function appears to target Python 2; confirm before
            # reusing under Python 3.
            if not isinstance(data, types.StringType):
                try:
                    # data as list-of-strings
                    data = " ".join(map(str, data))
                except TypeError:
                    # No? Then use it's %s representation.
                    pass
            m = ascii("%s %s" % (tag, data))
        else:
            m = ascii(tag)
        m = ctcp_quote(m)
        # wrap the quoted payload in the CTCP X_DELIM markers
        m = ascii("%s%s%s" % (X_DELIM, m, X_DELIM))
        coded_messages.append(m)
    return ascii("").join(coded_messages)
def main (arg0, argv):
    """Entry point: configure logging, initialize enums, create the IPC
    pipe, then start the brain and readline loops.

    argv is expected to be [dbfile, sockfile].
    """
    D("argv: " + ascii(argv))
    dbfile = argv[0]
    sockfile = argv[1]
    logformat = "%(asctime)s %(threadName)s %(levelname)s %(message)s"
    logging.basicConfig(format=logformat, level=logging.DEBUG)
    # log timestamps in UTC
    logging.Formatter.converter = time.gmtime
    import signal
    #signal.signal(signal.SIGINT, signal.SIG_IGN)
    #signal.signal(signal.SIGQUIT, signal.SIG_IGN)
    #signal.signal(signal.SIGTSTP, signal.SIG_IGN)
    enum_dir = "./init/enum.d"
    D("initializing enums")
    _init_enums_hardcoded(enum_dir)
    D("creating pipes")
    r, w = os.pipe()
    # ascii() of an int is just its decimal string
    D("pipes: r="+ascii(r)+"; w="+ascii(w))
    D("doing brain")
    do_brain(dbfile, sockfile, r)
    D("doing readline")
    do_readline(w)
    D("main thread done")
def __init__(self):
    """Initialize the interactive shell: discover the local IP address,
    print the banner, and register the default target."""
    cmd.Cmd.__init__(self)
    self.enabled_modules = enabled_modules
    self.target_num = 1
    self.port = 22
    self.targets = {}
    self.curtarget = None
    # shell pipeline that extracts the first inet address from ifconfig
    proc = subprocess.Popen(
        ["ifconfig | grep inet | head -n1 | cut -d\ -f12 | cut -d: -f2"],
        stdout=subprocess.PIPE,
        shell=True
    )
    self.localIP = proc.stdout.read()
    if six.PY3:
        # strip trailing newline and decode bytes to str
        self.localIP = str(self.localIP[:-1], "utf-8")
    else:
        self.localIP = self.localIP[:-1].encode("ascii", "ignore").decode("ascii")
    self.ctrlc = False
    # NOTE(review): called with no arguments, so this cannot be the
    # builtin ascii(); presumably a project-local banner printer — confirm.
    ascii()
    print(
        'Welcome to BackdoorMe, a powerful backdooring utility. Type "help" to see the list of available commands.'
    )
    print('Type "addtarget" to set a target, and "open" to open an SSH connection to that target.')
    print("Using local IP of %s." % self.localIP)
    self.addtarget("10.1.0.5", "student", "target123")
def setShowInfo(self):
    """Fill the EPG info labels (500-502) and channel logo (503) for the
    programme under the currently focused grid cell."""
    self.log('setShowInfo')
    basex, basey = self.getControl(111 + self.focusRow).getPosition()
    baseh = self.getControl(111 + self.focusRow).getHeight()
    basew = self.getControl(111 + self.focusRow).getWidth()
    # use the selected time to set the video
    left, top = self.channelButtons[self.focusRow][self.focusIndex].getPosition()
    width = self.channelButtons[self.focusRow][self.focusIndex].getWidth()
    # centre of the focused button, relative to the row
    left = left - basex + (width / 2)
    # 5400 seconds (1.5 h) of programme time are shown across the row width
    starttime = self.shownTime + (left / (basew / 5400.0))
    chnoffset = self.focusRow - 2
    newchan = self.centerChannel
    # walk from the centre channel to the focused row's channel
    while chnoffset != 0:
        if chnoffset > 0:
            newchan = self.MyOverlayWindow.fixChannel(newchan + 1, True)
            chnoffset -= 1
        else:
            newchan = self.MyOverlayWindow.fixChannel(newchan - 1, False)
            chnoffset += 1
    plpos = self.determinePlaylistPosAtTime(starttime, newchan)
    if plpos == -1:
        self.log('Unable to find the proper playlist to set from EPG')
        return
    self.getControl(500).setLabel(self.MyOverlayWindow.channels[newchan - 1].getItemTitle(plpos))
    self.getControl(501).setLabel(self.MyOverlayWindow.channels[newchan - 1].getItemEpisodeTitle(plpos))
    self.getControl(502).setLabel(self.MyOverlayWindow.channels[newchan - 1].getItemDescription(plpos))
    # ascii() sanitizes the channel name for use as a logo filename
    self.getControl(503).setImage(self.channelLogos + ascii(self.MyOverlayWindow.channels[newchan - 1].name) + '.png')
    if not FileAccess.exists(self.channelLogos + ascii(self.MyOverlayWindow.channels[newchan - 1].name) + '.png'):
        self.getControl(503).setImage(IMAGES_LOC + 'Default.png')
    self.log('setShowInfo return')
def compare_ngrams(left, right, N=2, pad_len=0):
    """Return the N-gram similarity of the ascii() representations of two
    values, in [0.0, 1.0]."""
    left, right = ascii(left), ascii(right)
    if len(left) == 1 and len(right) == 1:
        # NGram.compare returns 0.0 for 1 letter comparison, even if
        # letters are equal — handle that case explicitly.
        return 1.0 if left == right else 0.0
    return NGram.compare(left, right, N=N, pad_len=pad_len)
def wrapper(*args, **kwargs):
    # NOTE(review): closes over `f` from an enclosing decorator that is
    # not visible in this chunk; forwards all arguments and returns the
    # ascii() representation of f's result.
    x = f(*args, **kwargs)
    return ascii(x)
print(all([0, 4])) # >>> False print(all([])) # >>> True # argument: iterable objects # return True if all elements of the iterable are true # if the iterable is empty, return True print('\n', 3) print('any(iterable)') print(any([0, 4])) # >>> True # argument: iterable objects # return True if any elements of the iterable are true # if the iterable is empty, return True print('\n', 4) print('ascii(object)') print(ascii('ö')) # >>> xf6n print('Pyth\xf6n') # >>>Pythön # argument: an object # return a string containing a printable representation of an object, but escape the non-ASCII characters in the string returned by repr() using \x, \u, \U escapes. # For example, ö is changed to \xf6n, √ is changed to \u221a print('\n', 5) print('bin(x)') print(bin(3)) # >>> 0b11 print(bin(-10)) # >>> -0b1010 print(format(10, 'b')) # >>> 1010, this can remove the '0b' # convert to binary number with a prefix '0b' # argument: an integer number # return the binary value print('\n', 6)
# NOTE(review): this code uses machine-translated Polish keywords
# (zwróć=return, jeżeli=if, albo_inaczej=elif, podnieś=raise, Nic=None,
# jest=is) — it only parses under a translated-Python dialect, not CPython.
def format_field(self, value, format_spec):
    # delegate to the builtin format() for the actual formatting
    zwróć format(value, format_spec)

def convert_field(self, value, conversion):
    # do any conversion on the resulting object:
    # None -> unchanged, 's' -> str, 'r' -> repr, 'a' -> ascii
    jeżeli conversion jest Nic:
        zwróć value
    albo_inaczej conversion == 's':
        zwróć str(value)
    albo_inaczej conversion == 'r':
        zwróć repr(value)
    albo_inaczej conversion == 'a':
        zwróć ascii(value)
    podnieś ValueError("Unknown conversion specifier {0!s}".format(conversion))

# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
# literal_text can be zero length
# field_name can be None, in which case there's no
# object to format and output
# if field_name is not None, it is looked up, formatted
# with format_spec and conversion and then used
def parse(self, format_string):
    zwróć _string.formatter_parser(format_string)

# given a field_name, find the object it references.
# ------------------------------------------------------------------------------------- # any(iterable) print("any()".center(30, "*")) print("any([]) = ", any([])) print("any([1,2,3]) = ", any([1, 2, 3])) print("any(['a',2,0]) = ", any(['a', 2, 0])) print("any(['a', 3, ' ']) = ", any(['a', 3, ' '])) print("any(['a', 3, '']) = ", any(['a', 3, ''])) print() # ------------------------------------------------------------------------------------- # ascii(object) - returns the printable representation of object passed in it print("ascii".center(30, "*")) print("ascii('¥') = ", ascii("¥")) print("ascii('µ') = ", ascii("µ")) print("ascii('Ë') = ", ascii("Ë")) print("ascii('a') = ", ascii('a')) print("ascii(1) = ", ascii(1)) print() # ------------------------------------------------------------------------------------- # bin(int) - returns binary representation of passed integer. print("bin()".center(30, "*")) print("bin(23) = ", bin(23)) print("bin(-23) = ", bin(-23)) print("You can get binary of a number using format() and f{}") print("format(23, #b) = ", format(23, '#b')) print("format(23, #b) = ", format(23, 'b'))
def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(unicode('US/Eastern')) is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> try:
    ...     timezone('Asia/Shangri-La')
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown

    >>> try:
    ...     timezone(unicode('\N{TRADE MARK SIGN}'))
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown

    '''
    if zone is None:
        raise UnknownTimeZoneError(None)
    if zone.upper() == 'UTC':
        return utc
    try:
        # NOTE(review): if this were the *builtin* ascii() the result
        # would be a quoted repr and never match a zone name — presumably
        # a module-local ASCII-coercion helper shadows it; confirm.
        zone = ascii(zone)
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    zone = _case_insensitive_zone_lookup(_unmunge_zone(zone))
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:  # noqa
            # lazily build and cache the tzinfo from the zone data file
            fp = open_resource(zone)
            try:
                _tzinfo_cache[zone] = build_tzinfo(zone, fp)
            finally:
                fp.close()
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone]
# NOTE(review): fragment of a test method — the enclosing `def` (and the
# object bound to `self`) are outside this chunk.
x = 'A string'
# f'{expr=}' debug specifier: renders "expr=<value>"
self.assertEqual(f'{10=}', '10=10')
# self.assertEqual(f'{x=}', 'x=' + x )#repr(x)) # TODO: add ' when printing strings
# self.assertEqual(f'{x =}', 'x =' + x )# + repr(x)) # TODO: implement ' handling
self.assertEqual(f'{x=!s}', 'x=' + str(x))
# # self.assertEqual(f'{x=!r}', 'x=' + x) #repr(x)) # !r not supported
# self.assertEqual(f'{x=!a}', 'x=' + ascii(x))
x = 2.71828
self.assertEqual(f'{x=:.2f}', 'x=' + format(x, '.2f'))
self.assertEqual(f'{x=:}', 'x=' + format(x, ''))
self.assertEqual(
    f'{x=!r:^20}', 'x=' + format(repr(x), '^20')
)  # TODO: a format specifier after a conversion flag is currently not supported (also for classical fstrings)
self.assertEqual(f'{x=!s:^20}', 'x=' + format(str(x), '^20'))
self.assertEqual(f'{x=!a:^20}', 'x=' + format(ascii(x), '^20'))
x = 9
self.assertEqual(f'{3*x+15=}', '3*x+15=42')
# There is code in ast.c that deals with non-ascii expression values. So,
# use a unicode identifier to trigger that.
tenπ = 31.4
self.assertEqual(f'{tenπ=:.2f}', 'tenπ=31.40')
# Also test with Unicode in non-identifiers.
#self.assertEqual(f'{"Σ"=}', '"Σ"=\'Σ\'')  # TODO: ' missing
# Make sure nested fstrings still work.
self.assertEqual(f'{f"{3.1415=:.1f}":*^20}', '*****3.1415=3.1*****')
abs() dict() help() min() setattr() all() dir() hex() next() slice() any() divmod() id() object() sorted() ascii() enumerate() input() oct() staticmethod() bin() eval() int() open() str() bool() exec() isinstance() ord() sum() bytearray()
import itertools

# itertools.product generates every length-`repeat` arrangement (a full
# Cartesian power) of the input alphabet — here all 5-character digit
# strings.
# Memory warning (from the original author): repeat=10 exhausted >50 GB
# of RAM and the process was killed by the system (exit code 137,
# interrupted by signal 9: SIGKILL); repeat=8 is a practical upper bound
# on a 16 GB machine.
l1 = list(itertools.product("0123456789", repeat=5))
print(len(l1))

# Simple generator-expression iterator.
# Fix: renamed from `iter`, which shadowed the builtin iter().
numbers = (x for x in range(1, 11))
# print(next(numbers))
# print(next(numbers))
# print(next(numbers))
for i in numbers:
    print(i)

s = "0"
print(ascii(s))
# http://www.runoob.com/python3/python3-func-ascii.html # TODO;Python3 ascii() 函数 """ 描述 ascii() 函数类似 repr() 函数, 返回一个表示对象的字符串, 但是对于字符串中的非 ASCII 字符则返回通过 repr() 函数使用 \x, \u 或 \U 编码的字符。 生成字符串类似 Python2 版本中 repr() 函数的返回值。 语法 以下是 ascii() 方法的语法: ascii(object) 参数 object -- 对象。 返回值 返回字符串。 实例 以下展示了使用 ascii() 方法的实例: """ print(ascii("1"))
'''TESTCASE
en¿ - 这是一个中文例子 - 😋 - ↑ ↓ → ←
'''
# Read one line and print, space-separated, the escaped form of every
# character that is not already plain printable ASCII.
a = input()
ans = []
for i in a:
    # ascii() wraps its result in quotes — strip them off
    asc = ascii(i).strip('\'')
    # characters whose escaped form differs from the raw character are
    # the non-ASCII ones; collect those
    if asc != i:
        ans.append(asc)
print(*ans)
def ascii(char_1, char_2):
    """Print every character strictly between char_1 and char_2.

    Characters are taken from the code-point range
    (ord(char_1), ord(char_2)), exclusive at both ends, and printed on a
    single line separated by spaces.  Returns None.

    NOTE(review): this function shadows the ascii() builtin; kept for
    call-site compatibility, but consider renaming.
    """
    start_code = ord(char_1)  # ord() already returns int; no cast needed
    end_code = ord(char_2)
    for code in range(start_code + 1, end_code):
        print(chr(code), end=' ')


char_1 = input()
char_2 = input()
start = ascii(char_1=char_1, char_2=char_2)
# built-in functions print("\n\nBuilt-in Functions\n\nAbsolute value of a number:\n\t", "abs(-1) == abs(1)", abs(-1) == abs(1)) list = [1, True, "TRUE", "true", "True"] dict = {"a": True, "b": 1} print('All()', '\n\tall([1, True, "TRUE", "true", "True"])', all(list), '\n\tall({"a": True, "b": 1})', all(dict)) list = [1, False, "a", "b", "0"] dict = {"a": True, "b": 0} print('Any()', '\n\tany([1, True, "TRUE", "true", "True"])', any(list), '\n\tany({"a": True, "b": 1})', any(dict)) print("ASCII()\n\t", 'ascii("/bɛŋˈɡɔːli/") => ', ascii("/bɛŋˈɡɔːli/")) print("bin()\n\t", 'bin(255) => ', bin(255)) print("bool()\n\t", 'bool("true") => ', bool("true")) print("chr()\n\t", 'chr(126) => ', chr(126)) itr = iter({"a": True, "b": 1}) print("iter()\n\t", 'iter({"a": True, "b": 1}) => ', itr) print("len()\n\t", 'len({"a": True, "b": 1}) => ', len({"a": True, "b": 1})) print("max()\n\t", 'max([2,50,35,15,22,67,59,0]) => ', max([2, 50, 35, 15, 22, 67, 59, 0])) print("min()\n\t", 'min([2,50,35,15,22,67,59,0]) => ',
# Methods of a Bunny class whose `class` header lies above this chunk
# (Bunny(47) is constructed below) -- original indentation lost in this view.
def __init__(self, n):
    # count wrapped by this instance
    self._n = n

def __repr__(self):
    # developer-facing, unambiguous representation
    return f'the number of bunnies is {self._n}'

def __str__(self):
    # user-facing representation; deliberately differs from __repr__
    return f'str: the number of bunnies is {self._n}'

s = Bunny(47)
print(repr(s))  # print the return of __repr__
x = Bunny(47)
print(x)  # print() uses __str__
print(ascii(x))  # escape unicode value

# Containers
x = (1, 2, 3, 4, 5)
y = x
y2 = len(x)
y3 = reversed(x)  # lazy reversed-iterator object
y4 = list(reversed(x))  # reversed obj string
y5 = sum(x)
y6 = sum(x, 10)  # sum with a start value
y7 = max(x)
y8 = min(x)
y9 = any(x)  # boolean function
y10 = all(x)  # boolean function
print(x)
print(y)
def exists(filename):
    """Return True if *filename* exists on the (Kodi) virtual filesystem.

    Delegates to xbmcvfs.exists(); if the path raises
    UnicodeDecodeError, retries through FileAccess with an
    ascii()-escaped copy of the name.  Any other exception propagates.
    """
    try:
        return xbmcvfs.exists(filename)
    except UnicodeDecodeError:
        # Fall back to an ASCII-escaped path for names the VFS cannot decode.
        return FileAccess.exists(ascii(filename))
    # (removed: trailing `return False` was unreachable -- both paths
    # above return, and other exceptions propagate)
def scan_layers_in_mxd(mxdPath):
    """Scan an ArcGIS .mxd map document and report every layer's data source.

    Prints a two-column table (layer name / data source) while scanning and
    returns a list of dicts -- one for the MXD file itself plus one per
    non-skipped layer -- with name, source, layer type, verification status,
    definition query, description and Livelink path.

    Bare `except:` clauses narrowed to `except Exception:` so that
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    dsList = []
    mxd = None
    # scan the mxd file
    try:
        mxd = arcpy.mapping.MapDocument(mxdPath)
        lyrs = arcpy.mapping.ListLayers(mxd)
        dsList.append({"Name": "MXD File", "Data Source": mxdPath})
        lyrTitle = 'Map Layer'
        sourceTitle = 'Data Source'
        print('%-60s%s' % (lyrTitle, sourceTitle))
        print('%-60s%s' % ('-' * len(lyrTitle), '-' * len(sourceTitle)))
        for lyr in lyrs:
            # classify the layer; the flags are checked in priority order
            if lyr.isFeatureLayer == True:
                lyrType = "FeatureLayer"
            elif lyr.isRasterLayer == True:
                lyrType = "RasterLayer"
            elif lyr.isServiceLayer == True:
                lyrType = "ServiceLayer"
            elif lyr.isGroupLayer == True:
                lyrType = "GroupLayer"
            else:
                lyrType = "UnknownLayer"
            try:
                if lyrType in ["GroupLayer", "ServiceLayer", "UnknownLayer"]:
                    print('%-60s%s' % (ascii(lyr.name), "*** skip " + lyrType))
                else:
                    # get the layer data source
                    ds = lyr.dataSource
                    print('%-60s%s' % (ascii(lyr.name), ds))
                    # get the layer def query
                    defQry = None
                    if lyr.supports("DEFINITIONQUERY") == True:
                        defQry = lyr.definitionQuery
                    # get the layer description
                    dspt = None
                    if lyr.supports("DESCRIPTION") == True:
                        dspt = lyr.description
                    # verify the layer data source
                    verified = verify_layer_dataSource(ds, lyrType)
                    # check if the layer is within the scope
                    srcType = get_source_type(ds)
                    # get the Livelink path
                    llPath = None
                    if lyrType == "FeatureLayer" and verified and srcType is not None:
                        llPath = find_Livelink_path(ds)
                        if llPath is None:
                            print('%-60s%s' % (ascii(lyr.name), "??? no Livelink path found for " + ds))
                    dsList.append({
                        "Name": lyr.name,
                        "Data Source": ds,
                        "Layer Type": lyrType,
                        "Verified?": verified,
                        "Definition Query": defQry,
                        "Description": dspt,
                        "Livelink Link": llPath
                    })
            except Exception:
                # best-effort: log and continue with the next layer
                print('%-60s%s' % (ascii(lyr.name), ">>> failed to retrieve info " + lyrType + ": " + str(sys.exc_info()[0])))
    except Exception:
        print('Failed to process the mxd file [%s]: %s' % (mxdPath, sys.exc_info()[0]))
    finally:
        # release the map-document COM handle
        del mxd
    return dsList
# Interactive serial menu loop for a Game Boy cartridge reader.
# NOTE(review): `waitInput`, `firstStart` and `ser` (a serial port) are
# defined above this chunk; the statement nesting below is reconstructed
# from a flattened source line -- confirm against the original file.
while (waitInput == 1):
    if (firstStart == 0):
        sys.stdout.write(
            '\nSelect an option below\n0. Read Header\n1. Dump ROM\n2. Save RAM\n3. Write RAM\n4. Exit\n'
        )
    sys.stdout.write('>')
    sys.stdout.flush()
    userInput = input()
    firstStart = 0
    if (userInput == "0"):
        # ask the reader firmware for the cartridge header
        ser.write('HEADER'.encode())
        sys.stdout.write('\n')
        sys.stdout.write('Game title... ')
        # ascii(bytes) renders e.g. "b'TITLE\r\n'"; the [2 : len-5] slice
        # strips the leading b' and the 5-char trailing \r\n' rendering.
        # NOTE(review): fragile -- decode() would be cleaner.
        gameTitle = ascii(ser.readline())
        gameTitle = gameTitle[2:(len(gameTitle) - 5)]
        print(gameTitle)
        sys.stdout.write('MBC type... ')
        cartridgeType = ascii(ser.readline())
        cartridgeType = int(cartridgeType[2:(len(cartridgeType) - 5)])
        # map the numeric MBC code to a human-readable name
        if (cartridgeType == 0):
            print('ROM ONLY')
        elif (cartridgeType == 1):
            print('MBC1')
        elif (cartridgeType == 2):
            print('MBC1+RAM')
        elif (cartridgeType == 3):
            print('MBC1+RAM+BATTERY')
        elif (cartridgeType == 5):
def test_coding(self): # bpo-32381: the -c command ignores the coding cookie ch = os_helper.FS_NONASCII cmd = f"# coding: latin1\nprint(ascii('{ch}'))" res = assert_python_ok('-c', cmd) self.assertEqual(res.out.rstrip(), ascii(ch).encode('ascii'))