def __init__(self, master, title, dict=None):
    """Build the scrollable variable-display widget tree inside ``master``."""
    self.master = master
    self.title = title
    import reprlib
    self.repr = reprlib.Repr()
    self.repr.maxstring = 60
    self.repr.maxother = 60
    # One Entry row is roughly 20 px tall; use a 40 px floor when empty.
    width = 0
    height = 20 * len(dict) if dict else 40
    self.frame = outer = Frame(master)
    outer.pack(expand=1, fill='both')
    self.label = Label(outer, text=title, borderwidth=2, relief='groove')
    self.label.pack(fill='x')
    self.vbar = scroll = Scrollbar(outer, name='vbar')
    scroll.pack(side='right', fill='y')
    self.canvas = view = Canvas(outer,
                                height=min(300, max(40, height)),
                                scrollregion=(0, 0, width, height))
    view.pack(side='left', fill='both', expand=1)
    # Wire the scrollbar and canvas to each other.
    scroll['command'] = view.yview
    view['yscrollcommand'] = scroll.set
    self.subframe = inner = Frame(view)
    self.sfid = view.create_window(0, 0, window=inner, anchor='nw')
    self.load_dict(dict)
def run(output_path=None, user_provided_master_path=None, search_term: str = None):
    """Print a Project/Key/Value table from the master workbook.

    Uses the user-supplied workbook path when given, otherwise the default
    MASTER_XLSX from config. Cell text is truncated to 48 chars for display.
    """
    if user_provided_master_path:
        logger.info(f"Using master file: {user_provided_master_path}")
        master_path = user_provided_master_path
    else:
        logger.info(f"Using default master file (refer to config.ini)")
        master_path = MASTER_XLSX
    wb = load_workbook(master_path)

    project_count = get_number_of_projects(wb)
    row_fmt = "{:<50}{:<50}{:<10}"
    print(row_fmt.format("Project", "Key", "Value"))
    print("{:*<140}".format(""))

    shortener = reprlib.Repr()
    shortener.maxstring = 48
    # Project sheets start at index 2.
    for sheet_index in range(2, project_count + 2):
        project_name, data = process_master(wb, sheet_index, search_term)
        for item in data:
            print(row_fmt.format(shortener.repr(project_name),
                                 shortener.repr(item[0]),
                                 shortener.repr(item[1])))
def __init__(self, master, title, dict=None):
    """Lay out the scrollable variable-display frame inside ``master``."""
    width = 0
    height = 40
    if dict:
        height = 20*len(dict)  # XXX 20 == observed height of Entry widget
    self.master = master
    self.title = title
    import reprlib
    truncated = reprlib.Repr()
    truncated.maxstring = 60
    truncated.maxother = 60
    self.repr = truncated
    self.frame = Frame(master)
    self.frame.pack(expand=1, fill="both")
    self.label = Label(self.frame, text=title, borderwidth=2, relief="groove")
    self.label.pack(fill="x")
    self.vbar = Scrollbar(self.frame, name="vbar")
    self.vbar.pack(side="right", fill="y")
    self.canvas = Canvas(self.frame,
                         height=min(300, max(40, height)),
                         scrollregion=(0, 0, width, height))
    self.canvas.pack(side="left", fill="both", expand=1)
    # Cross-link scrollbar and canvas.
    self.vbar["command"] = self.canvas.yview
    self.canvas["yscrollcommand"] = self.vbar.set
    self.subframe = Frame(self.canvas)
    self.sfid = self.canvas.create_window(0, 0, window=self.subframe, anchor="nw")
    self.load_dict(dict)
def __init__(self):
    """Initialise the nanosecond-resolution job scheduler state."""
    self._scheduler = sched.scheduler(timefunc=time_ns, delayfunc=sleep_ns)
    # Maps each Job to the ns timestamp of its next scheduled run, or None.
    # BUG FIX: ``typing.Optional[int, None]`` is invalid — Optional takes a
    # single type argument, and PEP 526 attribute annotations ARE evaluated
    # at runtime, so the original raised TypeError here.
    self._jobs: typing.Dict[Job, typing.Optional[int]] = {}
    # Callbacks invoked with (job, result) when a job fires.
    self._processors: typing.List[typing.Callable[[Job, typing.Any], None]] = []
    self._time_start_ns: int = time_ns()
    # Scheduling look-ahead window: 120 minutes, in nanoseconds.
    self._lookahead_ns: int = 1000 * 1000 * 1000 * 60 * 120
    self._repr = reprlib.Repr()
def __repr__(self):
    """Multi-line repr of the model state, with lists truncated to 3 items."""
    shortener = reprlib.Repr()
    shortener.maxlist = 3
    return (f"{type(self).__name__}(model={shortener.repr(self.model)},\n"
            f"log_p_lik={shortener.repr(self.log_p_lik)},\n"
            f"cmp_log_p_lik={shortener.repr(self.cmp_log_p_lik)},\n"
            f"log_p_pri={shortener.repr(self.log_p_pri)},\n"
            f"log_p_gen={shortener.repr(self.log_p_gen)},\n"
            f"ncmax={self.ncmax!r},\nnc={shortener.repr(self.nc)},\n"
            f"nmtype={self.nmtype},\npath_res={self.path_res})")
def init(access_token, environment='production', **kw):
    """
    Saves configuration variables in this module's SETTINGS.

    access_token: project access token. Get this from the Rollbar UI:
        - click "Settings" in the top nav
        - click "Projects" in the left nav
        - copy-paste the appropriate token.
    environment: environment name. Can be any string; suggestions:
        'production', 'development', 'staging', 'yourname'
    **kw: provided keyword arguments will override keys in SETTINGS.
    """
    global SETTINGS, agent_log, _initialized, _repr

    # Guard clause: configuration is applied at most once per process.
    if _initialized:
        return
    _initialized = True

    SETTINGS['access_token'] = access_token
    SETTINGS['environment'] = environment
    # Merge the extra config settings into SETTINGS
    SETTINGS = dict_merge(SETTINGS, kw)

    if SETTINGS.get('allow_logging_basic_config'):
        logging.basicConfig()
    if SETTINGS.get('handler') == 'agent':
        agent_log = _create_agent_log()

    # Configure the shared truncating repr from the 'locals.sizes' settings.
    _repr = reprlib.Repr()
    for size_name, size in SETTINGS['locals']['sizes'].items():
        setattr(_repr, size_name, size)
def test_reprlib_dict() -> None:
    """Limit the number of entries in a dictionary representation."""
    sample = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6}
    # Module-level reprlib.repr shows at most four dict entries by default.
    assert reprlib.repr(sample) == "{'a': 1, 'b': 2, 'c': 3, 'd': 4, ...}"
    limited = reprlib.Repr()
    limited.maxdict = 2
    assert limited.repr(sample) == "{'a': 1, 'b': 2, ...}"
def __repr__(self):
    """Repr showing the (truncated) meshes and, when set, the name."""
    shortener = reprlib.Repr()
    shortener.maxstring = 100
    shortener.maxother = 100
    shown = shortener.repr(self._meshes)
    cls = self.__class__.__name__
    if self.name is None:
        # The truncated list repr carries its own brackets.
        return f"{cls}{shown}"
    return f"{cls}({shown}, name={self.name})"
def __repr__(self):
    """Repr of the parameter: class name, dtype string, truncated values."""
    shortener = reprlib.Repr()
    shortener.maxlist = 3  # show at most three elements of the value list
    class_name = type(self).__name__
    # Idiom fix: str(x) instead of the explicit x.__str__() dunder call.
    return '{}({!r}, {})'.format(class_name,
                                 str(self.param_val.dtype),
                                 shortener.repr(self.param_val))
def __repr__(self):
    """Repr showing the first mesh (truncated) and, when set, the name."""
    shortener = reprlib.Repr()
    shortener.maxstring = 90
    shortener.maxother = 90
    first_mesh = shortener.repr(self._meshes[0])
    cls = self.__class__.__name__
    if self.name is None:
        return f"{cls}({first_mesh})"
    return f"{cls}({first_mesh}, name={self.name})"
def test_reprlib_string() -> None:
    """Limit the number of characters in a string representation."""
    sample = "thequickbrownfoxjumpsoverthelazydog"
    # Module-level reprlib.repr caps strings at 30 characters by default.
    default_out = reprlib.repr(sample)
    assert default_out == "'thequickbrow...verthelazydog'"
    assert len(default_out) == 30
    limited = reprlib.Repr()
    limited.maxstring = 15
    short_out = limited.repr(sample)
    assert short_out == "'thequ...zydog'"
    assert len(short_out) == 15
async def _get_sauce(self, ctx: commands.context.Context, url: str) -> typing.Optional[GenericSource]:
    """
    Perform a SauceNao lookup on the supplied URL

    Args:
        ctx (commands.context.Context):
        url (str):

    Returns:
        typing.Optional[GenericSource]
    """
    # Get the API key for this server, falling back to the bot-wide key
    api_key = Servers.lookup_guild(ctx.guild)
    if not api_key:
        api_key = self._api_key

    # Log the query
    SauceQueries.log(ctx, url)

    cache = SauceCache.fetch(url)  # type: SauceCache
    if cache:
        # Rehydrate the cached result via its original container class
        container = getattr(pysaucenao.containers, cache.result_class)
        sauce = container(cache.header, cache.result)  # type: GenericSource
        self._log.info(f'Cache entry found: {sauce.title}')
    else:
        # Initialize SauceNao and execute a search query
        saucenao = SauceNao(api_key=api_key,
                            min_similarity=float(config.get('SauceNao', 'min_similarity', fallback=50.0)),
                            priority=[21, 22, 5, 37, 25])
        search = await saucenao.from_url(url)
        sauce = search.results[0] if search.results else None

        # BUG FIX: the quota logging below references ``search``, which is
        # only bound on this cache-miss branch; on a cache hit the original
        # raised NameError. Log inside the branch instead.
        rep = reprlib.Repr()
        rep.maxstring = 16
        self._log.debug(
            f"[{ctx.guild.name}] {search.short_remaining} short API queries remaining for {rep.repr(api_key)}"
        )
        self._log.info(
            f"[{ctx.guild.name}] {search.long_remaining} daily API queries remaining for {rep.repr(api_key)}"
        )

    # Cache the search result
    if sauce:
        SauceCache.add_or_update(url, sauce)

    return sauce
def run(output_path=None, user_provided_master_path=None, search_term: str = None, xlsx: bool = False):
    """Report Project/Key/Value search results to stdout or to a workbook.

    When ``xlsx`` is falsy, prints a truncated text table; otherwise writes
    one row per result to a new workbook and saves it.
    """
    if user_provided_master_path:
        logger.info(f"Using master file: {user_provided_master_path}")
        wb = load_workbook(user_provided_master_path)
    else:
        logger.info(f"Using default master file (refer to config.ini)")
        wb = load_workbook(MASTER_XLSX)
    project_count = get_number_of_projects(wb)

    if not xlsx:
        print("{:<50}{:<50}{:<10}".format("Project", "Key", "Value"))
        print("{:*<140}".format(""))
        r = reprlib.Repr()
        r.maxstring = 48
        for p in range(2, project_count + 2):
            # do the work
            project_name, data = process_master(wb, p, search_term)
            for item in data:
                print("{:<50}{:<50}{:<10}".format(
                    r.repr(project_name), r.repr(item[0]), r.repr(item[1])))
    else:
        output_wb = Workbook()
        ws = output_wb.active
        ws.title = "Results of search"
        # CLEANUP: the original bound an unused enumerate() index and fed a
        # 3-yield generator through next(); plain cell writes are equivalent.
        row = 1
        for p in range(2, project_count + 2):
            # do the work
            project_name, data = process_master(wb, p, search_term)
            logger.info(f"Processing {project_name}")
            for item in data:
                ws.cell(column=1, row=row, value=project_name)
                ws.cell(column=2, row=row, value=item[0])
                ws.cell(column=3, row=row, value=item[1])
                row += 1
        # NOTE(review): despite the ``bool`` annotation, ``xlsx`` is indexed
        # here — callers evidently pass a sequence (argparse nargs?) whose
        # first element is the output path. Confirm and fix the annotation.
        output_wb.save(xlsx[0])
def pprint_succinct(_dic, maxlvl=4, maxlen=80):
    """
    Pretty-prints dictionaries and jsons limiting output amount

    :param _dic: json to print
    :param maxlvl: maximum nesting depth to print; 4 by default
    :param maxlen: maximum length of a string value; 80 chars by default
    """
    shortener = reprlib.Repr()
    shortener.maxstring = maxlen
    shortener.maxlevel = maxlvl
    print('{')
    for key, value in _dic.items():
        print(f" '{key}': {shortener.repr(value)},")
    print('}')
def __init__(self, address=0, data=None):
    """Store a memory segment at ``address`` with ``data`` as a bytearray.

    :param address: base address of the segment (default 0)
    :param data: any bytes-like/iterable-of-ints source; None means empty
    """
    self.address = address
    if data is None:
        self.data = bytearray()
    else:
        # bytearray seems to be the most appropriate canonical representation.
        self.data = bytearray(data)
    self._length = len(self.data)
    # BUG FIX (dead code removed): the original re-encoded non-'B' arrays
    # into a *local* variable after self.data was already built, so the
    # converted value was never used. bytearray(array) already copies the
    # raw buffer bytes — exactly what that conversion produced — so the
    # Python-2/3 tobytes()/tostring() branch is dropped entirely.
    self.repr = reprlib.Repr()
    self.repr.maxstring = 64
    self.repr.maxother = 64
def handle(self, *args, **kwargs):
    """Build a GeoJSON point feature, dump it, and print a truncated repr."""
    r = reprlib.Repr()
    r.maxlist = 4
    feature = {
        "type": "Feature",
        "geometry": {
            "type": "Point",
            "coordinates": [125.6, 10.1]
        }
    }
    json.dumps(feature)
    # First tested 'heatmap/test.json'.
    # NOTE(review): open() takes a filesystem path — passing a URL raises
    # OSError here. If the intent is an HTTP upload, this needs an HTTP
    # client instead; confirm with the author before changing behavior.
    with open('https://blooming-journey-52100.herokuapp.com/api/', 'w') as stream:
        json.dump(feature, stream, indent=2)
    # BUG FIX: reprlib.Repr has no ``rep`` method — the original call
    # ``r.rep(feature)`` raised AttributeError; ``repr`` is the right name.
    print(r.repr(feature))
def __str__(self):
    """
    Returns a string representation of a SizedContainerTimeSeriesInterface
    instance, of the form

        "Class_name with 'n' elements (Times: 't', Values: 'v')"

    where n is the length of `self`, t shows the first three elements of
    _times and v the first three elements of _values.
    """
    shortener = reprlib.Repr()
    shortener.maxlist = 3  # max elements displayed for lists
    return "{} with {} elements (Times: {}, Values: {})".format(
        type(self).__name__,
        len(self._values),
        shortener.repr(self._times),
        shortener.repr(self._values),
    )
class Tag:
    """A tag record: numeric code, datatype, count, raw data and decoded value."""

    code: int
    datatype: int
    count: int
    data: bytes
    value: Any = None
    offset_range: range = None

    # Shared truncating repr so huge ``value`` payloads stay readable.
    _vrepr = reprlib.Repr()
    _vrepr.maxstring = 60
    _vrepr.maxother = 60
    vrepr = _vrepr.repr

    def __repr__(self):
        fields = ", ".join((
            f"code={self.code!r}, datatype={self.datatype!r}",
            f"count={self.count!r}, data={self.data!r}",
            f"value={self.vrepr(self.value)}",
        ))
        return f"{self.__class__.__qualname__}({fields})"
def test_repr(self) -> None:
    """A custom a_repr truncates the violating value in the error message."""
    custom_repr = reprlib.Repr()
    custom_repr.maxlist = 3

    @icontract.require(lambda x: len(x) < 10, a_repr=custom_repr)
    def some_func(x: List[int]) -> None:
        pass

    violation_error = None  # type: Optional[icontract.ViolationError]
    try:
        some_func(x=list(range(10 * 1000)))
    except icontract.ViolationError as err:
        violation_error = err

    self.assertIsNotNone(violation_error)
    self.assertEqual(
        "len(x) < 10:\n"
        "len(x) was 10000\n"
        "x was [0, 1, 2, ...]",
        tests.error.wo_mandatory_location(str(violation_error)))
def __init__(self: object, backoff: object, verbose: bool = False):
    """
    Setup for SequentialBackoffLemmatizer

    :param backoff: Next lemmatizer in backoff chain
    :param verbose: Flag to include which lemmatizer assigned a given tag
        in the return tuple
    """
    SequentialBackoffTagger.__init__(self, backoff=None)
    # Build the backoff chain: this lemmatizer first, then the rest.
    self._taggers = [self] if backoff is None else [self] + backoff._taggers
    self.VERBOSE = verbose
    # Tight truncation: show a single list/dict entry in debug output.
    self.repr = reprlib.Repr()
    self.repr.maxlist = 1
    self.repr.maxdict = 1
def reformat_signatures(app, what, name, obj, options, signature, return_annotation):
    """Sphinx autodoc-process-signature hook.

    Rebuilds the signature string with optional arguments shown in nested
    square brackets, e.g. ``(a, [b=1, [*args, [**kwargs]]])``. Defaults are
    truncated to 40 chars. Returns (signature, return_annotation), or None
    to leave the signature untouched.
    """
    import inspect
    import reprlib
    if what == 'attribute':
        return None, 'derp'
    if what not in ('class', 'exception', 'method', 'function'):
        return
    if what in ('class', 'exception'):
        # Document the constructor signature for classes/exceptions.
        obj = getattr(obj, '__init__')
    try:
        # BUG FIX: inspect.getargspec() was removed in Python 3.11; use
        # getfullargspec(), whose args/varargs/varkw/defaults fields carry
        # the same information (``varkw`` replaces ``keywords``).
        spec = inspect.getfullargspec(obj)
    except TypeError:
        # Builtins and other non-introspectable callables: leave unchanged.
        return
    args, varargs, keywords, defaults = spec.args, spec.varargs, spec.varkw, spec.defaults
    if args and args[0] == 'self':
        del args[0]
    doc_repr = reprlib.Repr()
    doc_repr.maxstring = 40
    with_defaults = []
    without_defaults = []
    if defaults:
        num_without_defaults = len(args) - len(defaults)
        without_defaults = args[:num_without_defaults]
        for i, value in enumerate(defaults):
            # Renamed from ``name`` to avoid shadowing this hook's parameter.
            arg_name = args[num_without_defaults + i]
            with_defaults.append('{0}={1}'.format(arg_name, doc_repr.repr(value)))
    else:
        without_defaults = args
    if varargs is not None:
        with_defaults.append('*{0}'.format(varargs))
    if keywords is not None:
        with_defaults.append('**{0}'.format(keywords))
    result = ', '.join(without_defaults)
    if with_defaults:
        if result:
            result += ', '
        result += ', '.join('[' + piece for piece in with_defaults)
        result += ']' * len(with_defaults)
    result = '({0})'.format(result)
    return result, return_annotation
def __init__(
        self,
        prefix="",
        in_place=False,
        output_limit=DEFAULT_OUTPUT_LIMIT,
        repr_limits=False,
        **pipeline_step_kwargs):
    """Configure the printing step.

    ``repr_limits``, when truthy, is a mapping of reprlib.Repr attribute
    names to values (e.g. maxstring) used to build a truncating repr;
    otherwise the builtin repr is used unchanged.
    """
    super().__init__(**pipeline_step_kwargs)
    self.prefix = prefix
    self.in_place = in_place
    self.ran_once = False
    self.output_limit = output_limit
    if not repr_limits:
        self.repr_fn = repr
    else:
        limited = reprlib.Repr()
        for attr_name, attr_value in repr_limits.items():
            setattr(limited, attr_name, attr_value)
        self.repr_fn = limited.repr
def __init__(self, file=None):
    """Set up the uncollectable-object reporter, writing to ``file`` or stdout."""
    try:
        # Python 3
        import reprlib
    except ImportError:
        # Python 2
        import repr as reprlib
    self.stream = file if file is not None else sys.stdout
    self.cumulative = False
    self._getter = _GetUncollectable()
    self._objects = []
    # Colorize output only when attached to a terminal.
    self.color = self.stream.isatty()
    shortener = reprlib.Repr()
    shortener.maxstring = 100
    shortener.maxother = 100
    shortener.maxlevel = 1
    self.format_object = shortener.repr
def __init__(self, only_repr=False):
    """Build the type-name -> parser dispatch table.

    :param only_repr: stored on the instance; presumably makes the parse
        helpers fall back to plain repr output — confirm against the
        ``_parse_*`` methods (not visible here).
    """
    # this dictionary holds all the supported functions
    import reprlib
    import pprint
    self._pretty_print = pprint
    # Truncating repr shared by the parse helpers: generous per-container
    # limits (100 items/chars) but shallow nesting (3 levels).
    r = reprlib.Repr()
    r.maxarray = 100
    r.maxstring = 100
    r.maxother = 100
    r.maxtuple = 100
    r.maxlist = 100
    r.maxlevel = 3
    self._repr = r
    self._only_repr = only_repr
    # Dispatch table: fully-qualified type name -> bound parser method.
    # The key strings must match whatever produces them elsewhere in this
    # class, so do not rename them.
    self._supported_types = {
        "tuple": self._parse_tuple,
        "NoneType": self._parse_nonetype,
        "set": self._parse_set,
        "frozenset": self._parse_frozenset,
        "bytearray": self._parse_bytearray,
        "str": self._parse_str,
        "datetime.datetime": self._parse_datetime_datetime,
        "bool": self._parse_bool,
        "decimal.Decimal": self._parse_decimal_decimal,
        "type": self._parse_type,
        "range": self._parse_range,
        "pandas.core.frame.DataFrame": self._parse_pandas_dataframe,
        "numpy.ndarray": self._parse_numpy_ndarray,
        "dict": self._parse_dict,
        "float": self._parse_float,
        "complex": self._parse_complex,
        "int": self._parse_int,
        "Exception": self._parse_exception,
        "list": self._parse_list,
        "bytes": self._parse_bytes,
    }
def run(self, head):
    """Drive one bot pass over ``head`` as a generator-based coroutine.

    Fetches commits, statuses and comments from GitHub (each ``yield from``
    awaits a network call), lets each extension hook in, and aborts early
    whenever an extension raises SkipHead or the head is closed.
    """
    self.workon(head)

    # Closed heads need no processing; logger.info returns None, so this
    # doubles as a plain early return.
    if self.current.head.payload.get('state') == 'closed':
        return logger.info("Skipping closed head.")

    logger.info("Listing commits from GitHub.")
    payload = yield from self.current.head.fetch_commits()
    self.current.commits = list(
        self.current.repository.process_commits(payload))
    # Commits arrive newest-first, presumably — the head commit is taken
    # from index 0 (confirm against process_commits).
    self.current.last_commit = self.current.commits[0]

    logger.info("Fetching latest job status on GitHub.")
    payload = yield from self.current.last_commit.fetch_statuses()
    self.current.last_commit.process_statuses(payload)
    self.current.statuses = self.current.last_commit.statuses

    # Give every extension a chance to veto this head before the heavy work.
    for ext in self.extensions:
        try:
            ext.begin()
        except SkipHead:
            return

    logger.info("Queyring comments for instructions.")
    payload = yield from self.current.head.fetch_comments()
    self.process_instructions(payload)

    # Debug dump of the bot state, truncated so logs stay readable.
    repr_ = reprlib.Repr()
    repr_.maxdict = repr_.maxlist = repr_.maxother = 64
    vars_repr = repr_.repr1(dict(self.current), 2)
    logger.debug("Bot vars: %s", vars_repr)

    # Run each extension; SkipHead aborts the remaining ones.
    for ext in self.extensions:
        try:
            yield from ext.run()
        except SkipHead:
            return
import reprlib
from secrets import token_urlsafe

from simplejson import dumps as json_dumps

# Shared truncating repr helper for log/debug output.
# BUG FIX: reprlib.Repr's attribute is ``maxstring`` — the original set
# ``maxstr``, which Repr ignores, so strings were still cut at the default 30.
_repr_obj = reprlib.Repr()
_repr_obj.maxstring = 60
_repr_obj.maxother = 60
smart_repr = _repr_obj.repr


def random_str(length):
    """Return a random lowercase alphanumeric string of exactly ``length`` chars.

    token_urlsafe may emit '-' or '_', so resample until the slice is
    purely alphanumeric.
    """
    while True:
        x = token_urlsafe(length)
        x = x[:length]
        if not x.isalnum():
            continue
        return x.lower()


def to_compact_json(obj):
    """Serialize ``obj`` to JSON with no whitespace between tokens."""
    return json_dumps(obj, separators=(',', ':'))
if capital == Capital: found = True country = info['CountryName'] capLat = info['CapitalLatitude'] capLon = info['CapitalLongitude'] capLoc = capLat, capLon capName = str(i) + '. ' + country + ', ' + capital folium.Marker(capLoc, capName, icon=folium.Icon(color='blue')).add_to(M) n += 1 M.save('path.html') return path r = reprlib.Repr() r.maxlist = 20 r.maxstring = 20 r.maxlevel = 20 def minimumSpanningTree(ana): countries = ana['countries'] countryKeys = m.keySet(countries) points = ana['points'] search = prim.PrimMST(ana['connections']) relaxed = prim.prim(ana['connections'], search, 'Bogota') prim.edgesMST(ana['connections'], search) mst = relaxed['mst'] size = lt.size(mst) weight = round(prim.weightMST(ana['connections'], relaxed), 2)
async def send_reply(self, tweet_cache: TweetCache, media_cache: TweetCache, sauce_cache: TweetSauceCache,
                     requested: bool = True, blocked: bool = False) -> None:
    """
    Return the source of the image

    Args:
        tweet_cache (TweetCache): The tweet to reply to
        media_cache (TweetCache): The tweet containing media elements
        sauce_cache (Optional[GenericSource]): The sauce found (or None if nothing was found)
        requested (bool): True if the lookup was requested, or False if this is a monitored user account
        blocked (bool): If True, the account posting this has blocked the SauceBot

    Returns:
        None
    """
    tweet = tweet_cache.tweet
    sauce = sauce_cache.sauce

    # Drop results from indexes the operator has explicitly ignored.
    if sauce and self.ignored_indexes and (int(sauce.index_id) in self.ignored_indexes):
        self.log.info(f"Ignoring result from ignored index ID {sauce.index_id}")
        sauce = None

    # No sauce: optionally reply with reverse-image-search links, then stop.
    if sauce is None:
        if self.failed_responses and requested:
            media = TweetManager.extract_media(media_cache.tweet)
            if not media:
                return
            yandex_url = f"https://yandex.com/images/search?url={media[sauce_cache.index_no]}&rpt=imageview"
            ascii_url = f"https://ascii2d.net/search/url/{media[sauce_cache.index_no]}"
            google_url = f"https://www.google.com/searchbyimage?image_url={media[sauce_cache.index_no]}&safe=off"
            message = lang('Errors', 'no_results', {
                'yandex_url': yandex_url,
                'ascii_url': ascii_url,
                'google_url': google_url
            }, user=tweet.author)
            self._post(msg=message, to=tweet.id)
        return

    # Get the artists Twitter handle if possible
    twitter_sauce = None
    if isinstance(sauce, PixivSource):
        twitter_sauce = self.pixiv.get_author_twitter(sauce.data['member_id'])

    # If we're requesting sauce from a post by the original artist, just say so
    if twitter_sauce and twitter_sauce.lstrip('@').lower() == media_cache.tweet.author.screen_name.lower():
        self.log.info("User requested sauce from a post by the original artist")
        message = lang('Errors', 'sauced_the_artist')
        self._post(message, to=tweet.id)
        return

    # Lines with priority attributes in case we need to shorten the reply later
    lines = []

    # Add additional sauce URL's if available
    sauce_urls = []
    if isinstance(sauce, AnimeSource):
        await sauce.load_ids()

        if self.anime_link in ['myanimelist', 'animal', 'all'] and sauce.mal_url:
            sauce_urls.append(sauce.mal_url)

        if self.anime_link in ['anilist', 'animal', 'all'] and sauce.anilist_url:
            sauce_urls.append(sauce.anilist_url)

        if self.anime_link in ['anidb', 'all']:
            sauce_urls.append(sauce.url)

    # Only add Twitter source URL's for booru's, otherwise we may link to something that angers the Twitter gods
    if isinstance(sauce, BooruSource):
        for url in sauce.urls:
            if 'twitter.com' in url:
                sauce_urls.append(url)

        if 'twitter.com' in sauce.source_url:
            sauce_urls.append(sauce.source_url)

    # For limiting the length of the title/author
    _repr = reprlib.Repr()
    _repr.maxstring = 32

    # H-Misc doesn't have a source to link to, so we need to try and provide the full title
    if sauce.index not in ['H-Misc', 'E-Hentai', 'H-Anime']:
        title = _repr.repr(sauce.title).strip("'")
    else:
        # Allow a much longer title when there is no link to fall back on.
        _repr.maxstring = 128
        title = _repr.repr(sauce.title).strip("'")

    # Format the similarity string
    similarity = lang('Accuracy', 'prefix', {'similarity': sauce.similarity})
    if sauce.similarity >= 95:
        similarity = similarity + " " + lang('Accuracy', 'exact')
    elif sauce.similarity >= 85.0:
        similarity = similarity + " " + lang('Accuracy', 'high')
    elif sauce.similarity >= 70.0:
        similarity = similarity + " " + lang('Accuracy', 'medium')
    elif sauce.similarity >= 60.0:
        similarity = similarity + " " + lang('Accuracy', 'low')
    else:
        similarity = similarity + " " + lang('Accuracy', 'very_low')

    # Opening line: requested vs. monitored, confident vs. low-accuracy.
    if requested:
        if sauce.similarity >= 60.0:
            reply = lang('Results', 'requested_found', {'index': sauce.index}, user=tweet.author) + "\n"
            lines.append(ReplyLine(reply, 1))
        else:
            reply = lang('Results', 'requested_found_low_accuracy', {'index': sauce.index}, user=tweet.author) + "\n"
            lines.append(ReplyLine(reply, 1))
    else:
        if sauce.similarity >= 60.0:
            reply = lang('Results', 'other_found', {'index': sauce.index}, user=tweet.author) + "\n"
            lines.append(ReplyLine(reply, 1))
        else:
            reply = lang('Results', 'other_found_low_accuracy', {'index': sauce.index}, user=tweet.author)
            lines.append(ReplyLine(reply, 1))

    # If it's a Pixiv source, try and get their Twitter handle (this is considered most important and displayed first)
    if twitter_sauce:
        reply = lang('Results', 'twitter', {'twitter': twitter_sauce})
        lines.append(ReplyLine(reply, newlines=1))

    # Print the author name if available
    if sauce.author_name:
        author = _repr.repr(sauce.author_name).strip("'")
        reply = lang('Results', 'author', {'author': author})
        lines.append(ReplyLine(reply, newlines=1))

    # Omit the title for Pixiv results since it's usually always non-romanized Japanese and not very helpful
    if not isinstance(sauce, PixivSource):
        reply = lang('Results', 'title', {'title': title})
        lines.append(ReplyLine(reply, 10, newlines=1))

    # Add the episode number and timestamp for video sources
    if isinstance(sauce, VideoSource) and sauce.episode:
        reply = lang('Results', 'episode', {'episode': sauce.episode})
        if sauce.timestamp:
            reply += " " + lang('Results', 'timestamp', {'timestamp': sauce.timestamp})
        lines.append(ReplyLine(reply, 5, newlines=1))

    # Add character and material info for booru results
    if isinstance(sauce, BooruSource):
        if sauce.material:
            reply = lang('Results', 'material', {'material': sauce.material[0].title()})
            lines.append(ReplyLine(reply, 5, newlines=1))
        if sauce.characters:
            reply = lang('Results', 'character', {'character': sauce.characters[0].title()})
            lines.append(ReplyLine(reply, 4, newlines=1))

    # Add the chapter for manga sources
    if isinstance(sauce, MangaSource) and sauce.chapter:
        reply = lang('Results', 'chapter', {'chapter': sauce.chapter})
        lines.append(ReplyLine(reply, 5, newlines=1))

    # Display our confidence rating
    lines.append(ReplyLine(similarity, 2, newlines=1))

    # Source URL's are not available in some indexes
    if sauce.index not in ['H-Misc', 'H-Anime', 'H-Magazines', 'H-Game CG', 'Mangadex']:
        if sauce_urls:
            reply = "\n".join(sauce_urls)
            lines.append(ReplyLine(reply, newlines=2))
        elif sauce.source_url and not isinstance(sauce, BooruSource):
            lines.append(ReplyLine(sauce.source_url, newlines=2))

    # Try and append bot instructions with monitored posts. This might make our post too long, though.
    if not requested:
        promo_footer = lang('Results', 'other_footer')
        if promo_footer:
            lines.append(ReplyLine(promo_footer, 0, newlines=2))
        elif config.getboolean('System', 'display_patreon'):
            lines.append(ReplyLine("Support SauceBot!\nhttps://www.patreon.com/saucebot", 3, newlines=2))

    # trace.moe time! Let's get a video preview
    if sauce_cache.media_id:
        comment = self._post(msg=lines, to=tweet.id, media_ids=[sauce_cache.media_id])
    # This was hentai and we want to avoid uploading hentai clips to this account
    else:
        comment = self._post(msg=lines, to=tweet.id)

    # If we've been blocked by this user and have the artists Twitter handle, send the artist a DMCA guide
    if blocked and twitter_sauce:
        self.log.info(f"Sending {twitter_sauce} DMCA takedown advice")
        message = lang('Errors', 'blocked_dmca', {'twitter_artist': twitter_sauce})
        # noinspection PyUnboundLocalVariable
        self._post(msg=message, to=comment.id)
# see <http://www.gnu.org/licenses/>. #* Imports from __future__ import print_function import ast import sys import inspect import re import platform import shlex import types import collections import pprint as pp try: import reprlib repr1 = reprlib.Repr() repr1.maxlist = 10 repr1.maxstring = 100 except: pass try: import jedi except: print("failed to load jedi") #* Classes class Stack: line_numbers = {} def __init__(self, tb): self.stack = [] while tb:
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import reprlib shortener = reprlib.Repr() shortener.maxstring = 150 shorten = shortener.repr class Speedscope: def __init__(self, name='Speedscope', init_stack_trace=None): self.init_stack_trace = init_stack_trace or [] self.init_stack_trace_level = len(self.init_stack_trace) self.caller_frame = None self.convert_stack(self.init_stack_trace) self.init_caller_frame = None if self.init_stack_trace: self.init_caller_frame = self.init_stack_trace[-1] self.profiles_raw = {} self.name = name self.frames_indexes = {} self.frame_count = 0 self.profiles = [] def add(self, key, profile): for entry in profile: self.caller_frame = self.init_caller_frame self.convert_stack(entry['stack'] or []) if 'query' in entry: query = entry['query']