def __call__(self, setid, msg, *args, **kwargs):
    # Dispatch a debug message for a registered set id: prefix it with the
    # computed metrics and, optionally, indentation proportional to call depth.
    # Raises ValueError for unregistered ids; silently returns for inactive ones.
    if setid not in self.registered:
        raise ValueError, "Not registered debug ID %s" % setid
    if not setid in self.active:
        # don't even compute the metrics, since they might
        # be statefull as RelativeTime
        return
    # Each metric is a callable; join their current values into one tag.
    msg_ = ' / '.join([str(x()) for x in self.__metrics])
    if len(msg_) > 0:
        msg_ = "{%s}" % msg_
    if len(msg) > 0:
        # determine blank offset using backstacktrace
        if self._offsetbydepth:
            level = len(traceback.extract_stack()) - 2
        else:
            level = 1
        # Flag suspiciously long messages with their origin (skip *_TB ids,
        # which legitimately carry tracebacks).
        if len(msg) > 250 and 'DBG' in self.active and not setid.endswith('_TB'):
            tb = traceback.extract_stack(limit=2)
            msg += " !!!2LONG!!!. From %s" % str(tb[0])
        msg = "DBG%s:%s%s" % (msg_, " "*level, msg)
        SetLogger.__call__(self, setid, msg, *args, **kwargs)
    else:
        # Empty message: log only the metrics line via the plain Logger.
        msg = msg_
        Logger.__call__(self, msg, *args, **kwargs)
def __init__(self, def_name=None):
    """Record a name for this instance.

    If *def_name* is omitted, it is inferred by parsing the caller's source
    line: the text left of ``=`` in ``name = Cls()`` becomes the name.
    This only works when the instance is created via a simple assignment.
    """
    if def_name is None:
        # Inspect the caller's frame summary to recover the assigned name.
        (filename, line_number, function_name, text) = \
            traceback.extract_stack()[-2]
        # NOTE(review): removed a leftover debug print of the entire stack
        # that polluted stdout on every no-argument construction.
        def_name = text[:text.find('=')].strip()
    self.instance_name = def_name
def log_callstack(back_trace=False):
    """ Helper function that formats either a (filtered) backtrace or
    call stack in a string.
    Blender internals are filtered such that errors in the own code can
    be detected more easily.

    :param back_trace: If true, the backtrace is returned. Otherwise,
                       the call stack is returned.
    :return: the formatted call stack/backtrace in a string.
    """
    if not back_trace:
        # Count how many 'run' frames are on the stack for the header,
        # then capture the stack minus this helper's own frame.
        message = BACKTRACE_MESSAGE_CALLSTACK % len([i for i in traceback.extract_stack() if i[2] == 'run'])
        stack = traceback.extract_stack()[:-1]
    else:
        message = BACKTRACE_MESSAGE
        stack = traceback.extract_tb(sys.exc_info()[2])
    last_call = ""
    for path, line, func, code in stack:
        # Abbreviate well-known Blender directories for readability.
        if 'addons' in path:
            file = '...' + path[path.find('addons') + 6:]
        elif 'scripts' in path:
            file = '...' + path[path.find('scripts') + 7:]
        else:
            file = path
        if func not in BACKTRACE_FILTER_FUNC:
            if func in BACKTRACE_FILTER_HIDE_CODE:
                # Show location only, not the source line.
                message += BACKTRACE_MESSAGE_STACK.format(func, file, line)
            else:
                message += BACKTRACE_MESSAGE_STACK_CODE.format(func, file, line, code, last_call)
            # Remember the last shown source line for the next entry.
            last_call = code
    return message
def parsemessage(self, linesiter, properties=None):
    """Parse RFC-822-style headers from *linesiter* into *properties*.

    Header lines are ``key: value``; lines starting with a tab or a space
    continue the previous header.  An empty line ends the header section,
    after which ``parsecontent`` is invoked and its result returned.
    Returns None when no content follows the headers.
    """
    if properties is None:
        # BUGFIX: the original used a mutable default argument ({}), so the
        # same dict was shared across calls and headers leaked between
        # messages.  Create a fresh dict per call instead.
        properties = {}
    if DEBUG: log.out("parsemessage(%d) starting" % len(traceback.extract_stack()))
    lastkey = None
    for line in linesiter:
        # if possible (empty lines marks end of header), proceed with content parsing
        if not line:
            if DEBUG: log.out("parsemessage(%d): empty line, %d properties, parsing content" % (len(traceback.extract_stack()), len(properties)))
            content = self.parsecontent(linesiter, properties)
            if not content:
                if DEBUG: log.out("parsemessage(%d): no content" % len(traceback.extract_stack()))
                break
            if DEBUG: log.out("parsemessage(%d) leaving with content" % len(traceback.extract_stack()))
            return content
        if line[0] == "\t" or line[0] == " ":
            # Continuation line: append to the previous header's value.
            if not lastkey:
                continue
            properties[lastkey] += " " + line[1:]
        else:
            p = line.split(": ", 1)
            if len(p) < 2:
                # Malformed header line; skip it.
                continue
            lastkey = p[0].lower()
            value = p[1]
            properties[lastkey] = value
    if DEBUG: log.out("parsemessage(%d) leaving without content" % len(traceback.extract_stack()))
def makeDict(*args):
    # Hack: recover the *names* of the caller's arguments by scanning the
    # stack for the caller's source line "... = makeDict( a, b, ... )", then
    # zip those names with the received values into a dict.  Fragile: relies
    # on the exact "= makeDict( " spelling at the call site.
    strAllStack = str(extract_stack())
    intNumLevels = len( extract_stack() )
    intLevel = 0
    blnFinished = False
    # Walk stack frames until the one containing the makeDict call is found.
    while not blnFinished:
        strStack = str( extract_stack()[intLevel] )
        if strStack.find( "makeDict( ")>0: blnFinished = True
        intLevel += 1
        if intLevel >= intNumLevels: blnFinished = True
    strStartText = "= makeDict( "
    intLen = len( strStartText )
    intOpenParenLoc = strStack.find( strStartText )
    intCloseParenLoc = strStack.find(")", intOpenParenLoc )
    # Text between the parens is the comma-separated argument name list.
    strArgs = strStack[ intOpenParenLoc+intLen : intCloseParenLoc ].strip()
    lstVarNames = strArgs.split(",")
    lstVarNames = [ s.strip() for s in lstVarNames ]
    if len( lstVarNames ) == len( args ):
        # Py2 map(None, a, b) zips names with values (pads with None).
        tplArgs = map( None, lstVarNames, args )
        newDict = dict( tplArgs )
        return newDict
    else:
        print "Error. makeDict Failed."
        return None
def main():
    print'begin'
    # If the SystemExit raised by sys.exit() were not caught here, the
    # program would end right after printing "begin" and "final" would
    # never be printed.
    try:
        sys.exit('exitok')
    except :
        # Show where we are when the exit was intercepted.
        print traceback.extract_stack()
    print'final'
def test_kmeans(filename):
    # Run KMeans 10 times over the CSV at *filename*; the cluster count is
    # encoded in the file name (underscore-separated field 4).
    stripped_name = filename.split("/")[-1].split(".")[0]
    from sklearn.cluster import KMeans
    contents = stripped_name.split("_")
    no_clusters = int(contents[4].split(".")[0])
    confusion_matrices = []
    start_time = time.time()
    df = pd.read_csv(filename)
    # Feature columns contain "features"; label columns contain "class".
    h_indep = [d for d in df.columns if "features" in d]
    h_dep = [d for d in df.columns if "class" in d]
    for _ in xrange(10):
        try:
            print "- ",
            sys.stdout.flush()
            indep = df[h_indep]
            dep = df[h_dep]
            kmeans = KMeans(n_clusters =no_clusters)
            kmeans.fit(indep)
            print kmeans.inertia_
            # NOTE(review): debugger breakpoint left in the loop body.
            import pdb
            pdb.set_trace()
        except:
            # NOTE(review): extract_stack() result is discarded; the
            # exception is effectively swallowed.
            import traceback
            traceback.extract_stack()
    import pickle
    # NOTE(review): ``extract_name`` is not defined in this function --
    # presumably a module-level name; confirm before relying on this dump.
    pickle.dump(confusion_matrices, open("./Results_K_Means/Kmeans_" + extract_name, "wb"))
    print " Total Time: ", time.time() - start_time
def trace(self, entry , params=None):
    """Internal method - see log.trace.__doc__ for details.

    Logs an ENTRY/EXIT trace line naming the calling module and function,
    optionally including the given parameter values.
    """
    # Turn the list of values into a printable string.
    if params:
        # str.join replaces the Py2-only string.join helper.
        paramString = ', '.join(['%s' % (param) for param in params])
    else:
        paramString = ''
    # Examine the trace stack to get the calling function.  Extract the
    # stack ONCE and reuse the record; the original walked the full stack
    # twice to read two fields of the same frame.
    caller = traceback.extract_stack()[-3]
    modName = caller[0]
    funcName = caller[2]
    # Format the message based on entry/exit state.
    if entry.upper() == 'EXIT':
        entry = 'EXIT'
        msg = 'Leaving %s::%s()' % (modName, funcName)
        if paramString:
            msg += ' with return values: %s' % paramString
    else:
        entry = 'ENTRY'
        msg = 'Entering %s::%s(%s)' % (modName, funcName, paramString)
    # Time to log the message.
    self.__log(entry, msg)
def sql_query(query_counter,engine,metric_list,query_frequency_dictionary,query_dictionary,sleep_config,log_identifier,redshift_connection,queue_push):
    # Run every query whose configured period divides the current counter,
    # store the resulting DataFrames, and push them onto the shared queue.
    # NOTE(review): ``query_result_df`` and ``query_result_queue`` are not
    # defined here -- presumably module-level globals; confirm.
    # Get a list of only those queries that are divisible by the time period set by user
    query_list = [query for (query, period) in query_frequency_dictionary.items() if query_counter%query_frequency_dictionary[query] == 0]
    print query_list
    # Query redshift for each of the chosen queries
    for i in range(0,len(query_list)):
        try:
            print query_list[i]
            query_result_df[query_list[i]] = pd.read_sql_query(query_dictionary[query_list[i]],engine)
        except:
            # On any failure: log, wait, rebuild the engine and move on.
            print 'Something broke. connection failure'
            logging.exception('%s : Redshift connection failure', log_identifier)
            traceback.extract_stack()
            #print type(exception).__name__
            print query_counter
            time.sleep(sleep_config)
            engine = create_rs_engine(log_identifier=log_identifier,redshift_connection=redshift_connection)
            continue
    # Increment the count by 1
    query_counter += 1
    # Put the dataframes on a queue consumed by all threads.
    for i in range(0,queue_push):
        query_result_queue.put(query_result_df)
    return query_counter
def PRINT_EXCEPTION(e=None, stop=False):
    """Print the traceback of the exception currently being handled.

    :param e: unused; accepted for caller compatibility.
    :param stop: when true, drop into a pdb session after printing.
    """
    if not log_level:
        # NOTE(review): this branch is a no-op; it was presumably meant to
        # ``return`` and suppress output at low log levels -- confirm the
        # intent before changing behavior.
        pass
    # (removed a discarded ``traceback.extract_stack()[-1][1]`` expression
    # whose result was never used)
    traceback.print_exc()
    if stop:
        pdb.set_trace()
def lock(self):
    """Create an external file lock for the bundle database.

    Retries acquisition up to 10 times (1s apart) and raises LockedFailed
    if the lock cannot be obtained.  A second call on an already-locked
    bundle is a no-op (logged with the original acquisition site).
    """
    from lockfile import FileLock, AlreadyLocked  # , LockTimeout
    import time
    import traceback
    from ..dbexceptions import LockedFailed
    if self._lock:
        # Already locked: log who originally acquired it and return.
        tb = traceback.extract_stack()[-5:-4][0]
        global_logger.debug(
            "Already has bundle lock from {}:{}".format(
                tb[0], tb[1]))
        return
    self._lock = FileLock(self.lock_path)
    for i in range(10):
        try:
            # Capture the caller's frame (5 levels up) for the debug message.
            tb = traceback.extract_stack()[-5:-4][0]
            # acquire(-1) requests a non-blocking attempt.
            self._lock.acquire(-1)
            global_logger.debug(
                "Acquired bundle lock from {}:{}".format(
                    tb[0], tb[1]))
            return
        except AlreadyLocked:
            global_logger.debug("Waiting for bundle lock")
            time.sleep(1)
    raise LockedFailed("Failed to acquire lock on {}".format(self.lock_path))
def LockCheck():
    # Watchdog loop (Py2/stackless style): every 5 minutes, scan all known
    # semaphores and log the stacks of holder and waiters for any semaphore
    # that has been held for 5+ minutes while threads are blocked on it.
    global semaphores
    while 1:
        each = None
        Sleep(5 * 60)
        now = time.time()
        try:
            for each in semaphores.keys():
                BeNice()
                # Conflict heuristic: no free count, waiters queued, and the
                # lock has been held for at least 5 minutes.
                if (each.count<=0) and (each.waiting.balance < 0) and (each.lockedWhen and (now - each.lockedWhen)>=(5*MIN)):
                    logger.error("Semaphore %s appears to have threads in a locking conflict."%id(each))
                    logger.error("holding thread:")
                    try:
                        # Up to 40 frames of the holder's stack.
                        for s in traceback.format_list(traceback.extract_stack(each.thread.frame,40)):
                            logger.error(s)
                    except:
                        sys.exc_clear()
                    # Walk the circular queue of waiting tasklets.
                    first = each.waiting.queue
                    t = first
                    while t:
                        logger.error("waiting thread %s:"%id(t),4)
                        try:
                            for s in traceback.format_list(traceback.extract_stack(t.frame,40)):
                                logger.error(s,4)
                        except:
                            sys.exc_clear()
                        t = t.next
                        if t is first: break
                    logger.error("End of locking conflict log")
        except StandardError:
            StackTrace()
            sys.exc_clear()
def log(lvl, msg):
    '''
    Formats and prints message to appropriate i/o stream.
    TODO: perhaps look into replace with logging module
    '''
    # Caller's frame: file name, line number and function name.
    ( procName, lineNum, funcName, funcName2 )= extract_stack()[len(extract_stack())-2]
    msg= "%s, %s, %s, %s, %s, %s\n" % ( datetime.now(), lvl, procName, funcName, lineNum, msg )
    # 'error' goes to stderr; every other level goes to stdout.
    ioStream= dict(error= stderr).get(lvl, stdout)
    print >> ioStream, msg,
    ioStream.flush()
def add_post():
    # Flask view: bulk-import posts from an uploaded '#'-separated file.
    # Each line is "<ignored>#title#author#text"; decode errors and short
    # lines are reported via flash() with the failing source location.
    db = get_db()
    fichero = request.files['archivo']
    lineaFichero=1
    for linea in fichero.readlines():
        try:
            partir = linea.split('#')
            titulo = partir[1]
            autor = partir[2]
            texto = partir[3]
            titulo = titulo.decode('utf-8')
            autor = autor.decode('utf-8')
            texto = texto.decode('utf-8')
            unicode(titulo)
            unicode(autor)
            unicode(texto)
            # Parameterized insert -- safe against SQL injection.
            db.execute('INSERT INTO post (title,author,textillo) VALUES (?,?,?)',[unicode(titulo),unicode(autor),unicode(texto)])
            db.commit()
            lineaFichero+=1
        except IndexError as e:
            # Line had fewer than 4 '#'-separated fields.
            import traceback, os.path
            top = traceback.extract_stack()[-1]
            flash(str(e)+' - '.join([type(e).__name__, os.path.basename(top[0]), str(top[1])]))
        except UnicodeDecodeError as e:
            import traceback, os.path
            top = traceback.extract_stack()[-1]
            flash(str(e)+' - '.join([type(e).__name__, os.path.basename(top[0]), str(top[1])]))
    flash('Entradas agregadas con exito')
    return redirect(url_for('mostrar_post'))
def test_traceback_stack(self):
    # Verifies extract_tb/extract_stack agreement while walking tb_next.
    # NOTE: the blank lines between C/B/A are significant -- the expected
    # line numbers below (1+, 4+, 8+lineno) depend on this exact layout.
    import sys
    import traceback
    def C():
        raise Exception

    def B():
        C()

    def A():
        try:
            B()
        except:
            return sys.exc_info()[2]
    lineno = C.func_code.co_firstlineno
    tb = A()
    a = traceback.extract_tb(tb)
    b = traceback.extract_stack(tb.tb_frame, 1)
    self.assertEqual(a, [(__file__, 8+lineno, 'A', 'B()'), (__file__, 4+lineno, 'B', 'C()'), (__file__, 1+lineno, 'C', 'raise Exception')])
    self.assertEqual([x[2] for x in b], ['A']) # only check that we're in the proper function, the rest does not work properly
    tb = tb.tb_next
    a = traceback.extract_tb(tb)
    b = traceback.extract_stack(tb.tb_frame, 2)
    self.assertEqual(a, [(__file__, 4+lineno, 'B', 'C()'), (__file__, 1+lineno, 'C', 'raise Exception')])
    self.assertEqual([x[2] for x in b], ['A', 'B']) # only check that we're in the proper function, the rest does not work properly
    tb = tb.tb_next
    a = traceback.extract_tb(tb)
    b = traceback.extract_stack(tb.tb_frame, 3)
    self.assertEqual(a, [(__file__, 1+lineno, 'C', 'raise Exception')])
    self.assertEqual([x[2] for x in b], ['A', 'B', 'C']) # only check that we're in the proper function, the rest does not work properly
def format_stack_report(details, exc_info):
    # Build a crash-report string: the exception (if any) plus a call stack.
    # Falls back to traceback.extract_stack() when util.get_nice_stack()
    # itself fails.
    header = ''
    header += "Exception\n---------\n"
    if exc_info:
        header += ''.join(traceback.format_exception(*exc_info))
        header += "\n"
        # Print out a stack trace too.  The exception stack only contains
        # calls between the try and the exception.
        try:
            stack = util.get_nice_stack()
        except StandardError:
            stack = traceback.extract_stack()
        header += "Call Stack\n---------\n"
        header += ''.join(traceback.format_list(stack))
        header += "\n"
    else:
        # fake an exception with our call stack
        try:
            stack = util.get_nice_stack()
        except StandardError:
            stack = traceback.extract_stack()
        header += ''.join(traceback.format_list(stack))
        header += 'UnknownError: %s\n' % details
        header += "\n"
    return header
def adminInfo(handler):
    # Admin status page (Py2 CGI-style): prints HTML sections for uptime,
    # live threads (with stack traces), and lock ownership diagnostics.
    handler.title('Information')
    requirePriv(handler, 'Admin')
    print "<div class=\"info\">"
    print "<h3>Uptime</h3>"
    loadTime = getLoadtime()
    print "Started %s<br>" % loadTime
    print "Up for %s<br>" % timesince(loadTime)
    print "Total requests: %d<br>" % server().getTotalRequests()
    print "<form method=\"post\" action=\"/admin/restart\">"
    print Button('Restart', type = 'submit').negative()
    print "</form>"
    print "<h3>Threads</h3>"
    print "<table border=\"1\" cellspacing=\"0\" cellpadding=\"4\">"
    print "<tr><th>ID</th><th class=\"main\">Name</th><th>Alive</th><th>Daemon</th></tr>"
    for thread in sorted(threads(), key = lambda thread: thread.name):
        print "<tr><td>%s</td><td>" % ('None' if thread.ident is None else "%x" % abs(thread.ident))
        print thread.name
        print "<br>"
        try:
            # Live stack of the thread via sys._current_frames().
            print CollapsibleBox('Traceback', formatTrace(traceback.extract_stack(sys._current_frames()[thread.ident])))
        except Exception:
            pass
        print "</td><td class=\"%s\">&#160;</td><td class=\"%s\">&#160;</td></tr>" % ('yes' if thread.isAlive() else 'no', 'yes' if thread.daemon else 'no')
    print "</table>"
    print "<h3>Locks</h3>"
    print "<table border=\"1\" cellspacing=\"0\" cellpadding=\"4\">"
    print "<tr><th class=\"main\">Name</th><th>Available</th><th>Reentrant</th></tr>"
    for (name, lock) in sorted(locks.iteritems()):
        print "<tr><td>"
        print name
        avail = lock.avail()
        if not avail:
            # Lock is held: show owner thread plus acquisition/current stacks.
            print "<br>"
            writer = ResponseWriter()
            try:
                owner, tb = lock.owner, lock.tb
                name = ("%x" % abs(owner)) if owner else 'None'
                #TODO Is there no O(1) way to do this?
                for thread in threads():
                    if thread.ident == owner:
                        name = "%s (%x)" % (thread.name, abs(owner))
                        break
                print "Owned by: <b>%s</b><br><br>" % name
                if tb:
                    print "Acquisition traceback:<br>"
                    print formatTrace(tb)
                    print "<br>"
                print "Current traceback:<br>"
                print formatTrace(traceback.extract_stack(sys._current_frames()[owner]))
            except Exception, e:
                writer.clear()
                print "<i>(Unable to retrieve stack trace)</i>"
            print CollapsibleBox('Ownership', writer.done())
        print "</td><td class=\"%s\">%s</td><td class=\"%s\">&#160;</td></tr>" % ('yes' if avail else 'no', '&#160;' if avail else (lock.owner or '???'), 'yes' if lock.reentrant() else 'no')
def log(self, message):
    """Log *message* at debug level, prefixed with the caller's file/line.

    The caller's path is shown relative to the project root directory
    ("DistributedSocialNetworking/"); any formatting failure falls back
    to a generic debug entry rather than propagating.
    """
    try:
        # Extract the stack ONCE; with limit=2 entry [0] is the caller.
        # The original extracted the same stack twice for two fields.
        caller = traceback.extract_stack(limit=2)[0]
        file_name = caller[0].split("DistributedSocialNetworking/", 1)[1]
        line = caller[1]
        trace = "\n\n" + " line: " + str(line) + " in " + file_name + "\n"
        self.logger.debug(trace + " Message: " + message + "\n")
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed by logging.
        self.logger.debug("\n\n Error while logging message.\n")
def zero_division_no_error(app):
    # Trigger (and swallow) a ZeroDivisionError raised deep in the call
    # chain, then return normally.
    try:
        very_nested_zero_division_no_error(app)
    except ZeroDivisionError:
        pass
    # NOTE(review): the two discarded calls below look like deliberate test
    # fixtures (exercising exc_info/stack capture after a handled error).
    exc_info_1 = sys.exc_info()
    traceback.extract_stack()
    return 'ha!'
def enumerate_thread_trace(thread=None):
    """Yield (path, line, function, statement) tuples for a thread's stack.

    With no argument, the current thread's own stack is walked.  Otherwise
    *thread* may be either a thread object or a raw thread id.  Each path
    is normalized through ``clarify_source_path`` before being yielded.
    """
    if thread is not None:
        tid = thread if isinstance(thread, int) else thread.ident
        frames = traceback.extract_stack(sys._current_frames()[tid])
    else:
        frames = traceback.extract_stack()
    for source_path, lineno, func_name, stmt in frames:
        yield clarify_source_path(source_path), lineno, func_name, stmt
def fun1():
    # Demonstrates rebinding a module-level global and printing the source
    # text of the current call site.
    global a
    print a
    a = "aa"
    print a
    # extract_stack(None, 2): two most recent frames; [0][3] is the source
    # line of this function's frame.
    print traceback.extract_stack(None, 2)[0][3]
def __call__(self, severity, message, **kw):
    # Log *message* at *severity*, enriching kw with location/traceback
    # info depending on severity and verbosity, then delegate to _call.
    # Normalize severity to a Severity member (accepts int or name).
    if not isinstance(severity, Severity):
        if isinstance(severity, int):
            severity=Severity(severity)
        elif isinstance(severity, str):
            severity=Severity[severity]
        else:
            raise TypeError('severity', type(severity), Severity, int, str)
    if severity<self.severity:
        return
    try:
        if __debug__ and severity >= Error:
            # Errors: attach the fullest stack available -- an explicit
            # traceback, a frame, a precomputed location, or the caller's
            # own stack.
            if 'traceback' in kw:
                tb=kw['traceback']
                stack=traceback.extract_tb(tb)
            elif 'frame' in kw:
                f=kw['frame']
                stack=traceback.extract_stack(f)
            elif 'location' in kw:
                loc=kw['location']
                stack=[loc+('',)]
            else:
                stack=traceback.extract_stack(sys._getframe(1))
                kw.pop('location', None)
            stack=list(stack)
            if stack:
                # Innermost frame minus the source-line field.
                kw['location']=stack[-1][:-1]
                kw['traceback']=stack
            else:
                kw.pop('location', None)
                kw.pop('traceback', None)
        elif self.verbose:
            # Verbose non-errors: record only the single innermost location.
            if 'location' not in kw:
                if 'traceback' in kw:
                    tb=kw['traceback']
                    stack=traceback.extract_tb(tb, limit=1)
                elif 'frame' in kw:
                    f=kw['frame']
                    stack=traceback.extract_stack(f, limit=1)
                else:
                    stack=traceback.extract_stack(sys._getframe(1), limit=1)
                if stack:
                    kw['location']=stack[0][:-1]
                else:
                    kw.pop('location', None)
            kw.pop('traceback', None)
        else:
            kw.pop('traceback', None)
        kw.pop('frame', None)
        self._call(severity, message, kw)
    except:
        # Logging must never raise into the application.
        traceback.print_exc()
def get_index_html(url):
    # Fetch *url*: returns the body text on HTTP 200, None on connection
    # error, and (implicitly) None for any other status code.
    # NOTE(review): no timeout is set, so a stalled server can hang here.
    try:
        req = requests.get(url)
    except requests.ConnectionError:
        print traceback.extract_stack()
        return None
    else:
        if req.status_code == 200:
            return req.text
def debug( message, messagelevel = DEBUG, nolinebreak = False, prefix = None ):
    # Emit *message* to stdout (or stderr for ERROR+) when it meets the
    # configured level, prefixed with timestamp, thread name and level tag.
    if messagelevel >= config.DEBUG_LEVEL:
        toprint = ""
        timestring = time.strftime( "%Y/%m/%d %H:%M:%S", time.localtime() )
        # Append sub-second fraction (".xxx ") and thread name.
        timestring = timestring + ( "%.3f " % ( time.time() % 1 ) )[1:]
        timestring = timestring + " (%s) " % threading.currentThread().name
        timestring = timestring + ": " + IDS[messagelevel][0]
        if prefix:
            timestring = timestring + " %s: " % prefix
        # Strings get the prefix repeated on every line; other objects are
        # pretty-printed first.
        if type( message ) == type( "" ) or type( message ) == unicode:
            if not nolinebreak:
                toprint = timestring + ( "\n" + timestring ).join( message.split( "\n" ) ) + "\n"
            else:
                toprint = message
        else:
            if not nolinebreak:
                toprint = timestring + ( "\n" + timestring ).join( pprint.pformat( message ).split( "\n" ) ) + "\n"
            else:
                toprint = message
        # Strip non-ASCII characters for console safety.
        toprint = "".join( [x for x in toprint if ord( x ) < 128] )
        if isParanoiing():
            # Paranoid mode: append up to five caller frames.
            stack = traceback.extract_stack()
            if len( stack ) > 5:
                toprint += " File \"%s\", line %d, in %s\n" % ( stack[len( stack ) - 6][0], stack[len( stack ) - 6][1], stack[len( stack ) - 6][2] )
            if len( stack ) > 4:
                toprint += " File \"%s\", line %d, in %s\n" % ( stack[len( stack ) - 5][0], stack[len( stack ) - 5][1], stack[len( stack ) - 5][2] )
            if len( stack ) > 3:
                toprint += " File \"%s\", line %d, in %s\n" % ( stack[len( stack ) - 4][0], stack[len( stack ) - 4][1], stack[len( stack ) - 4][2] )
            toprint += " File \"%s\", line %d, in %s\n" % ( stack[len( stack ) - 3][0], stack[len( stack ) - 3][1], stack[len( stack ) - 3][2] )
            toprint += " File \"%s\", line %d, in %s\n\n" % ( stack[len( stack ) - 2][0], stack[len( stack ) - 2][1], stack[len( stack ) - 2][2] )
        elif isDebugging():
            # Debug mode: append only the immediate caller frame.
            stack = traceback.extract_stack()
            toprint += " File \"%s\", line %d, in %s\n\n" % ( stack[len( stack ) - 2][0], stack[len( stack ) - 2][1], stack[len( stack ) - 2][2] )
        if messagelevel >= ERROR:
            sys.stderr.write( toprint )
            sys.stderr.flush()
        else:
            sys.stdout.write( toprint )
            sys.stdout.flush()
def getentlist(self,startdate,enddate):
    # Scrape paginated company records between *startdate* and *enddate*.
    # Pages are fetched until a record older than startdate is seen or the
    # hard page cap (18673) is reached.
    pageNos=0
    while True:
        try:
            pageNos+=1
            if pageNos>18673:break
            req= Request(
                url='http://218.26.1.108/exceptionInfoSelect.jspx',
                data=self.getpostdata(pageNos),
                headers={'User-Agent':'Magic Browser'}
            )
            result=self.gethtml(req)
            infolist=result.findAll('a',attrs={'target':'_blank'})
            regIDlist=result.findAll('li',attrs={'class':'tb-a2'})
            datelist=result.findAll('li',attrs={'class':'tb-a3'})
            # First item of each list is the column header row.
            del regIDlist[0]
            del datelist[0]
            l=len(datelist)
        except Exception:
            self.printpageerror(pageNos)
            continue
        else:
            print('Page %d Reading' % pageNos)
        br=0
        for i in range(l):
            try:
                # Date cell text may be str or need unicode handling (Py2);
                # the regexes pull month/day out of "YYYY年M月D日".
                try:
                    cdate=str(datelist[i].contents[0])
                    reg_m=r'年(.*?)月'
                    reg_d=r'月(.*?)日'
                except:
                    cdate=unicode(datelist[i].contents[0])
                    reg_m=u'年(.*?)月'
                    reg_d=u'月(.*?)日'
                pattern=re.compile(reg_m)
                month=int(pattern.findall(cdate)[0])
                pattern=re.compile(reg_d)
                day=int(pattern.findall(cdate)[0])
                cdate=date(int(cdate[0:4]),month,day)
                if cdate<startdate:
                    # Records are newest-first: anything older means done.
                    br=1
                    break
                else:
                    if cdate<=enddate:
                        Name=infolist[i].contents[0].replace('\n','').strip()
                        if self.checkname(Name)==False:continue
                        regID=self.dealID(regIDlist[i].contents[0])
                        href=infolist[i].get('href')
                        entdict=dict(Name=Name,regID=regID,Date=cdate,href=href)
                        self.PrintInfo(entdict)
            except Exception:
                # NOTE(review): extract_stack() result is discarded here.
                traceback.extract_stack()
                self.printitemerror(pageNos,i)
                continue
        if br==1:break
def wrap(*arg, **kwargs):
    """Call the wrapped *function*, first reporting the call site.

    :param arg: positional arguments forwarded to the wrapped function.
    :return: whatever the wrapped function returns.
    """
    # Extract the stack ONCE (the original extracted it twice -- once for
    # the length check and again for the caller record).  [-2] is the
    # caller's frame summary: (filename, line, function, source).
    stack = traceback.extract_stack()
    if len(stack) > 1:
        mod, line, fun, cmd = stack[-2]
        print("%s was called by %s l.%s at %s" % (function.__name__, cmd, line, mod))
    return function(*arg, **kwargs)
def reset_attack_info(self):
    # Stop the current attack and clear target/delay state; no-op if not
    # attacking.
    if not self.attacking:
        return
    # Report which caller requested the stop ([-2][2] is the caller's
    # function name).
    print "[ pc ]", "stop attacking from",
    print traceback.extract_stack()[-2][2]
    self.attacking = False
    with self.e.lock_pclist:
        # NOTE(review): ``attacking`` is assigned again under the lock; the
        # assignment above appears redundant.
        self.attacking = False
        self.attacking_target = None
        self.attacking_delay = 0
def configuration(**kwargs):
    """Assemble a settings mapping from role/host config files and CLI options.

    The caller's directory (found via the call stack) supplies the default
    config root; command-line options may override values and inject
    ``section.option`` custom settings.  Returns a defaultdict of sections.
    """
    defaults = {
        'role': 'local',
    }
    # Find the callstack so we can identify the directory of our caller and use their local config.
    if kwargs.get('root') is None:
        stack = traceback.extract_stack()
        framework_dir = os.path.abspath(os.path.dirname(__file__))
        # Beginning with -2, the caller
        for index in xrange(-2, -1 * (len(stack) + 1), -1):
            # Reuse the already-captured stack instead of re-extracting it
            # on every iteration as the original did.
            caller_dir = os.path.abspath(os.path.dirname(stack[index][0]))
            # Exclude our own directory
            if caller_dir != framework_dir:
                defaults['root'] = caller_dir
                break
        if not 'root' in defaults:
            defaults['root'] = os.getcwd()
    spec = dict(defaults)
    # Check if a role was specified, or an explicit config file given.
    parser = clparser(options=kwargs.get('options', {}))
    if 'options' in kwargs:
        del kwargs['options']
    spec.update(kwargs)

    class attrdict(dict):
        def __setattr__(self, key, value):
            return self.__setitem__(key, value)

    (options, args) = parser.parse_args(sys.argv, values=attrdict())
    custom_options = {}
    for key in options.keys():
        if key.startswith('custom_options'):
            # BUGFIX: the original used key.lstrip('custom_options.'), which
            # strips a *character set* (any of c,u,s,t,o,m,_,p,i,n,.) from the
            # left -- mangling names like "custom_options.counter".  Slice the
            # prefix off instead.
            custom_options[key[len('custom_options.'):]] = options[key]
            del options[key]
    if 'custom_options' in options:
        del options['custom_options']
    spec.update(options)
    # Compile a list of potential config file names.
    filenames = []
    if 'config' in options:
        filenames.append(spec['config'])
    else:
        filenames.append('default.cfg')
        filenames.append(spec['role'] + '.cfg')
        filenames.append(os.uname()[1] + '.cfg')
    filenames = [os.path.abspath(os.path.join(spec['root'], filename)) for filename in filenames]
    settings = collections.defaultdict(lambda: {})
    parser = ConfigParser.SafeConfigParser()
    parser.read(filenames)
    for section in parser.sections():
        settings[section] = {}
        for option in parser.options(section):
            settings[section][option] = parser.get(section, option)
    # Custom CLI options ("section.option") override file values.
    for option_name in custom_options.keys():
        (section, option) = option_name.split('.')
        settings[section][option] = custom_options[option_name]
    return settings
def compute_traceback_astext(traceback_object=None, flag_fulltrace=True):
    """Convert traceback object to nice text.  See http://effbot.org/librarybook/traceback.htm.

    :param traceback_object: a traceback; defaults to the one currently
        being handled (``sys.exc_info()[2]``).
    :param flag_fulltrace: when true, prepend the frames leading up to the
        ``try`` block; when false, use only the exception's own traceback.
    """
    import sys
    if traceback_object is None:
        traceback_object = sys.exc_info()[2]
    if flag_fulltrace:
        tblist = traceback.extract_stack() + traceback.extract_tb(traceback_object)
    else:
        # BUGFIX: both branches used to be identical, making flag_fulltrace
        # a no-op; a non-full trace is just the exception traceback.
        tblist = traceback.extract_tb(traceback_object)
    #
    return compute_traceback_astext_fromlist(tblist)
def __get_my_name(self):
    '''
    This is a hack to get the name of the class instance variable.
    For internal use only.
    '''
    # Start three frames up (the caller of our caller) and read the source
    # text left of "=" as the variable name.
    i = -3
    (filename, line_number, function_name, text) = traceback.extract_stack()[i]
    name = text[:text.find('=')].strip()
    # While the recovered name still references 'self', walk outward one
    # frame at a time, substituting the enclosing assignment's target.
    while 'self' in name:
        i -= 1
        (filename, line_number, function_name, text) = traceback.extract_stack()[i]
        name = name.replace('self', text[:text.find('=')].strip())
    return name
def new(*args, **kwargs):
    # Deprecation shim: warn on stderr with the caller's stack (excluding
    # this wrapper's own frame), then delegate to the wrapped ``old``.
    print >> sys.stderr, 'Function %s is deprecated.' % old.__name__
    trace = traceback.extract_stack()
    for line in traceback.format_list(trace[:-1]):
        stderr(line[:-1])
    return old(*args, **kwargs)
def __init__(self, method: str, url: URL, *,
             params: Optional[Mapping[str, str]]=None,
             headers: Optional[LooseHeaders]=None,
             skip_auto_headers: Iterable[str]=frozenset(),
             data: Any=None,
             cookies: Optional[LooseCookies]=None,
             auth: Optional[BasicAuth]=None,
             version: http.HttpVersion=http.HttpVersion11,
             compress: Optional[str]=None,
             chunked: Optional[bool]=None,
             expect100: bool=False,
             loop: Optional[asyncio.AbstractEventLoop]=None,
             response_class: Optional[Type['ClientResponse']]=None,
             proxy: Optional[URL]=None,
             proxy_auth: Optional[BasicAuth]=None,
             timer: Optional[BaseTimerContext]=None,
             session: Optional['ClientSession']=None,
             ssl: Union[SSLContext, bool, Fingerprint, None]=None,
             proxy_headers: Optional[LooseHeaders]=None,
             traces: Optional[List['Trace']]=None):
    # Build an HTTP client request: normalize URL/method, then delegate to
    # the update_* helpers in a fixed, order-sensitive sequence.
    if loop is None:
        loop = asyncio.get_event_loop()
    assert isinstance(url, URL), url
    assert isinstance(proxy, (URL, type(None))), proxy
    # FIXME: session is None in tests only, need to fix tests
    # assert session is not None
    self._session = cast('ClientSession', session)
    if params:
        # Merge *params* into the URL's existing query string.
        q = MultiDict(url.query)
        url2 = url.with_query(params)
        q.extend(url2.query)
        url = url.with_query(q)
    self.original_url = url
    self.url = url.with_fragment(None)
    self.method = method.upper()
    self.chunked = chunked
    self.compress = compress
    self.loop = loop
    self.length = None
    if response_class is None:
        real_response_class = ClientResponse
    else:
        real_response_class = response_class
    self.response_class = real_response_class  # type: Type[ClientResponse]
    self._timer = timer if timer is not None else TimerNoop()
    self._ssl = ssl
    if loop.get_debug():
        # Record where the request was created for debug-mode diagnostics.
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
    self.update_version(version)
    self.update_host(url)
    self.update_headers(headers)
    self.update_auto_headers(skip_auto_headers)
    self.update_cookies(cookies)
    self.update_content_encoding(data)
    self.update_auth(auth)
    self.update_proxy(proxy, proxy_auth, proxy_headers)
    self.update_body_from_data(data)
    # Methods that normally carry no body only need transfer-encoding
    # headers when a body is actually present.
    if data or self.method not in self.GET_METHODS:
        self.update_transfer_encoding()
    self.update_expect_continue(expect100)
    if traces is None:
        traces = []
    self._traces = traces
def current_frames_with_threads(self):
    # Test for sys._current_frames(): verify that the mapping contains a
    # sane frame for both the main thread and a spawned, blocked thread.
    import threading
    import traceback
    # Spawn a thread that blocks at a known place.  Then the main
    # thread does sys._current_frames(), and verifies that the frames
    # returned make sense.
    entered_g = threading.Event()
    leave_g = threading.Event()
    thread_info = []  # the thread's id
    def f123():
        g456()
    def g456():
        thread_info.append(threading.get_ident())
        entered_g.set()
        leave_g.wait()
    t = threading.Thread(target=f123)
    t.start()
    entered_g.wait()
    # At this point, t has finished its entered_g.set(), although it's
    # impossible to guess whether it's still on that line or has moved on
    # to its leave_g.wait().
    self.assertEqual(len(thread_info), 1)
    thread_id = thread_info[0]
    d = sys._current_frames()
    for tid in d:
        self.assertIsInstance(tid, int)
        self.assertGreater(tid, 0)
    main_id = threading.get_ident()
    self.assertIn(main_id, d)
    self.assertIn(thread_id, d)
    # Verify that the captured main-thread frame is _this_ frame.
    frame = d.pop(main_id)
    self.assertTrue(frame is sys._getframe())
    # Verify that the captured thread frame is blocked in g456, called
    # from f123.  This is a litte tricky, since various bits of
    # threading.py are also in the thread's call stack.
    frame = d.pop(thread_id)
    stack = traceback.extract_stack(frame)
    for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
        if funcname == "f123":
            break
    else:
        self.fail("didn't find f123() on thread's call stack")
    self.assertEqual(sourceline, "g456()")
    # And the next record must be for g456().
    filename, lineno, funcname, sourceline = stack[i + 1]
    self.assertEqual(funcname, "g456")
    self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
    # Reap the spawned thread.
    leave_g.set()
    t.join()
def extract(**kwargs):
    # Thin wrapper: summarize the stack of ``frame`` (captured from the
    # enclosing scope); **kwargs forwards options such as ``limit``.
    return traceback.extract_stack(frame, **kwargs)
def indent():
    """Return the current call-stack depth relative to the module baseline.

    The constant 5 accounts for the frames that are always present below
    this helper, so a top-level caller gets a small, stable number.
    """
    return len(traceback.extract_stack()) - 5
def msg(self, message, log_type=None, log_stack=True, write_on_file=True, send_output=True, use_blink=False):
    """
    Send a log message containing errors or warnings: optionally echo it
    to online admins, append it to the daily log file, and print it to the
    console (with accent transliteration for limited terminals).
    """
    if not message:
        print("[log.bug] message non è un parametro valido: %r" % message)
        return
    # -------------------------------------------------------------------------
    from src.enums import LOG
    # This extract_stack call is a performance bottleneck for the Mud, but
    # its usefulness is beyond doubt, so until Aarit is fully mature it is
    # pointless to avoid it via a config option.  However, the printing of
    # last_function is skipped for "reset" log messages, which are the most
    # frequently sent.
    stack = None
    last_function = ""
    last_function = ""
    if not log_type or log_type.show_last_function:
        stack = traceback.extract_stack()
        last_function = str(stack[-3][2])
    # Print the message to the console and write it to file.
    if stack:
        # Shorten the source path to start at src/ or data/ when possible.
        source_name = stack[-3][0].replace(os.getcwd(), "")
        if "src" in source_name:
            position = source_name.find("src")
            source_name = source_name[position:]
        elif "data" in source_name:
            position = source_name.find("data")
            source_name = source_name[position:]
        source_line = stack[-3][1]
        last_function = " (%s %s %s)" % (last_function, source_name, source_line)
    # Strip color codes from the message for a friendlier on-screen print.
    from src.color import remove_colors
    message = remove_colors(message)
    # Send the log message to the Mud's admins.
    if send_output:
        from src.database import database
        if "players" in database and log_type.show_on_mud:
            from src.utility import html_escape
            for player in database["players"].itervalues():
                if not player.game_request:
                    continue
                # Trust and per-log-type permission filters.
                if player.trust < log_type.trust and str(log_type.code) not in player.permissions:
                    continue
                if log_type != LOG.ALWAYS and log_type not in player.account.show_logs and str(log_type.code) not in player.permissions:
                    continue
                open_span = ""
                close_span = ""
                if use_blink:
                    open_span = "<span style='text-decoration: blink;'>"
                    close_span = "</span>"
                player.send_output("<br>[magenta]%s%s %s%s[close]" % (open_span, last_function.lstrip(), html_escape(message), close_span), avoid_log=True)
                player.send_prompt()
    # Since the other loops are tied to these, there is no need to check
    # them all.
    from src.loops.aggressiveness import aggressiveness_loop
    from src.loops.blob import blob_loop
    from src.entitypes.corpse import decomposer_loop
    from src.loops.digestion import digestion_loop
    from src.fight import fight_loop
    from src.game import game_loop
    from src.maintenance import maintenance_loop
    from src.behaviour import room_behaviour_loop
    if (game_loop and game_loop.running
    and maintenance_loop and maintenance_loop.running
    and room_behaviour_loop and room_behaviour_loop.running
    and fight_loop and fight_loop.running
    and decomposer_loop and decomposer_loop.running
    and aggressiveness_loop and aggressiveness_loop.running
    and blob_loop and blob_loop.running
    and digestion_loop and digestion_loop.running):
        loop_status = "L"
    else:
        loop_status = "l"
    now = datetime.datetime.now()
    message = "%02d:%02d:%02d [%s] {%s}%s: %s" % (
        now.hour, now.minute, now.second, log_type, loop_status, last_function, message)
    log_file = None
    if write_on_file and log_type.write_on_file:
        # One log file per day, opened in append mode.
        log_path = "log/%d-%02d-%02d.log" % (now.year, now.month, now.day)
        try:
            log_file = open(log_path, "a")
        except IOError:
            print "Impossibile aprire il file %s in append" % log_path
            log_file = None
        else:
            log_file.write("%s\n" % message)
    # This is done because some consoles unfortunately do not support
    # accented characters and the like; the accent is therefore converted
    # to the classic muddish apostrophe form.  Beware that in this case the
    # stdout message differs from the original, should you grep the code
    # for it.
    if log_type.print_on_console:
        from src.config import config
        if config.ready and config.log_accents:
            if "à" in message:
                message = message.replace("à", "a'")
            if "è" in message:
                message = message.replace("è", "e'")
            if "é" in message:
                message = message.replace("é", "e'")
            if "ì" in message:
                message = message.replace("ì", "i'")
            if "ò" in message:
                message = message.replace("ò", "o'")
            if "ù" in message:
                message = message.replace("ù", "u'")
        print(message)
    # If the log type is not a bug, skip the stack information.
    if log_type != LOG.BUG:
        log_stack = False
    if stack and write_on_file and log_type.write_on_file and log_stack:
        try:
            traceback.print_stack(file=log_file)
            traceback.print_stack(file=sys.stdout)
        except IOError:
            # (TT) I have not quite understood why it behaves like this, but
            # simply skipping it avoids doing the traceback; the log itself
            # has happened anyway.
            pass
    if log_file:
        log_file.close()
def extract():
    """Capture and return a summary of the current call stack."""
    current_stack = traceback.extract_stack()
    return current_stack
def whoami():
    """Return the name of the function that called ``whoami``."""
    # [-1] is this frame; [-2] is the caller's frame summary,
    # whose third field is the function name.
    caller = traceback.extract_stack()[-2]
    return caller[2]
def remove_tag(tag_to_remove, lambda_arn):
    """Remove a single tag from the Lambda function at *lambda_arn*.

    The untag response is logged at info level, prefixed with this
    function's own name (recovered from the call stack).
    """
    current_func = traceback.extract_stack(None, 2)[0][2]
    untag_response = lambda_client.untag_resource(
        Resource=lambda_arn, TagKeys=[tag_to_remove])
    logger.info(":{0}: response = [{1}]".format(current_func, str(untag_response)))
def apply(self, inputs):
    """Computes output variables and grows the computational graph.

    Basic behavior is expressed in the documentation of
    :class:`FunctionNode`.

    .. note::

       If the :data:`~Variable.data` attribute of input variables exist on
       a GPU device, that device is made current before calling
       :meth:`forward`, so implementors do not need to take care of device
       selection in most cases.

    Args:
        inputs: Tuple of input variables. Each element can be either
            :class:`~chainer.Variable`, :class:`numpy.ndarray`, or
            :class:`cupy.ndarray`. If the element is an ndarray, it is
            automatically wrapped with :class:`~chainer.Variable`.

    Returns:
        A tuple of output :class:`~chainer.Variable` objects.
    """
    # Normalize raw arrays to Variables and extract the raw data tuple.
    input_vars = [chainer.as_variable(x) for x in inputs]
    in_data = tuple([x.data for x in input_vars])
    # The outputs require grad if any input variable does.
    requires_grad = any([x.requires_grad for x in input_vars])
    # Check for input array types
    xp = cuda.get_array_module(*in_data)
    if not all([x is None or isinstance(x, xp.ndarray) for x in in_data]):
        raise ValueError(
            'numpy and cupy arrays are mixed in the forward input '
            '({}).\n'
            '{}'.format(self.label,
                        ', '.join(str(type(x)) for x in in_data)))
    is_debug = chainer.is_debug()
    if is_debug:
        # Keep stack trace for debug
        self.stack = traceback.extract_stack()
    if configuration.config.type_check:
        self._check_data_type_forward(in_data)
    # Merge global hooks with this node's local hooks (local ones last).
    hooks = chainer.get_function_hooks()
    if self._n_local_function_hooks > 0:
        hooks = collections.OrderedDict(hooks)
        hooks.update(self.local_function_hooks)
    hooks = hooks.values()  # avoid six for performance
    for hook in hooks:
        hook.forward_preprocess(self, in_data)
    # Forward propagation
    with cuda.get_device_from_array(*in_data):
        self._input_indexes_to_retain = None
        self._output_indexes_to_retain = None
        outputs = self.forward(in_data)
    # Check for output array types
    if not isinstance(outputs, tuple):
        raise TypeError('forward output must be a tuple ({})\n'
                        'Actual: {}'.format(self.label, type(outputs)))
    xp = cuda.get_array_module(*outputs)
    if not all([x is None or
                isinstance(x, xp.ndarray) for x in outputs]):
        raise ValueError(
            'numpy and cupy arrays are mixed in the forward output '
            '({}).\n'
            '{}'.format(self.label,
                        ', '.join(str(type(x)) for x in outputs)))
    for hook in hooks:
        hook.forward_postprocess(self, in_data)
    # NaN check of output values
    if is_debug:
        if any(out.dtype.kind == 'f' and
               cuda.get_array_module(out).isnan(out).any()
               for out in outputs):
            msg = ('NaN is detected on forward computation of '
                   '{}'.format(self.label))
            raise RuntimeError(msg)
    ret = tuple([variable.Variable(y, requires_grad=requires_grad)
                 for y in outputs])
    if configuration.config.enable_backprop:
        # Topological ordering
        self.rank = max([x.rank for x in input_vars]) if input_vars else 0
        # Add backward edges
        for y in ret:
            y.creator_node = self
        self.inputs = tuple([x.node for x in input_vars])
        # Add forward edges (must be weak references)
        self.outputs = tuple([weakref.ref(y.node) for y in ret])
        # Retain the inputs/outputs requested by forward() for backward().
        if self._input_indexes_to_retain is not None:
            for index in self._input_indexes_to_retain:
                input_vars[index].retain_data()
        if self._output_indexes_to_retain is not None:
            retained_data = []
            for index in self._output_indexes_to_retain:
                ret[index].retain_data()
                retained_data.append(outputs[index])
            self._retained_output_data = tuple(retained_data)
    return ret
def callFunInTheMainThread(self, fun_to_call, *args):
    """
    This method is called from BG threads. Its purpose is to run
    'fun_to_call' from the main thread (used for dialogs) and return the
    values returned from it.
    :param fun_to_call: ref to a function which is to be called
    :param args: args passed to the function fun_to_call
    :return: return value from fun_to_call
    """
    exception_to_rethrow = None
    ret = None
    try:
        if threading.current_thread() != threading.main_thread():
            # check whether the main thread waits for the lock acquired by
            # the current thread; if so, raise deadlock detected exception
            dl_check = thread_utils.EnhRLock.detect_deadlock(
                threading.main_thread())
            if dl_check is not None:
                # find a caller of the current method (skip callers from
                # the current module)
                caller_file = ''
                caller_line = ''
                for si in reversed(traceback.extract_stack()):
                    if si.name != 'callFunInTheMainThread':
                        caller_file = si.filename
                        caller_line = si.lineno
                        break
                raise DeadlockException(
                    'Deadlock detected. Trying to synchronize with the main thread (c), which '
                    'is waiting (b) for a lock acquired by this thread (a).\n'
                    ' CURRENT_THREAD ->(a)[LOCK]--->(c)[MAIN_THREAD]\n'
                    ' MAIN_THREAD ---->(b)[LOCK]\n'
                    ' a. file "%s", line %s\n'
                    ' b. file "%s", line %s\n'
                    ' c. file "%s", line %s' %
                    (dl_check[2], dl_check[3], dl_check[0], dl_check[1],
                     caller_file, caller_line))
            # Lock a fresh mutex; the main thread releases it once the
            # called function has finished (in funCallSignalled).
            mutex = QtCore.QMutex()
            mutex.lock()
            locked = False
            try:
                self.fun_call_exception = None
                self.fun_call_ret_value = None
                # emit signal to call the function fun in the main thread
                self.fun_call_signal.emit(fun_to_call, args, mutex)
                # wait for the function to finish; lock will be successful
                # only when the first lock made a few lines above is
                # released in the funCallSignalled method
                tm_begin = time.time()
                locked = mutex.tryLock(3600000)  # wait 1h max
                tm_diff = time.time() - tm_begin
                if not locked:
                    logging.exception(
                        "Problem communicating with the main thread - couldn't lock mutex. Lock "
                        "wait time: %ss." % str(tm_diff))
                    raise Exception(
                        "Problem communicating with the main thread - couldn't lock mutex. Lock "
                        "wait time: %ss." % str(tm_diff))
                ret = self.fun_call_ret_value
            finally:
                if locked:
                    mutex.unlock()
                del mutex
            if self.fun_call_exception:
                # if there was an exception in the fun, pass it to the
                # calling code
                exception_to_rethrow = self.fun_call_exception
        else:
            # Already on the main thread: call the function directly.
            return fun_to_call(*args)
    except DeadlockException:
        raise
    except Exception as e:
        logging.exception(
            'ThreadWndUtils.callFunInTheMainThread error: %s' % str(e))
        raise
    # Re-raise outside the try so the except clauses above do not log it
    # a second time.
    if exception_to_rethrow:
        raise exception_to_rethrow
    return ret
def apply(self, inputs):
    """Computes output variables and grows the computational graph.

    Basic behavior is expressed in the documentation of
    :class:`FunctionNode`.

    .. note::

       If the :data:`~Variable.data` attribute of input variables exist on
       a GPU device, that device is made current before calling
       :meth:`forward`, so implementors do not need to take care of device
       selection in most cases.

    Args:
        inputs: Tuple of input variables. Each element can be either
            :class:`~chainer.Variable`, :class:`numpy.ndarray`, or
            :class:`cupy.ndarray`. If the element is an ndarray, it is
            automatically wrapped with :class:`~chainer.Variable`.

    Returns:
        A tuple of output :class:`~chainer.Variable` objects.
    """
    chainerx_in_data = None
    chainerx_device = None
    is_chainerx, in_data = _extract_apply_in_data(inputs)
    if is_chainerx:
        # Try ChainerX C++ implementation.
        # If it's supported, the output arrays are wrapped with Variables
        # and returned.
        # If not supported, FunctionNode.forward_chainerx should return
        # Fallback.
        # In that case the input arrays are converted to numpy.ndarray
        # or cupy.ndarray (depending on the ChainerX backend) and
        # forward computation falls back to the conventional
        # FunctionNode.forward() implementaion.
        outputs = self.forward_chainerx(in_data)
        if outputs is not chainer.Fallback:
            # Supported. Wrap with variables and return
            assert isinstance(outputs, tuple)
            return tuple([
                variable.Variable._init_unchecked(
                    y, requires_grad=y.is_backprop_required(),
                    is_chainerx_array=True)
                for y in outputs])
        # Fall back to FunctionNode.forward()
        chainerx_in_data, in_data, chainerx_device = (
            self._chainerx_apply_fallback_preprocess(in_data, inputs))
        self._is_chainerx_fallback_mode = True
        self.chainerx_device = chainerx_device
    utils._check_arrays_forward_compatible(in_data, self.label)
    is_debug = chainer.is_debug()
    if is_debug:
        # Keep stack trace for debug
        self.stack = traceback.extract_stack()
    if configuration.config.type_check:
        self._check_data_type_forward(in_data)
    # Merge global hooks with this node's local hooks (local ones last).
    hooks = chainer.get_function_hooks()
    if self._n_local_function_hooks > 0:
        hooks = collections.OrderedDict(hooks)
        hooks.update(self.local_function_hooks)
    hooks = hooks.values()  # avoid six for performance
    for hook in hooks:
        hook.forward_preprocess(self, in_data)
    # Forward propagation
    with cuda.get_device_from_array(*in_data):
        self._input_indexes_to_retain = None
        self._output_indexes_to_retain = None
        if chainer.config.schedule_func is not None:
            outputs = static_forward_optimizations(self, in_data)
        elif self._is_chainerx_fallback_mode:
            # In ChainerX fallback, __class__ is temporarily replaced with
            # the fabricated one with automatic attirbute fallback.
            with _chainerx_attribute_fallback(self, chainerx_device):
                outputs = self.forward(in_data)
        else:
            # In normal case, simply run the forward method.
            outputs = self.forward(in_data)
    # Check for output array types
    if not isinstance(outputs, tuple):
        raise TypeError('forward output must be a tuple ({})\n'
                        'Actual: {}'.format(self.label, type(outputs)))
    if not chainer.is_arrays_compatible(outputs):
        raise TypeError(
            'incompatible array types are mixed in the forward output '
            '({}).\n'
            'Actual: {}'.format(self.label,
                                ', '.join(str(type(x)) for x in outputs)))
    for hook in hooks:
        hook.forward_postprocess(self, in_data)
    # NaN check of output values
    if is_debug:
        if any(chainer.backend._contains_nan(out) for out in outputs):
            msg = ('NaN is detected on forward computation of '
                   '{}'.format(self.label))
            raise RuntimeError(msg)
    self._output_count = len(outputs)
    if self._is_chainerx_fallback_mode:
        ret = self._chainerx_apply_fallback_postprocess(
            chainerx_in_data, inputs, outputs)
    else:
        input_vars = [chainer.as_variable(x) for x in inputs]
        requires_grad = any([x.requires_grad for x in input_vars])
        ret = tuple([variable.Variable(y, requires_grad=requires_grad)
                     for y in outputs])
        if configuration.config.enable_backprop:
            # Topological ordering
            self.rank = max([x.rank for x in input_vars]) if input_vars else 0
            # Add backward edges
            for y in ret:
                y.creator_node = self
            self.inputs = tuple([x.node for x in input_vars])
            # Add forward edges (must be weak references)
            self.outputs = tuple([weakref.ref(y.node) for y in ret])
            # Retain the inputs/outputs requested by forward() for backward().
            if self._input_indexes_to_retain is not None:
                for index in self._input_indexes_to_retain:
                    input_vars[index].retain_data()
            if self._output_indexes_to_retain is not None:
                retained_data = []
                for index in self._output_indexes_to_retain:
                    ret[index].retain_data()
                    retained_data.append(outputs[index])
                self._retained_output_data = tuple(retained_data)
    # NOTE(review): indentation reconstructed from a collapsed source;
    # confirm whether lazy_grad_sum should also be set on the ChainerX
    # fallback path or only in the non-fallback branch.
    self.lazy_grad_sum = configuration.config.lazy_grad_sum
    return ret
# Define a function here. def temp_convert(var): try: return int(var) except ValueError as Argument: print Argument.message print "The argument does not contain numbers\n", Argument # Call above function here. temp_convert("xyz") import traceback import sys try: Argument = "xxxx" raise Exception, Argument except Exception, Argument: exc_type, exc_value, exc_traceback = sys.exc_info() print " try catch ", Argument ''' traceback.print_exception(exc_type, exc_value, exc_traceback, limit=3, file=sys.stdout) ''' s = traceback.extract_stack() print s
def debug(__x):
    """Print ``name = value , type: t`` for the expression the caller passed.

    The "name" is recovered from the caller's source line text: slicing
    off the leading ``debug(`` (6 characters) and the trailing ``)``.
    """
    caller_line = traceback.extract_stack(limit=2)[0][3]
    expr_text = caller_line[6:][:-1]  # strip "debug(" prefix and ")" suffix
    print(expr_text, "=", __x, ', type:', type(__x))
def warn(*args, **kwargs):
    """Emit a warning via the original handler, then print the call stack.

    The stack is captured before delegating so it reflects this call site;
    the final entry (this wrapper's own frame) is dropped from the output.
    """
    stack_entries = traceback.extract_stack()
    _old_warn(*args, **kwargs)
    formatted = traceback.format_list(stack_entries)
    print("".join(formatted[:-1]))
def __init__(self):
    """Build a Pfx prefix describing the caller and its caller.

    The three most recent stack entries are, oldest first:
    [grandcaller, caller, this __init__ frame].
    """
    grandcaller, caller, _own_frame = traceback.extract_stack(None, 3)
    Pfx.__init__(
        self,
        "at %s:%d %s(), called from %s:%d %s()",
        caller[0], caller[1], caller[2],
        grandcaller[0], grandcaller[1], grandcaller[2])
def __init__(self, *,
             connector: Optional[BaseConnector] = None,
             loop: Optional[asyncio.AbstractEventLoop] = None,
             cookies: Optional[LooseCookies] = None,
             headers: Optional[LooseHeaders] = None,
             skip_auto_headers: Optional[Iterable[str]] = None,
             auth: Optional[BasicAuth] = None,
             json_serialize: JSONEncoder = json.dumps,
             request_class: Type[ClientRequest] = ClientRequest,
             response_class: Type[ClientResponse] = ClientResponse,
             ws_response_class: Type[
                 ClientWebSocketResponse] = ClientWebSocketResponse,  # noqa
             version: HttpVersion = http.HttpVersion11,
             cookie_jar: Optional[AbstractCookieJar] = None,
             connector_owner: bool = True,
             raise_for_status: bool = False,
             read_timeout: Union[float, object] = sentinel,
             conn_timeout: Optional[float] = None,
             timeout: Union[object, ClientTimeout] = sentinel,
             auto_decompress: bool = True,
             trust_env: bool = False,
             requote_redirect_url: bool = True,
             trace_configs: Optional[List[TraceConfig]] = None) -> None:
    """Initialize the client session.

    All arguments are keyword-only.  ``read_timeout`` and ``conn_timeout``
    are deprecated aliases that are folded into ``timeout`` (and conflict
    with an explicitly supplied ``timeout``).
    """
    # Prefer the connector's loop when none was given explicitly.
    if loop is None:
        if connector is not None:
            loop = connector._loop
    loop = get_running_loop(loop)
    if connector is None:
        connector = TCPConnector(loop=loop)
    # A session and its connector must share one event loop.
    if connector._loop is not loop:
        raise RuntimeError(
            "Session and connector has to use same event loop")
    self._loop = loop
    # In debug mode remember where the session was created from, so
    # "unclosed session" warnings can point at the construction site.
    if loop.get_debug():
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
    if cookie_jar is None:
        cookie_jar = CookieJar(loop=loop)
    self._cookie_jar = cookie_jar
    if cookies is not None:
        self._cookie_jar.update_cookies(cookies)
    self._connector = connector  # type: BaseConnector
    self._connector_owner = connector_owner
    self._default_auth = auth
    self._version = version
    self._json_serialize = json_serialize
    if timeout is sentinel:
        # No explicit timeout: start from the default and apply the
        # deprecated per-field arguments, warning about each.
        self._timeout = DEFAULT_TIMEOUT
        if read_timeout is not sentinel:
            warnings.warn("read_timeout is deprecated, "
                          "use timeout argument instead",
                          DeprecationWarning,
                          stacklevel=2)
            self._timeout = attr.evolve(self._timeout, total=read_timeout)
        if conn_timeout is not None:
            self._timeout = attr.evolve(self._timeout,
                                        connect=conn_timeout)
            warnings.warn("conn_timeout is deprecated, "
                          "use timeout argument instead",
                          DeprecationWarning,
                          stacklevel=2)
    else:
        # Explicit timeout given: the deprecated arguments must not be
        # mixed with it.
        self._timeout = timeout  # type: ignore
        if read_timeout is not sentinel:
            raise ValueError("read_timeout and timeout parameters "
                             "conflict, please setup "
                             "timeout.read")
        if conn_timeout is not None:
            raise ValueError("conn_timeout and timeout parameters "
                             "conflict, please setup "
                             "timeout.connect")
    self._raise_for_status = raise_for_status
    self._auto_decompress = auto_decompress
    self._trust_env = trust_env
    self._requote_redirect_url = requote_redirect_url
    # Convert to list of tuples
    if headers:
        headers = CIMultiDict(headers)
    else:
        headers = CIMultiDict()
    self._default_headers = headers
    if skip_auto_headers is not None:
        self._skip_auto_headers = frozenset(
            [istr(i) for i in skip_auto_headers])
    else:
        self._skip_auto_headers = frozenset()
    self._request_class = request_class
    self._response_class = response_class
    self._ws_response_class = ws_response_class
    # Freeze trace configs so they cannot change mid-session.
    self._trace_configs = trace_configs or []
    for trace_config in self._trace_configs:
        trace_config.freeze()
def extract_stack(stack):
    """Yield human-readable lines describing each frame of *stack*.

    Each frame yields a ``File: ...`` header line and, when source text is
    available, a second line with the stripped code.
    """
    for frame_file, frame_lineno, frame_name, code in \
            traceback.extract_stack(stack):
        yield 'File: "%s", line %d, in %s' % (frame_file, frame_lineno,
                                              frame_name)
        if code:
            yield " %s" % (code.strip(), )
def get_subroutine_name():
    """Return the name of the function that invoked this helper."""
    import traceback
    # Limit the walk to two frames ([caller, this function]) and take the
    # caller's function-name field.
    caller_frame = traceback.extract_stack(None, 2)[0]
    return caller_frame[2]
def __repr__(self):
    """Display the object unless invoked by IPython's ``info`` machinery.

    PUBDEV-2278: typing ``<method>?`` in IPython triggered __repr__ and
    dumped everything, so that particular caller is detected and skipped.
    """
    caller = traceback.extract_stack()[-2]
    called_from_ipython_info = "IPython" in caller[0] and caller[2] == "info"
    if not called_from_ipython_info:
        self.show()
    return ""
def __init__(self, msg='Unknown Error', include_traceback=True):
    """Store the error message and, optionally, the current call stack.

    :param msg: human-readable error description.
    :param include_traceback: when true, capture the stack at construction
        time in ``self.traceback`` for later reporting.
    """
    self.msg = msg
    if not include_traceback:
        return
    self.traceback = traceback.extract_stack()
def get_function_name():
    """Return the caller's function name."""
    # The two most recent frames are [caller, this helper]; field 2 of the
    # caller entry is its function name.
    stack_tail = traceback.extract_stack(None, 2)
    return stack_tail[0][2]
def add_tag(key, value, lambda_arn):
    """Attach a single ``key: value`` tag to the given Lambda function.

    :param key: tag key.
    :param value: tag value.
    :param lambda_arn: ARN of the Lambda function to tag.
    """
    # Own function name (for log prefixes only).
    this_func = traceback.extract_stack(None, 2)[0][2]
    new_tags = {key: value}
    logger.debug(":{0}: lambda_arn = [{1}]".format(this_func, lambda_arn))
    result = lambda_client.tag_resource(Resource=lambda_arn, Tags=new_tags)
    logger.info(":{0}:response from add tag {1}".format(this_func, result))
def log(self, level, text):
    """Forward *text* at *level* to the adapter, tagged with the caller's
    file name and line number. No-op when no adapter is configured."""
    adapter = self.log_adapter
    if not adapter:
        return
    caller = traceback.extract_stack(limit=2)[0]  # caller frame info
    adapter.log(level, text, caller[0], caller[1])
def report_internal_error(err: Exception,
                          file: Optional[str],
                          line: int,
                          errors: Errors,
                          options: Options,
                          stdout: Optional[TextIO] = None,
                          stderr: Optional[TextIO] = None,
                          ) -> None:
    """Report internal error and exit.

    This optionally starts pdb or shows a traceback.

    Args:
        err: the unexpected exception being reported.
        file: source file being processed when the error occurred, if any.
        line: line number in that file (0/falsy when unknown).
        errors: accumulated analysis errors to dump before the report.
        options: controls pdb, traceback display and re-raising.
        stdout/stderr: override streams (default to sys.stdout/sys.stderr).

    Raises:
        SystemExit: always (exit code 2), unless ``options.raise_exceptions``
            re-raises ``err`` first.
    """
    stdout = (stdout or sys.stdout)
    stderr = (stderr or sys.stderr)
    # Dump out errors so far, they often provide a clue.
    # But catch unexpected errors rendering them.
    try:
        for msg in errors.new_messages():
            print(msg)
    except Exception as e:
        print("Failed to dump errors:", repr(e), file=stderr)
    # Compute file:line prefix for official-looking error messages.
    if file:
        if line:
            prefix = '{}:{}: '.format(file, line)
        else:
            prefix = '{}: '.format(file)
    else:
        prefix = ''
    # Print "INTERNAL ERROR" message.
    print('{}error: INTERNAL ERROR --'.format(prefix),
          'Please try using mypy master on Github:\n'
          'https://mypy.readthedocs.io/en/stable/common_issues.html'
          '#using-a-development-mypy-build',
          file=stderr)
    if options.show_traceback:
        print('Please report a bug at https://github.com/python/mypy/issues',
              file=stderr)
    else:
        print('If this issue continues with mypy master, '
              'please report a bug at https://github.com/python/mypy/issues',
              file=stderr)
    print('version: {}'.format(mypy_version), file=stderr)
    # If requested, drop into pdb. This overrides show_tb.
    if options.pdb:
        print('Dropping into pdb', file=stderr)
        import pdb
        pdb.post_mortem(sys.exc_info()[2])
    # If requested, print traceback, else print note explaining how to get one.
    if options.raise_exceptions:
        raise err
    if not options.show_traceback:
        if not options.pdb:
            print('{}: note: please use --show-traceback to print a traceback '
                  'when reporting a bug'.format(prefix), file=stderr)
    else:
        # Splice the current call stack (minus this reporting machinery)
        # with the traceback of the failed operation.
        tb = traceback.extract_stack()[:-2]
        tb2 = traceback.extract_tb(sys.exc_info()[2])
        print('Traceback (most recent call last):')
        for s in traceback.format_list(tb + tb2):
            print(s.rstrip('\n'))
        print('{}: {}'.format(type(err).__name__, err), file=stdout)
        print('{}: note: use --pdb to drop into pdb'.format(prefix),
              file=stderr)
    # Exit. The caller has nothing more to say.
    # We use exit code 2 to signal that this is no ordinary error.
    raise SystemExit(2)
def point_in_time_restore(fragment, stack_of_interest, deployed_template):
    """Advance the RDS point-in-time-restore workflow by one step.

    The workflow spans several invocations and is tracked with tags on
    this Lambda function:

      1. Nothing in flight: start a point-in-time restore of the source
         DB instance into a fresh target instance and tag it.
      2. Restored instance is available: snapshot it, tag the snapshot.
      3. Snapshot is available: splice it into *fragment* and clean up.

    :param fragment: template fragment to transform (returned, possibly
        modified via ``_create_snapshot_point_in_time``).
    :param stack_of_interest: stack name used to locate the source DB
        instance identifier.
    :param deployed_template: unused here; kept for interface
        compatibility with the sibling transform steps.
    :return: the (possibly updated) fragment.
    """
    func_name = traceback.extract_stack(None, 2)[0][2]
    # Robustness fix: default to '' so a missing env var does not raise
    # AttributeError on .rstrip().
    replace_with_snapshot = os.environ.get(
        'replace_with_snapshot', '').rstrip().lower()
    restore = os.environ['restore_point_in_time'].rstrip()
    restore_time = os.environ['restore_time'].rstrip()
    resp = None
    if restore and replace_with_snapshot != "true":
        lambda_arn = get_function_arn(os.environ['AWS_LAMBDA_FUNCTION_NAME'])
        instances = client.describe_db_instances()
        target_db_instance, restored_snap_shot_id = get_tagged_db_instance(
            lambda_arn)
        state = None
        if target_db_instance:
            state = get_instance_state(target_db_instance, instances)
        snapshot_state = None
        if restored_snap_shot_id:
            snapshot_state = get_snapshot_state(restored_snap_shot_id)
        # Bug fix: the original never called .format(), so the literal
        # braces were logged; supply the actual values.
        logger.info(
            "snapshot_id = [{}] state = [{}] target_db_instance_id = [{}] "
            "state = [{}] ".format(restored_snap_shot_id, snapshot_state,
                                   target_db_instance, state))
        if state is None and snapshot_state is None:
            # Step 1: nothing in flight yet -- kick off the restore.
            db_instance = parse_db_identifier(instances, stack_of_interest)
            target_db_instance = "tdi" + str(uuid.uuid4())
            logger.info(
                ":{0}:pefrorming point in time restore curent_db_instance = "
                "[{1}] taget_db_instance = [{2}] state = [{3}] "
                "restore_time=[{4}]".format(func_name, db_instance,
                                            target_db_instance, state,
                                            restore_time))
            try:
                if restore.lower() == "true" and restore_time:
                    backup_retention_period = get_back_retention_period(
                        instances, db_instance)
                    if not check_if_point_in_time_date_is_valid(
                            restore_time, backup_retention_period):
                        raise Exception(
                            "Supplied date {0} is not valid".format(
                                restore_time))
                    resp = client.restore_db_instance_to_point_in_time(
                        SourceDBInstanceIdentifier=db_instance,
                        TargetDBInstanceIdentifier=target_db_instance,
                        RestoreTime=restore_time)
                elif restore.lower() == "true":
                    # No explicit restore time: use the latest restorable one.
                    resp = client.restore_db_instance_to_point_in_time(
                        SourceDBInstanceIdentifier=db_instance,
                        TargetDBInstanceIdentifier=target_db_instance,
                        UseLatestRestorableTime=True)
                logger.info(
                    ":{0}:response from point in time restore = {1}".format(
                        func_name, resp))
                if resp:
                    # Tag the in-flight target instance so the next
                    # invocation can resume the workflow at step 2.
                    add_tag(
                        point_in_time_db_instance_tag,
                        "{0}:{1}".format(
                            target_db_instance,
                            resp['DBInstance']['DBInstanceStatus']),
                        lambda_arn)
            except ClientError:
                stack_trace = _format_stacktrace()
                logger.error(
                    ":{0}:POINT_IN_TIME_RESTORE:problems creating point in "
                    "time restore: stack_trace={1}".format(
                        func_name, stack_trace))
        elif target_db_instance and state and state.lower() in "available":
            # Step 2: restored instance is up -- snapshot it.
            # NOTE: ``state.lower() in "available"`` is a substring test
            # (preserved from the original); the added ``state`` guard
            # fixes an AttributeError when the lookup returned None.
            restored_snapshot_id = "rsi" + str(uuid.uuid4())
            logger.debug(
                ":{0}:POINT_IN_TIME_RESTORE_CREATING_SNAPSHOT: "
                "snapshotid = [{1}]".format(func_name, restored_snapshot_id))
            res = client.create_db_snapshot(
                DBSnapshotIdentifier=restored_snapshot_id,
                DBInstanceIdentifier=target_db_instance)
            logger.info(
                ":{0}:response from create_snapshot_of_point_in_time={1}".
                format(func_name, str(res)))
            ss = res['DBSnapshot']['Status']
            if ss.lower() == 'available':
                # Snapshot finished immediately: splice it in right away.
                _create_snapshot_point_in_time(fragment, restored_snapshot_id)
            else:
                # Snapshot still creating: tag it so step 3 can find it.
                add_tag(
                    point_in_time_snapshot_db_instance_tag,
                    "{0}:{1}:{2}".format(restored_snapshot_id, ss.lower(),
                                         target_db_instance),
                    lambda_arn)
            # The instance tag has served its purpose once the snapshot
            # stage has started.
            remove_tag(point_in_time_db_instance_tag, lambda_arn)
            #delete_db_instance(target_db_instance)
        elif restored_snap_shot_id and snapshot_state and \
                snapshot_state.lower() in "available":
            # Step 3: snapshot is ready -- use it and clean everything up.
            _create_snapshot_point_in_time(fragment, restored_snap_shot_id)
            db = get_tagged_db_instance_from_restore_id(lambda_arn)
            delete_db_instance(db)
            remove_tag(point_in_time_snapshot_db_instance_tag, lambda_arn)
        else:
            # Still waiting on AWS; just report progress.
            if state is not None:
                logger.info(
                    ":{0}: state of point in time restore {1}".format(
                        func_name, str(state)))
            else:
                # Bug fix: the original referenced the undefined name
                # ``snapshot_id`` (NameError) and passed format args
                # positionally to {}-style messages (never interpolated).
                logger.info(
                    ":{0}: state of point in time snaphost {1}".format(
                        func_name, snapshot_state))
    return fragment
def get_function_arn(f_name):
    """Return the ARN of the named Lambda function.

    :param f_name: Lambda function name to look up.
    :return: the function's ARN as a plain string.
    """
    # The original computed ``func_name`` from the call stack but never
    # used it; the dead local has been removed.
    resp = lambda_client.get_function(FunctionName=f_name)
    return str(resp['Configuration']['FunctionArn'])
def get_tb(s):
    """Return a summary of the two most recent stack frames.

    NOTE(review): the *s* argument is accepted but never used; it is kept
    only for interface compatibility with existing callers.
    """
    recent_frames = traceback.extract_stack(limit=2)
    return recent_frames
def log(msg, log_level=LOG_LEVEL):
    """Print *msg* when *log_level* is at or below the module threshold.

    The line is prefixed with the level, the module FILE tag and the name
    of the calling function (second-to-last stack entry).  Note that the
    default for *log_level* is bound once, at definition time.
    """
    global LOG_LEVEL
    if log_level > LOG_LEVEL:
        return
    caller_name = traceback.extract_stack()[-2][2]
    print(str(log_level) + ' : ' + FILE + ' ::' + caller_name + ' : ' + msg)
def ap(e):
    """Lift ``traceback.extract_stack(e)`` into the maybe type.

    Returns ``nothing`` when the extraction yields None (a defensive
    check kept from the original; extract_stack appears to always return
    a list), otherwise ``just(result)``.
    """
    frames = traceback.extract_stack(e)
    return nothing if frames is None else just(frames)