def wrapper(*args, **kwargs):
    print "import %s modules" % name
    __import__(name)
    frame = inspect.currentframe()
    print inspect.getargvalues(frame)
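# --- Added sketch (not from the corpus): the wrapper above only makes sense
# inside a decorator factory that closes over `name`. A minimal Python 3
# reconstruction with the hypothetical name `import_logged`:
import inspect

def import_logged(name):
    def decorator(f):
        def wrapper(*args, **kwargs):
            print("import %s modules" % name)
            __import__(name)
            frame = inspect.currentframe()
            # ArgInfo for this frame: args=['args', 'kwargs'] plus their values
            print(inspect.getargvalues(frame))
            return f(*args, **kwargs)
        return wrapper
    return decorator

@import_logged('json')
def load(text):
    import json
    return json.loads(text)

assert load('{"a": 1}') == {"a": 1}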
def log(self, level, msg):
    self.level = _get_level(level)
    self.message = msg
    exec_frame = sys._getframe(2)
    code_info = inspect.getframeinfo(exec_frame, context=0)
    self.filename = code_info[0][:200]
    self.lineno = int(code_info[1])
    self.funcname = code_info[2][:100]
    for item in settings.INSTALLED_APPS:
        if item in self.filename:
            self.appname = item
            break
    if sys.exc_info()[0] is not None:
        # store a truncated string form of the active exception
        self.exc_info = str(sys.exc_info())[:200]
        sys.exc_clear()
    try:
        self.uname = os.uname()
        self.process = int(os.getpid())
    except:
        pass
    self.arguments = str(inspect.getargvalues(exec_frame))
    if 'request' in inspect.getargvalues(exec_frame)[3]:
        request = inspect.getargvalues(exec_frame)[3]['request']
        if request.user.is_authenticated():
            self.user = str(request.user)
            self.user_email = request.user.email
        else:
            self.user = '******'
    self.save()
    self._send_error()
def show_stack():
    for level in inspect.stack():
        frame, filename, line_num, func, src_code, src_index = level
        print '%s[%d]\n->%s' % (filename, line_num,
                                src_code[src_index].strip())
        print inspect.getargvalues(frame)
        print
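# --- Added usage sketch for show_stack() above (hypothetical helper names).
# Each frame prints as "filename[line] -> source" followed by its ArgInfo,
# so the innermost entry shows x=42 and the outer one shows y=21.
def inner(x):
    show_stack()

def outer(y):
    inner(y * 2)

outer(21)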
def f1(a, b):
    print a + b
    cf = inspect.currentframe()
    print inspect.getargvalues(cf)
    while cf.f_back.f_back:
        cf = cf.f_back
    print cf.f_locals
    arg = inspect.getargvalues(cf)
    # formatargvalues() takes the four ArgInfo fields separately
    print inspect.formatargvalues(*arg)
def recurse(limit):
    local_variable = '.' * limit
    if limit <= 0:
        for frame, filename, line_num, func, source_code, source_index in inspect.stack():
            print '%s[%d]\n -> %s' % (filename, line_num,
                                      source_code[source_index].strip())
            print inspect.getargvalues(frame)
            print
        return
    recurse(limit - 1)
    return
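# --- Added usage sketch for recurse() above (assumes it runs as a script):
# descending two levels before dumping the stack shows a distinct `limit`
# and `local_variable` binding in every recursive frame.
recurse(2)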
def _get_caller_parameter(self, pos=1, pattern='process_', func=None):
    if func:
        return getargspec(func).args[pos]
    depth, func_name = 1, ''
    argInfo = getargvalues(stack()[depth][0])
    while not func_name.startswith(pattern):
        argInfo = getargvalues(stack()[depth][0])
        func_name = stack()[depth][3]
        depth += 1
    return argInfo.args[pos], argInfo.locals[argInfo.args[pos]]
def import_required(name):
    def wrapper(f, *args, **kwargs):
        print "Call %s" % f.func_name
        try:
            __import__(name)
        except ImportError, e:
            print "import %s failed" % name
        frame = inspect.currentframe()
        print inspect.getargvalues(frame)
        return f
    return wrapper
def gentmpdir():
    frames = inspect.currentframe()
    callingclass = inspect.getargvalues(inspect.stack()[1][0])[3].get('self', None)
    if callingclass == None:
        caller = inspect.getargvalues(inspect.stack()[1][0])[3]['__path__'][0].split('/')[-1]
    else:
        caller = str(callingclass.__class__)
    dirname = tmpbasedir + "/" + caller
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    return dirname
def logFunctionAndArgs(self):
    frame = inspect.getouterframes(inspect.currentframe())[1][0]
    args, _, _, values = inspect.getargvalues(frame)
    frameinfo = inspect.getframeinfo(frame)
    functionName = inspect.getframeinfo(frame)[2]
    output = ""
    for arg in args[1:]:  # [1:] skip the first argument 'self'
        value = values[arg]
        if isinstance(value, str):
            # add apostrophes for string values
            value = "\'" + value + "\'"
        elif isinstance(value, int):
            value = ''.join('%02X' % value)
        else:
            newValue = ""
            for i in value:
                if isinstance(i, int):
                    newValue += '%02X' % i
                else:
                    newValue += str(i)
            value = newValue
        output += arg + '=' + value
        if arg != args[-1]:  # add comma if not the last element
            output += ','
    # do not print "\n" as a new line
    output = output.replace("\n", "\\n")
    self.logging.info("--> " + functionName + '(' + output + ')')
def BranchAdminAuthenticate_SetSession(HttpRequest):
    ip = HttpRequest.META['REMOTE_ADDR']
    logindetails = GetLoginDetails(HttpRequest)
    if logindetails["userid"] == -1:
        messages.error(HttpRequest, 'Please Login to continue')
        return HttpResponseRedirect('/user/login/')
    try:
        if 'SelectedBranch' in HttpRequest.POST:
            # get the key, and show the users list
            BranchName = HttpRequest.POST['SelectedBranch']
            if 'BranchAdminChangeGroupSession' in HttpRequest.session.keys():
                del HttpRequest.session['BranchAdminChangeGroupSession']
            HttpRequest.session['BranchAdminChangeGroupSession'] = BranchName
            return HttpResponseRedirect('')
        else:
            messages.error(HttpRequest, 'Could not get the selected branch value. Please try again.')
            return HttpResponseRedirect('/message/')
    except Exception, ex:
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        msg = ''
        for i in args:
            msg += "[%s : %s]" % (i, values[i])
        LogUser.exception('%s : %s' % (inspect.getframeinfo(frame)[2], msg))
        messages.error(HttpRequest, 'ERROR: ' + str(ex))
        return HttpResponseRedirect('/message/')
def superdo(self, *args, **kwargs):
    """Like calling :meth:`super()` with the right arguments -- check if
    it works on multiple levels."""
    frame = sys._getframe(1)
    superObj = super(self.__class__, self)
    selector = frame.f_code.co_name
    selectorMethod = getattr(superObj, selector, None)
    if selectorMethod:
        if not (args or kwargs):
            srcargname, srcargs, srckwargs, vlocals = inspect.getargvalues(frame)
            srcdefaults = inspect.getargspec(getattr(self, selector))[3]
            if not srcdefaults:
                srcdefaults = []
            nargs = len(srcargname) - len(srcdefaults)
            args = [vlocals[key] for key in srcargname[1:nargs]]
            if srcargs:
                args.extend(vlocals[srcargs])
            kwargs = dict([(key, vlocals[key]) for key in srcargname[nargs:]])
            if srckwargs:
                kwargs.update(vlocals[srckwargs])
        dstargname, dstargs, dstkwargs, dstdefaults = inspect.getargspec(selectorMethod)
        if not dstdefaults:
            dstdefaults = []
        nargs = len(dstargname) - len(dstdefaults) - 1
        if not dstargs:
            args = args[:nargs]
        if not dstkwargs:
            dstkw = dstargname[-len(dstdefaults):]
            kwargs = dict([(key, value) for key, value in kwargs.items()
                           if key in dstkw])
        return selectorMethod(*args, **kwargs)
def _checkArgs(self, argNamesAndTypes):
    """ Private method to check the validity of the parameters """
    # inspect.stack()[1][0] returns the frame object ([0]) of the caller
    # function (stack()[1]).
    # The frame object is required for getargvalues. Getargvalues returns
    # a tuple with four items. The fourth item ([3]) contains the local
    # variables in a dict.
    args = inspect.getargvalues(inspect.stack()[1][0])[3]
    #
    for argName, argType in argNamesAndTypes.iteritems():
        if argName not in args:
            self._reportError('Method does not contain argument \'%s\'' % argName,
                              __name__, **self._getArgsDict(1))
        if not isinstance(args[argName], argType):
            self._reportError('Argument \'%s\' is not of type %s' % (argName, argType),
                              __name__, **self._getArgsDict(1))
def _send(level, *msgs):
    frame = inspect.stack(3)[2]
    module = inspect.getmodule(frame[0].f_code)
    if module is not None:
        module = module.__name__
    else:
        module = '<unknown>'
    args_infos = inspect.getargvalues(frame[0])
    file_, line, function = frame[1], frame[2], frame[3]
    if args_infos.args and args_infos.args[0] == 'self':
        args_infos.args.pop(0)
        cls = args_infos.locals['self'].__class__.__name__  # .split('.')[-1]
        module = "%s.%s" % (module, cls)
    function = "%s(%s)" % (
        function,
        ', '.join(
            "%s=%s" % (k, v)
            for k, v in ((k, args_infos.locals[k]) for k in args_infos.args)
        ),
    )
    return _log.send(level, file_, line, function, module,
                     ' '.join(str(p) for p in msgs))
def frame_parser(frame_record):
    """
    Parse given frame and return formatted values.

    :param frame_record: frame
    :return: tuple (fname, call, code, objects, local_vars)
    """
    frame, fname, lnum, func, lines, index = frame_record
    fname = fname and os.path.abspath(fname) or '?'
    args, varargs, varkw, local_vars = inspect.getargvalues(frame)
    call = ''
    if func != '?':
        call = func + inspect.formatargvalues(
            args, varargs, varkw, local_vars,
            formatvalue=lambda value: '=' + pydoc.text.repr(value))
    highlight = {}

    def reader(lnum=[lnum]):
        highlight[lnum[0]] = 1
        try:
            return linecache.getline(fname, lnum[0])
        finally:
            lnum[0] += 1

    code = format_code(lines, lnum, index)
    objects = cgitb.scanvars(reader, frame, local_vars)
    return (fname, call, code, objects, local_vars)
def get_callers_logger():
    """
    Get logger defined in caller's environment

    @return: logger instance (or None if none was found)
    """
    logger_cls = logging.getLoggerClass()
    frame = inspect.currentframe()
    logger = None
    # frame may be None, see https://docs.python.org/2/library/inspect.html#inspect.currentframe
    if frame is not None:
        try:
            # consider calling stack in reverse order, i.e. most inner frame (closest to caller) first
            for frameinfo in inspect.getouterframes(frame)[::-1]:
                bindings = inspect.getargvalues(frameinfo[0]).locals
                for val in bindings.values():
                    if isinstance(val, logger_cls):
                        logger = val
                        break
        finally:
            # make very sure that reference to frame object is removed, to avoid reference cycles
            # see https://docs.python.org/2/library/inspect.html#the-interpreter-stack
            del frame
    return logger
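# --- Added usage sketch for get_callers_logger() above (assumed setup,
# hypothetical function names). A Logger bound anywhere in the calling stack
# is discovered without being passed in explicitly.
import logging

def library_helper():
    logger = get_callers_logger()
    if logger is not None:
        logger.info("found the caller's logger")

def caller():
    log = logging.getLogger('app')  # picked up via frame inspection
    library_helper()

caller()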
def hook(self):
    '''Magic to hook various function calls in sslstrip'''
    # gets the function name and args of our caller
    frame = sys._getframe(1)
    fname = frame.f_code.co_name
    keys, _, _, values = inspect.getargvalues(frame)
    # assumes that no one calls del on an arg :-/
    args = {}
    for key in keys:
        args[key] = values[key]
    # prevent self conflict
    args['request'] = args['self']
    del args['self']
    # calls any plugin that has this hook
    try:
        for f in self.pmthds[fname]:
            a = f(**args)
            if a != None:
                args = a
    except KeyError:
        pass
    # pass our changes to the locals back down
    return args
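# --- Added standalone sketch of the caller-sniffing pattern used by hook()
# above (hypothetical names, Python 3). The callee reconstructs its caller's
# name and argument dict without the caller passing anything explicitly.
import sys
import inspect

def snoop_caller():
    frame = sys._getframe(1)
    args, _, _, values = inspect.getargvalues(frame)
    return frame.f_code.co_name, {k: values[k] for k in args}

def handle_request(path, verbose=False):
    return snoop_caller()

assert handle_request('/index') == (
    'handle_request', {'path': '/index', 'verbose': False})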
def getStack(self):
    """
    Public method to get the stack.

    @return list of lists with file name (string), line number (integer)
        and function name (string)
    """
    fr = self.cFrame
    stack = []
    while fr is not None:
        fname = self._dbgClient.absPath(self.fix_frame_filename(fr))
        if not fname.startswith("<"):
            fline = fr.f_lineno
            ffunc = fr.f_code.co_name
            if ffunc == '?':
                ffunc = ''
            if ffunc and not ffunc.startswith("<"):
                argInfo = inspect.getargvalues(fr)
                fargs = inspect.formatargvalues(argInfo[0], argInfo[1],
                                                argInfo[2], argInfo[3])
            else:
                fargs = ""
            stack.append([fname, fline, ffunc, fargs])
        if fr == self._dbgClient.mainFrame:
            fr = None
        else:
            fr = fr.f_back
    return stack
def user_line(self, frame):
    """
    Public method reimplemented to handle the program about to execute a
    particular line.

    @param frame the frame object
    """
    line = frame.f_lineno
    # We never stop on line 0.
    if line == 0:
        return
    fn = self._dbgClient.absPath(self.fix_frame_filename(frame))
    # See if we are skipping at the start of a newly loaded program.
    if self._dbgClient.mainFrame is None:
        if fn != self._dbgClient.getRunning():
            return
        self._dbgClient.mainFrame = frame
    self.currentFrame = frame
    # remember the locals because it is reinitialized when accessed
    self.currentFrameLocals = frame.f_locals
    fr = frame
    stack = []
    while fr is not None:
        # Reset the trace function so we can be sure
        # to trace all functions up the stack... This gets around
        # problems where an exception/breakpoint has occurred
        # but we had disabled tracing along the way via a None
        # return from dispatch_call
        fr.f_trace = self.trace_dispatch
        fname = self._dbgClient.absPath(self.fix_frame_filename(fr))
        if not fname.startswith("<"):
            fline = fr.f_lineno
            ffunc = fr.f_code.co_name
            if ffunc == '?':
                ffunc = ''
            if ffunc and not ffunc.startswith("<"):
                argInfo = inspect.getargvalues(fr)
                fargs = inspect.formatargvalues(argInfo[0], argInfo[1],
                                                argInfo[2], argInfo[3])
            else:
                fargs = ""
            stack.append([fname, fline, ffunc, fargs])
        if fr == self._dbgClient.mainFrame:
            fr = None
        else:
            fr = fr.f_back
    self.__isBroken = True
    self._dbgClient.write('%s%s\n' % (ResponseLine, unicode(stack)))
    self._dbgClient.eventLoop()
def ExtraAcdemicInfoTypeUpdate(HttpRequest):
    ip = HttpRequest.META['REMOTE_ADDR']
    logindetails = GetLoginDetails(HttpRequest)
    print logindetails
    if logindetails["userid"] == -1:
        messages.error(HttpRequest, 'Please Login to continue')
        return HttpResponseRedirect('/user/login/')
    try:
        ExtraAcademicInfoObj = ExtraAcademicInfo()
        flag = 1
        if "Id" in HttpRequest.POST:
            Id = HttpRequest.POST["Id"]
        else:
            messages.error(HttpRequest, "Error fetching data from form for Id")
            flag = -1
        if "ExtraAcdemicInfoTypeName" in HttpRequest.POST:
            ExtraAcademicInfoTypeName = HttpRequest.POST["ExtraAcdemicInfoTypeName"]
        else:
            messages.error(HttpRequest, "Error fetching data from form for ExtraAcdemicInfoTypeName")
            flag = -1
        if flag == -1:
            return HttpResponseRedirect('/message/')
        result = ExtraAcademicInfoObj.UpdateExtraAcademicInfoType(
            Id, ExtraAcademicInfoTypeName, logindetails["userid"], ip)
        messages.error(HttpRequest, "result is %s" % result)
        return HttpResponseRedirect('/message/')
    except Exception, ex:
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        msg = ''
        for i in args:
            msg += "[%s : %s]" % (i, values[i])
        Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2], msg))
        messages.error(HttpRequest, 'ERROR: ' + str(ex))
        return HttpResponseRedirect('/message/')
def __init__(self, initial, final, charge):
    args, _, _, values = inspect.getargvalues(inspect.currentframe())
    Action.__init__(self, {arg: values[arg] for arg in args if arg not in ['self']})
    self.transitions = {
        'robot': Transition(initial, final),
        'charge': Transition('+' if charge == '-' else '-', charge)
    }
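# --- Added standalone sketch of the auto-assign idiom used in __init__ above
# (hypothetical class, Python 3): every named parameter except `self` is
# copied onto the instance in one pass.
import inspect

class Point(object):
    def __init__(self, x, y, z=0):
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        for name in args[1:]:  # skip 'self'
            setattr(self, name, values[name])

p = Point(1, 2)
assert (p.x, p.y, p.z) == (1, 2, 0)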
def debug(*arguments):
    if 0:
        f = inspect.stack()
        f = f[2]
        f = f[0]
        f = inspect.getargvalues(f)
        # f = inspect.getframeinfo(f)
        # f = f.locals.get('self')
        out.println(util.timestamp(), ' [debug] <', str(f), '> ', *arguments)
        return
    if 0:
        out.println(util.timestamp(), ' [debug] ', *arguments)
        return
    if 0:
        # calling frame
        frame_object = inspect.stack()[1]
        # stack trace
        trace = traceback.extract_stack(frame_object[0])
        out.println(trace)
        out.println(inspect.getmodule(trace[1][2]))
        info = trace[1][2]
        # info = repr(info)
        out.println(util.timestamp(), ' [debug] <', info, '> ', *arguments)
        return
    if 1:
        f = inspect.currentframe()
        f = inspect.getframeinfo(f)
        # print(f.module + '.' + f.function)
        print(repr(f))
def ExtraAcademicInfoDetailsIndex(HttpRequest):
    ip = HttpRequest.META['REMOTE_ADDR']
    logindetails = GetLoginDetails(HttpRequest)
    print logindetails
    try:
        if logindetails["userid"] == -1:
            messages.error(HttpRequest, 'Please Login to continue')
            return HttpResponseRedirect('/user/login/')
        yearlist = range(1985, 2014)
        if 'v' in HttpRequest.GET:
            if is_integer(HttpRequest.GET['v']):
                _id = int(HttpRequest.GET['v'])
                try:
                    obj = ExtraAcademicInfoDetails.objects.get(User=logindetails["userid"], id=_id)
                    return render_to_response("UserProfile/ExtraAcademicInfoDetails.html",
                                              {'yearlist': yearlist, 'ExtraAcadStatus': obj},
                                              context_instance=RequestContext(HttpRequest))
                except ObjectDoesNotExist:
                    return render_to_response("UserProfile/ExtraAcademicInfoDetails.html",
                                              {'yearlist': yearlist},
                                              context_instance=RequestContext(HttpRequest))
        else:
            return render_to_response("UserProfile/ExtraAcademicInfoDetails.html",
                                      {'yearlist': yearlist},
                                      context_instance=RequestContext(HttpRequest))
    except Exception, ex:
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        msg = ''
        for i in args:
            msg += "[%s : %s]" % (i, values[i])
        Logger_User.exception('%s : %s' % (inspect.getframeinfo(frame)[2], msg))
        messages.error(HttpRequest, 'ERROR: ' + str(ex))
        return HttpResponseRedirect('/message/')
def wrapper(*args, **kwargs):
    # if we catch an ImportError exception, we should know
    # how to handle it now
    print "import %s modules" % name
    if len(args):
        if inspect.isfunction(args[0]):
            print "first argument must be the function:%s" % args[0]
        else:
            print "why not it function??"
    frame = inspect.currentframe()
    print inspect.getargvalues(frame)
    __import__(name)
def format_frame_info(frame):
    """
    Formats the given stack frame to show its position in the code and
    part of its context

    :param frame: A stack frame
    """
    # Same as in traceback.extract_stack
    line_no = frame.f_lineno
    code = frame.f_code
    filename = code.co_filename
    method_name = code.co_name
    linecache.checkcache(filename)

    try:
        # Try to get the type of the calling object
        instance = frame.f_locals['self']
        method_name = '{0}::{1}'.format(type(instance).__name__, method_name)
    except KeyError:
        # Not called from a bound method
        pass

    # File & line
    output_lines = ['  File "{0}", line {1}, in {2}'
                    .format(filename, line_no, method_name)]

    # Arguments
    if frame.f_locals:
        # Pypy keeps f_locals as an empty dictionary
        arg_info = inspect.getargvalues(frame)
        for name in arg_info.args:
            try:
                output_lines.append(
                    '    - {0:s} = {1}'.format(name, repr(frame.f_locals[name])))
            except TypeError:
                # Happens in dict/list-comprehensions in Python 2.x
                name = name[0]
                output_lines.append(
                    '    - {0:s} = {1}'.format(name, repr(frame.f_locals[name])))
        if arg_info.varargs:
            output_lines.append(
                '    - *{0:s} = {1}'.format(arg_info.varargs,
                                            frame.f_locals[arg_info.varargs]))
        if arg_info.keywords:
            output_lines.append(
                '    - **{0:s} = {1}'.format(arg_info.keywords,
                                             frame.f_locals[arg_info.keywords]))

    # Line block
    lines = _extract_lines(filename, frame.f_globals, line_no, 3)
    if lines:
        output_lines.append('')
        prefix = '      '
        output_lines.append('{0}{1}'.format(prefix,
                                            '\n{0}'.format(prefix).join(lines)))
    return '\n'.join(output_lines)
def __init__(self, rest_url=None, rest_username=None, rest_password=None):
    args, _, _, values = inspect.getargvalues(inspect.currentframe())
    for i in args:
        setattr(self, i, values[i])
    self.rest_urls = []

    # Set up for REST
    #################
    # handle urls that have a slash at the end by truncating it
    def process(url):
        c = {}
        if url[-1] == "/":
            url = url[0:len(url) - 1]
        parts = url.split("://")
        if len(parts) > 1:
            c['protocol'] = parts[0]
            c['path'] = parts[1]
            c['url'] = url
        else:
            c['protocol'] = "http"
            c['rest_port'] = 80
            c['path'] = url
            c['url'] = "://".join([c['protocol'], c['path']])
        path_components = c['path'].split("/")
        c['query_string'] = "/".join(path_components[1:len(path_components)])
        self.rest_urls.append(c)

    if isinstance(rest_url, tuple) or isinstance(rest_url, list):
        for url in rest_url:
            process(url)
    else:
        process(rest_url)
def debug_exceptions(type, value, tb):
    base_name = "dump"
    # find a browser object in the stack
    frames = inspect.getinnerframes(tb)
    frames.reverse()  # reversed because we want the innermost first
    browser = None
    for frame, _, _, _, _, _ in frames:
        for v in inspect.getargvalues(frame).locals.values():
            if isinstance(v, Browser):
                browser = v
                break
    localest = frames[0][0]
    # stick a trace in a file
    with open(base_name + '.trace', 'w') as tracefile:
        tracefile.write("Locals:\n")
        pprint(localest.f_locals, tracefile)
        tracefile.write("\n")
        if browser is not None:
            tracefile.write("URL: %s\n" % browser.url)
            tracefile.write("\n")
        traceback.print_tb(tb, file=tracefile)
    if browser is not None:
        browser.save(base_name + '.html')
    # then call the default handler
    sys.__excepthook__(type, value, tb)
def __init__(
    self,
    name,
    ec2Connection=None,
    autoconsole_port="8888",
    instance_id=None,
    isBroker=False,
    ssh_key=None,  # Name of AWS Keypair
    region="default",
    web_console_port="8888",
):
    args, _, _, values = inspect.getargvalues(inspect.currentframe())
    for i in args:
        setattr(self, i, values[i])
    self.exists = False
    for reservation in self.ec2Connection.get_all_reservations():
        for instance in reservation.instances:
            if self.instance_id != None and instance.id == self.instance_id:
                self.exists = True
                self.instance = instance
                self.name = instance.tags["Name"]
                self.zone = instance._placement
                self.id = instance_id
                self.update_data()
            if (
                "Name" in instance.__dict__["tags"]
                and instance.__dict__["tags"]["Name"] == name
                and (instance.state == "running" or instance.state == "pending")
            ):
                self.exists = True
                self.instance = instance
                self.zone = instance._placement
                self.update_data()
def test_previous_frame(self):
    args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
    self.assertEqual(args, ['a', 'b', 'c', 'd', 'e', 'f'])
    self.assertEqual(varargs, 'g')
    self.assertEqual(varkw, 'h')
    self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
                     '(a=7, b=8, c=9, d=3, e=4, f=5, *g=(), **h={})')
def getargvalues(frame):
    '''Inspects the given frame for arguments and returns a dictionary that
    maps parameters names to arguments values. If an `*` argument was passed
    then the key on the returning dictionary would be formatted as
    `<name-of-*-param>[index]`.

    For example in the function::

        >>> def autocontained(a, limit, *margs, **ks):
        ...    import sys
        ...    return getargvalues(sys._getframe())

        >>> autocontained(1, 12)['limit']
        12

        >>> autocontained(1, 2, -10, -11)['margs[0]']
        -10

    '''
    from xoutil.types import is_collection
    from xoutil.iterators import flatten
    pos, args, kwds, values = inspect.getargvalues(frame)
    res = {}
    for keys in pos:
        if not is_collection(keys):
            keys = (keys,)
        res.update({key: values[key] for key in flatten(keys)})
    if args:
        i = 0
        for item in values[args]:
            res['%s[%s]' % (args, i)] = item
            i += 1
    if kwds:
        res.update(values[kwds])
    return res
def parameters(only=None, exclude=None, ignore='self'):
    """Returns a dictionary of the calling functions parameter names and values.

    The optional arguments can be used to filter the result:

        only        use this to only return parameters from this list of names.
        exclude     use this to return every parameter *except* those included
                    in this list of names.
        ignore      use this inside methods to ignore the calling object's name.
                    For convenience, it ignores 'self' by default.
    """
    import inspect
    args, varargs, varkw, defaults = \
        inspect.getargvalues(inspect.stack()[1][0])
    if only is None:
        only = args[:]
        if varkw:
            only.extend(defaults[varkw].keys())
            defaults.update(defaults[varkw])
    if exclude is None:
        exclude = []
    exclude.append(ignore)
    return dict([(attrname, defaults[attrname])
                 for attrname in only if attrname not in exclude])
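# --- Added usage sketch for parameters() above (hypothetical function): the
# caller's bound arguments come back as a dict, filtered by `exclude`.
def connect(host, port=5432, user='guest'):
    return parameters(exclude=['user'])

assert connect('db.local') == {'host': 'db.local', 'port': 5432}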
def __init__(self, frame=None, spat=None, spec=None, detname=None, fwhm=None, neg=None):
    # Parse
    args, _, _, values = inspect.getargvalues(inspect.currentframe())
    d = dict([(k, values[k]) for k in args[1:]])
    # Setup the DataContainer
    datamodel.DataContainer.__init__(self, d=d)
def pt_html(context=5, i18n=None):
    _ = get_translator(i18n)
    esc = cgi.escape
    exc_info = [esc(str(value)) for value in sys.exc_info()[:2]]
    l = [
        _('<h1>Templating Error</h1>\n'
          '<p><b>%(exc_type)s</b>: %(exc_value)s</p>\n'
          '<p class="help">Debugging information follows</p>') % {
              'exc_type': exc_info[0],
              'exc_value': exc_info[1]
          },
        '<ol>',
    ]
    from roundup.cgi.PageTemplates.Expressions import TraversalError
    t = inspect.trace(context)
    t.reverse()
    for frame, file, lnum, func, lines, index in t:
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        if '__traceback_info__' in locals:
            ti = locals['__traceback_info__']
            if isinstance(ti, TraversalError):
                s = []
                for name, info in ti.path:
                    s.append(_('<li>"%(name)s" (%(info)s)</li>') % {
                        'name': name,
                        'info': esc(repr(info))
                    })
                s = '\n'.join(s)
                l.append(_('<li>Looking for "%(name)s", '
                           'current path:<ol>%(path)s</ol></li>') % {
                               'name': ti.name,
                               'path': s
                           })
            else:
                l.append(_('<li>In %s</li>') % esc(str(ti)))
        if '__traceback_supplement__' in locals:
            ts = locals['__traceback_supplement__']
            if len(ts) == 2:
                supp, context = ts
                s = _('A problem occurred in your template "%s".') \
                    % str(context.id)
                if context._v_errors:
                    s = s + '<br>' + '<br>'.join(
                        [esc(x) for x in context._v_errors])
                l.append('<li>%s</li>' % s)
            elif len(ts) == 3:
                supp, context, info = ts
                l.append(_('''
<li>While evaluating the %(info)r expression on line %(line)d
<table class="otherinfo" style="font-size: 90%%">
 <tr><th colspan="2" class="header">Current variables:</th></tr>
 %(globals)s
 %(locals)s
</table></li>
''') % {
                    'info': info,
                    'line': context.position[0],
                    'globals': niceDict(' ', context.global_vars),
                    'locals': niceDict(' ', context.local_vars)
                })
    l.append('''
</ol>
<table style="font-size: 80%%; color: gray">
 <tr><th class="header" align="left">%s</th></tr>
 <tr><td><pre>%s</pre></td></tr>
</table>''' % (_('Full traceback:'), cgi.escape(''.join(
        traceback.format_exception(*sys.exc_info())))))
    l.append('<p>&nbsp;</p>')
    return '\n'.join(l)
def dig(self, rname, rtype, rclass="IN", udp=None, serial=None,
        timeout=None, tries=3, flags="", bufsize=None, edns=None,
        nsid=False, dnssec=False, log_no_sep=False, tsig=None,
        addr=None, source=None):
    # Convert one item zone list to zone name.
    if isinstance(rname, list):
        if len(rname) != 1:
            raise Failed("One zone required")
        rname = rname[0].name

    rtype_str = rtype.upper()

    # Set port type.
    if rtype.upper() == "AXFR":
        # Always use TCP.
        udp = False
    elif rtype.upper() == "IXFR":
        # Use TCP if not specified.
        udp = udp if udp != None else False
        rtype_str += "=%i" % int(serial)
    else:
        # Use TCP or UDP at random if not specified.
        udp = udp if udp != None else random.choice([True, False])

    if udp:
        dig_flags = "+notcp"
    else:
        dig_flags = "+tcp"

    dig_flags += " +retry=%i" % (tries - 1)

    # Set timeout.
    if timeout is None:
        timeout = self.DIG_TIMEOUT
    dig_flags += " +time=%i" % timeout

    # Prepare query (useless for XFR).
    query = dns.message.make_query(rname, rtype, rclass)

    # Remove implicit RD flag.
    query.flags &= ~dns.flags.RD

    # Set packet flags.
    flag_names = flags.split()
    for flag in flag_names:
        if flag == "AA":
            query.flags |= dns.flags.AA
            dig_flags += " +aa"
        elif flag == "TC":
            query.flags |= dns.flags.TC
            dig_flags += " +tc"
        elif flag == "RD":
            query.flags |= dns.flags.RD
            dig_flags += " +rd"
        elif flag == "RA":
            query.flags |= dns.flags.RA
            dig_flags += " +ra"
        elif flag == "AD":
            query.flags |= dns.flags.AD
            dig_flags += " +ad"
        elif flag == "CD":
            query.flags |= dns.flags.CD
            dig_flags += " +cd"
        elif flag == "Z":
            query.flags |= 64
            dig_flags += " +z"

    # Set EDNS.
    if edns != None or bufsize or nsid:
        class NsidFix(object):
            '''Current pythondns doesn't implement NSID option.'''
            def __init__(self):
                self.otype = dns.edns.NSID
            def to_wire(self, file=None):
                pass

        if edns:
            edns = int(edns)
        else:
            edns = 0
        dig_flags += " +edns=%i" % edns

        if bufsize:
            payload = int(bufsize)
        else:
            payload = 1232
        dig_flags += " +bufsize=%i" % payload

        if nsid:
            options = [NsidFix()]
            dig_flags += " +nsid"
        else:
            options = None

        query.use_edns(edns=edns, payload=payload, options=options)

    # Set DO flag.
    if dnssec:
        query.want_dnssec()
        dig_flags += " +dnssec +bufsize=%i" % query.payload

    # Store function arguments for possible comparison.
    args = dict()
    params = inspect.getargvalues(inspect.currentframe())
    for param in params.args:
        if param != "self":
            args[param] = params.locals[param]

    if addr is None:
        addr = self.addr

    # Add source to dig flags if present
    if source is not None:
        dig_flags += " -b " + source

    check_log("DIG %s %s %s @%s -p %i %s" %
              (rname, rtype_str, rclass, addr, self.port, dig_flags))

    # Set TSIG for a normal query if explicitly specified.
    key_params = dict()
    if tsig != None:
        if type(tsig) is dnstest.keys.Tsig:
            key_params = tsig.key_params
        elif tsig and self.tsig_test:
            key_params = self.tsig_test.key_params
    if key_params:
        query.use_tsig(keyring=key_params["keyring"],
                       keyname=key_params["keyname"],
                       algorithm=key_params["keyalgorithm"])

    # Set TSIG for a transfer if available.
    if rtype.upper() == "AXFR" or rtype.upper() == "IXFR":
        if self.tsig_test and tsig != False:
            key_params = self.tsig_test.key_params
        if key_params:
            detail_log("%s:%s:%s" %
                       (key_params["keyalgorithm"], key_params["keyname"],
                        base64.b64encode(list(
                            key_params["keyring"].values())[0]).decode('ascii')))

    for t in range(tries):
        try:
            if rtype.upper() == "AXFR":
                resp = dns.query.xfr(addr, rname, rtype, rclass,
                                     port=self.port, lifetime=timeout,
                                     use_udp=udp, **key_params)
            elif rtype.upper() == "IXFR":
                resp = dns.query.xfr(addr, rname, rtype, rclass,
                                     port=self.port, lifetime=timeout,
                                     use_udp=udp, serial=int(serial),
                                     **key_params)
            elif udp:
                resp = dns.query.udp(query, addr, port=self.port,
                                     timeout=timeout, source=source)
            else:
                resp = dns.query.tcp(query, addr, port=self.port,
                                     timeout=timeout, source=source)

            if not log_no_sep:
                detail_log(SEP)

            return dnstest.response.Response(self, resp, query, args)
        except dns.exception.Timeout:
            pass
        except:
            time.sleep(timeout)

    raise Failed("Can't query server='%s' for '%s %s %s'" %
                 (self.name, rname, rclass, rtype))
def analyse(exctyp, value, tb):
    import tokenize
    import keyword
    import platform
    from gui import application
    from gui.meta import get_libs_version_string
    app = application.get_app()
    trace = StringIO()
    nlines = 3
    frecs = inspect.getinnerframes(tb, nlines)
    trace.write('Mypaint version: %s\n' % app.version)
    trace.write('System information: %s\n' % platform.platform())
    trace.write('Using: %s\n' % (get_libs_version_string(),))
    trace.write('Traceback (most recent call last):\n')
    for frame, fname, lineno, funcname, context, cindex in frecs:
        trace.write('  File "%s", line %d, ' % (fname, lineno))
        args, varargs, varkw, lcls = inspect.getargvalues(frame)

        def readline(lno=[lineno], *args):
            if args:
                print(args)
            try:
                return linecache.getline(fname, lno[0])
            finally:
                lno[0] += 1

        all, prev, name, scope = {}, None, '', None
        for ttype, tstr, stup, etup, line in tokenize.generate_tokens(readline):
            if ttype == tokenize.NAME and tstr not in keyword.kwlist:
                if name:
                    if name[-1] == '.':
                        try:
                            val = getattr(prev, tstr)
                        except AttributeError:
                            # XXX skip the rest of this identifier only
                            break
                        name += tstr
                else:
                    assert not name and not scope
                    scope, val = lookup(tstr, frame, lcls)
                    name = tstr
                if val is not None:
                    prev = val
            elif tstr == '.':
                if prev:
                    name += '.'
            else:
                if name:
                    all[name] = (scope, prev)
                prev, name, scope = None, '', None
                if ttype == tokenize.NEWLINE:
                    break
        try:
            details = inspect.formatargvalues(
                args, varargs, varkw, lcls,
                formatvalue=lambda v: '=' + pydoc.text.repr(v))
        except:
            # seen that one on Windows (actual exception was KeyError: self)
            details = '(no details)'
        trace.write(funcname + details + '\n')
        if context is None:
            context = ['<source context missing>\n']
        trace.write(''.join(['    ' + x.replace('\t', '  ')
                             for x in filter(lambda a: a.strip(), context)]))
        if len(all):
            trace.write('  variables: %s\n' % str(all))
    trace.write('%s: %s' % (exctyp.__name__, value))
    return trace
def _add_locals_data(data, exc_info):
    if not SETTINGS['locals']['enabled']:
        return

    frames = data['body']['trace']['frames']

    cur_tb = exc_info[2]
    frame_num = 0
    num_frames = len(frames)
    while cur_tb:
        cur_frame = frames[frame_num]
        tb_frame = cur_tb.tb_frame
        cur_tb = cur_tb.tb_next

        if not isinstance(tb_frame, types.FrameType):
            # this can happen if the traceback or frame is wrapped in some way,
            # for example by `ExceptionInfo` in
            # https://github.com/celery/billiard/blob/master/billiard/einfo.py
            log.warning('Traceback frame not a types.FrameType. Ignoring.')
            frame_num += 1
            continue

        # Create placeholders for argspec/varargspec/keywordspec/locals
        argspec = None
        varargspec = None
        keywordspec = None
        _locals = {}

        try:
            arginfo = inspect.getargvalues(tb_frame)

            # Optionally fill in locals for this frame
            if arginfo.locals and _check_add_locals(cur_frame, frame_num, num_frames):
                # Get all of the named args
                #
                # args can be a nested list of args in the case where there
                # are anonymous tuple args provided.
                # e.g. in Python 2 you can:
                #   def func((x, (a, b), z)):
                #       return x + a + b + z
                #
                #   func((1, (1, 2), 3))
                argspec = _flatten_nested_lists(arginfo.args)

                if arginfo.varargs is not None:
                    varargspec = arginfo.varargs
                    if SETTINGS['locals']['scrub_varargs']:
                        temp_varargs = list(arginfo.locals[varargspec])
                        for i, arg in enumerate(temp_varargs):
                            temp_varargs[i] = REDACT_REF
                        arginfo.locals[varargspec] = tuple(temp_varargs)

                if arginfo.keywords is not None:
                    keywordspec = arginfo.keywords

                _locals.update(arginfo.locals.items())
        except Exception:
            log.exception('Error while extracting arguments from frame. Ignoring.')

        # Finally, serialize each arg/kwarg/local separately so that we only report
        # CircularReferences for each variable, instead of for the entire payload
        # as would be the case if we serialized that payload in one-shot.
        if argspec:
            cur_frame['argspec'] = argspec
        if varargspec:
            cur_frame['varargspec'] = varargspec
        if keywordspec:
            cur_frame['keywordspec'] = keywordspec
        if _locals:
            cur_frame['locals'] = dict((k, _serialize_frame_data(v))
                                       for k, v in iteritems(_locals))

        frame_num += 1
def nn2(data, query=None, k=None, treetype="kd", searchtype="standard",
        radius=0.0, eps=0.0):
    """
    Nearest Neighbour Search

    Uses a kd-tree to find the p number of near neighbours for each point in
    an input/output dataset. The advantage of the kd-tree is that it runs in
    O(M log M) time.

    The Pyann package utilizes the Approximate Near Neighbor (ANN) C++
    library, which can give the exact near neighbours or (as the name
    suggests) approximate near neighbours to within a specified error bound.
    For more information on the ANN library please visit
    http://www.cs.umd.edu/~mount/ANN/.

    Search types:
    - `priority`: visits cells in increasing order of distance from the
      query point, and hence, should converge more rapidly on the true
      nearest neighbour, but `standard` is usually faster for exact
      searches.
    - `radius`: only searches for neighbours within a specified radius of
      the point. If there are no neighbours then `nn_idx` will contain 0
      and `nn_dists` will contain 1.340781e+154 for that point.

    Parameters
    ----------
    data: array_like
        - M-D `np.matrix`, where each of the M rows is a point
        - M-D `np.ndarray`, where `D == 1` or `None`.
    query: array_like, optional
        points that will be queried against data.
        - N-D `np.matrix`
        - N-D `np.array`, where `D == 1` or `None`
        D must be the same as `data`.
        if `None` (default), `query == data`;
    k: float, int, optional
        The maximum number of nearest neighbours to compute. if `None`
        (default), k is set to `data.shape[0]` or 10, whichever smaller.
    treetype: str, optional
        Options:
        - `'kd'`: standard kd tree
        - `'bd'`: bd (box-decomposition, AMNSW98) tree which may perform
          better for larger point sets
        default is `'kd'`
    searchtype: str, optional
        Options: `'standard'`, `'priority'`, `'radius'`.
        See above for more detail. default is `'standard'`.
    radius: float, int, optional
        Radius of search for `searchtype='radius'`. default is `0.0`.
    eps: float, int, optional
        error bound. default of `0.0` implies exact nearest neighbour
        search.

    Returns
    -------
    NN2Results
        Object of class `NN2Results` with two attributes: `nn_idx` and
        `nn_dists`.
        - `nn_idx`: A N-k integer `np.matrix` returning the near neighbour
          indices.
        - `nn_dists`: A N-k `np.matrix` returning the near neighbour
          Euclidean distances.

    Examples
    --------
    > results = pyann.nn2(np.matrix([[1, 0], [2, 0]]),
    ...                   np.matrix([[1.01, 0], [3, 0], [4.0, 0]]), k=1)
    > results.nn_idx
    matrix([[1], [2], [2]])
    > results.nn_dists
    matrix([[0.01], [1.  ], [2.  ]])
    """
    if k is None:
        k = np.minimum(10, nrow(data))
    if query is None:
        query = data

    check_args(inspect.getargvalues(inspect.currentframe()))

    dimension = ncol(data)
    if dimension is None:
        dimension = 1
    query_dimension = ncol(query)
    if query_dimension is None:
        query_dimension = 1

    ND = nrow(data)
    if ND is None:
        ND = len(data)
    NQ = nrow(query)
    if NQ is None:
        NQ = len(query)

    if dimension != query_dimension:
        raise DimensionError('data dimension does not equal query dimension')
    if k > ND:
        raise ValueError(
            "Cannot find more nearest neighbours than there are points: k < nrow(data)")

    searchtype_opts = ["standard", "priority", "radius"]
    searchtypeInt = searchtype_opts.index(searchtype) + 1
    treetype_opts = ["kd", "bd"]
    treetype = treetype_opts.index(treetype)

    data = np.array(data.transpose(), dtype=np.double).reshape(-1,)
    if len(data) == 0:
        raise ValueError('no data points in data')
    query = np.array(query.transpose(), dtype=np.double).reshape(-1,)
    if len(query) == 0:
        raise ValueError('no query points in data')

    if len([x for x in query if np.isnan(x) or np.isinf(x)]) != 0:
        raise ValueError('NA/NaN/Inf in foreign function call (arg 2)')
    if len([x for x in data if np.isnan(x) or np.isinf(x)]) != 0:
        raise ValueError('NA/NaN/Inf in foreign function call (arg 1)')

    results = annlib.py_get_NN_2Set(data, query, int(dimension), int(ND),
                                    int(NQ), int(k), int(eps),
                                    int(searchtypeInt), int(treetype),
                                    float(radius * radius),
                                    np.zeros(k * NQ, dtype=np.intc),
                                    np.zeros(k * NQ, dtype=np.double))
    results = [np.matrix(x).reshape(int(len(x) / k), int(k)) for x in results]
    nnresults = NN2Results(*results)
    return nnresults
def sw_set_var(name, val, layer=3):
    stack = inspect.stack()
    # NOTE: getargvalues() returns a snapshot of the frame's locals; assigning
    # into that dict does not write the value back into the target frame.
    inspect.getargvalues(stack[layer].frame).locals[name] = val
def plot(x, y, z, radius=0.1, resolution=8, color=(0, 1, 0, 1),  # alpha=1,
         emission=None, roughness=1, rotation_x=0, rotation_y=0, rotation_z=0,
         marker=None, marker_orientation=(0, 0), layers=None):
    """
    Line plot in 3 dimensions as a line, tube or shapes.

    call signature:

    plot(x, y, z, radius=0.1, resolution=8, color=(0, 1, 0, 1),
         emission=None, rotation_x=0, rotation_y=0, rotation_z=0,
         roughness=1, marker='sphere', marker_orientation=(0, 0))

    Keyword arguments:

    *x, y, z*:
      x, y and z coordinates of the points to be plotted.
      These are 1d arrays of the same length.

    *radius*:
      Radius of the plotted tube, i.e. line width.
      Positive real number or array.

    *rotation_[xyz]*:
      Rotation angle around the xyz axis.
      Real number or array.

    *resolution*:
      Azimuthal resolution of the tubes in vertices.
      Positive integer > 2.

    *color*:
      rgb values of the form (r, g, b) with 0 <= r, g, b <= 1, or string,
      e.g. 'red' or character, e.g. 'r', or n-array of strings/character,
      or [n, 3] array with rgb values.

    *emission*:
      Light emission by the line or markers.
      Real number for a line plot and array for markers.

    *roughness*:
      Texture roughness.

    *rotation_[xyz]*:
      Rotation of the markers. Accepts array.

    *marker*:
      Marker to be used for the plot. String with standard Blender 3d
      shapes: 'cube', 'uv_sphere', 'ico_sphere', 'cylinder', 'cone',
      'torus', 'monkey'. Custom shape or blender object.
      1d array of one of the above.

    *marker_orientation*:
      Tuple of Euler angles for the orientation of the markers
      or [n, 2] array with the angles.

    *layers*:
      List or numpy array of layers where the plot will be visible.
    """
    import inspect

    # Assign parameters to the PathLine objects.
    path_line_return = PathLine()
    argument_dict = inspect.getargvalues(inspect.currentframe()).locals
    for argument in argument_dict:
        setattr(path_line_return, argument, argument_dict[argument])
    path_line_return.plot()
    return path_line_return
def CartAdd(Cart, Items, Quantities, ResponseGroup=None, AWSAccessKeyId=None):
    '''CartAdd in ECS'''
    argv = inspect.getargvalues(inspect.currentframe())[-1]
    return __cartOperation(XMLCartAdd, argv)
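# --- Added standalone sketch of the "grab my own argument dict" idiom in
# CartAdd above (hypothetical names, Python 3). getargvalues(...)[-1] is the
# locals dict, which at function entry holds exactly the bound parameters.
import inspect

def cart_add(cart, items, quantities, response_group=None):
    argv = inspect.getargvalues(inspect.currentframe())[-1]
    return argv

assert cart_add('c1', ['sku-1'], [2])['quantities'] == [2]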
def train_ann(goal_type='atr', n_steps=10, shape=[60, 30, 1],
              train_alg="train_rprop", epochs=500, goal=0.000001, adapt=False):
    base_name = "train_anns/{}_{}_steps_{}_{}_{}_{}_adapt_{}".format(
        goal_type, n_steps, train_alg, shape[0], shape[1], shape[2], adapt)
    base_path_name = "/".join([base_path, base_name])
    str_template = "{}/{}"
    if not os.path.exists(base_path_name):
        os.makedirs(base_path_name)
    goal_row = 9
    if goal_type == 'atr':
        data = ExcelDataReader("dados_usm_estruturados.xlsx", l1=1,
                               usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, goal_row))
    else:
        data = ExcelDataReader("dados_usm_estruturados.xlsx", l1=1,
                               usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 10))
    gen_train_sets = GenerateNormalizedTrainSets(data.data()[0:396])
    validation_set = GenerateNormalizedTrainSets(data.data()[396:])
    dataset = gen_train_sets.normalized_data_set_separator(
        n_steps, goal_row, False, norm_rule="zero_one")
    mlp = TimeSeriesMLPMultivariate(shape, train_alg)
    if train_alg != "train_ncg":
        min_error = mlp.train(dataset, save_plot=True,
                              filename=str_template.format(base_path_name, "train_stage"),
                              epochs=epochs, goal=goal, adapt=adapt)
    else:
        min_error = mlp.train(dataset, save_plot=True,
                              filename=str_template.format(base_path_name, "train_stage"),
                              epochs=epochs, goal=goal)
    sim = mlp.sim(x_label="{} estimado".format(goal_type.upper()),
                  y_label="{} real".format(goal_type.upper()),
                  save_plot=True,
                  filename=str_template.format(base_path_name, "estimado_scatter"))
    mlp.out(validation_set.normalized_data_set_separator(n_steps, goal_row, False,
                                                         norm_rule="zero_one"),
            x_label="{} previsto".format(goal_type.upper()),
            y_label="{} real".format(goal_type.upper()),
            save_plot=True,
            filename=str_template.format(base_path_name, "previsto_scatter"))
    predicted = mlp.out(
        validation_set.normalized_data_set_separator(n_steps, goal_row, False,
                                                     norm_rule="zero_one"),
        x_label="{} previsto".format(goal_type.upper()),
        y_label="{} real".format(goal_type.upper()),
        plot_type='plot', save_plot=True,
        filename=str_template.format(base_path_name, "previsto_line"))
    mlp.save(str_template.format(base_path_name, "ann"))
    r_q_est = r_sqrt(
        gen_train_sets.normalized_data_set_separator(n_steps, goal_row, False,
                                                     norm_rule="zero_one"), sim)
    r_q = r_sqrt(
        validation_set.normalized_data_set_separator(n_steps, goal_row, False,
                                                     norm_rule="zero_one"), predicted)
    with open(str_template.format(base_path_name, "params.txt"), "wb") as f:
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        for i in args:
            f.write(bytes("{}: {}\n".format(i, values[i]), encoding='utf8'))
        f.write(bytes("minimum error: {}\n".format(min_error[-1]), encoding='utf8'))
        f.write(bytes("r squared estimation: {}\n".format(r_q_est), encoding='utf8'))
        f.write(bytes("r squared forecast: {}\n".format(r_q), encoding='utf8'))
    with open(str_template.format(base_path_name, "r_squared_estimated.txt"), "wb") as f:
        f.write(bytes("{};\n".format(r_q_est), encoding='utf8'))
    with open(str_template.format(base_path_name, "r_squared_forecast.txt"), "wb") as f:
        f.write(bytes("{};\n".format(r_q), encoding='utf8'))
    return r_q
def text(einfo, context=5):
    """Return a plain text document describing a given traceback."""
    etype, evalue, etb = einfo
    if isinstance(etype, type):
        etype = etype.__name__
    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    date = time.ctime(time.time())
    head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
A problem occurred in a Python script.  Here is the sequence of
function calls leading up to the error, in the order they occurred.
'''

    frames = []
    records = inspect.getinnerframes(etb, context)
    for frame, file, lnum, func, lines, index in records:
        file = file and os.path.abspath(file) or '?'
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        call = ''
        if func != '?':
            call = 'in ' + func
            if func != "<module>":
                call += inspect.formatargvalues(
                    args, varargs, varkw, locals,
                    formatvalue=lambda value: '=' + pydoc.text.repr(value))

        highlight = {}
        def reader(lnum=[lnum]):
            highlight[lnum[0]] = 1
            try:
                return linecache.getline(file, lnum[0])
            finally:
                lnum[0] += 1
        vars = scanvars(reader, frame, locals)

        rows = [' %s %s' % (file, call)]
        if index is not None:
            i = lnum - index
            for line in lines:
                num = '%5d ' % i
                rows.append(num + line.rstrip())
                i += 1

        done, dump = {}, []
        for name, where, value in vars:
            if name in done:
                continue
            done[name] = 1
            if value is not __UNDEF__:
                if where == 'global':
                    name = 'global ' + name
                elif where != 'local':
                    name = where + name.split('.')[-1]
                dump.append('%s = %s' % (name, pydoc.text.repr(value)))
            else:
                dump.append(name + ' undefined')

        rows.append('\n'.join(dump))
        frames.append('\n%s\n' % '\n'.join(rows))

    exception = ['%s: %s' % (str(etype), str(evalue))]
    for name in dir(evalue):
        value = pydoc.text.repr(getattr(evalue, name))
        exception.append('\n%s%s = %s' % (" " * 4, name, value))

    return head + ''.join(frames) + ''.join(exception) + '''

The above is a description of an error in a Python program.  Here is
the original traceback:

%s
''' % ''.join(traceback.format_exception(etype, evalue, etb))
def html(context=5, i18n=None):
    _ = get_translator(i18n)
    etype, evalue = sys.exc_info()[0], sys.exc_info()[1]
    if type(etype) is type:
        etype = etype.__name__
    pyver = 'Python ' + string.split(sys.version)[0] + '<br>' + sys.executable
    head = pydoc.html.heading(
        _('<font size=+1><strong>%(exc_type)s</strong>: %(exc_value)s</font>') % {
            'exc_type': etype,
            'exc_value': evalue
        }, '#ffffff', '#777777', pyver)

    head = head + (_('<p>A problem occurred while running a Python script. '
                     'Here is the sequence of function calls leading up to '
                     'the error, with the most recent (innermost) call first. '
                     'The exception attributes are:'))

    indent = '<tt><small>%s</small>&nbsp;</tt>' % ('&nbsp;' * 5)
    traceback = []
    for frame, file, lnum, func, lines, index in inspect.trace(context):
        if file is None:
            link = _("&lt;file is None - probably inside <tt>eval</tt> "
                     "or <tt>exec</tt>&gt;")
        else:
            file = os.path.abspath(file)
            link = '<a href="file:%s">%s</a>' % (file, pydoc.html.escape(file))
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        if func == '?':
            call = ''
        else:
            call = _('in <strong>%s</strong>') % func + inspect.formatargvalues(
                args, varargs, varkw, locals,
                formatvalue=lambda value: '=' + pydoc.html.repr(value))
        level = '''
<table width="100%%" bgcolor="#dddddd" cellspacing=0 cellpadding=2 border=0>
<tr><td>%s %s</td></tr></table>''' % (link, call)

        if index is None or file is None:
            traceback.append('<p>' + level)
            continue

        # do a file inspection
        names = []

        def tokeneater(type, token, start, end, line, names=names):
            if type == tokenize.NAME and token not in keyword.kwlist:
                if token not in names:
                    names.append(token)
            if type == tokenize.NEWLINE:
                raise IndexError

        def linereader(file=file, lnum=[lnum]):
            line = linecache.getline(file, lnum[0])
            lnum[0] = lnum[0] + 1
            return line

        try:
            tokenize.tokenize(linereader, tokeneater)
        except IndexError:
            pass
        lvals = []
        for name in names:
            if name in frame.f_code.co_varnames:
                if name in locals:
                    value = pydoc.html.repr(locals[name])
                else:
                    value = _('<em>undefined</em>')
                name = '<strong>%s</strong>' % name
            else:
                if name in frame.f_globals:
                    value = pydoc.html.repr(frame.f_globals[name])
                else:
                    value = _('<em>undefined</em>')
                name = '<em>global</em> <strong>%s</strong>' % name
            lvals.append('%s = %s' % (name, value))
        if lvals:
            lvals = string.join(lvals, ', ')
            lvals = indent + '<small><font color="#909090">%s'\
                '</font></small><br>' % lvals
        else:
            lvals = ''

        excerpt = []
        i = lnum - index
        for line in lines:
            number = '&nbsp;' * (5 - len(str(i))) + str(i)
            number = '<small><font color="#909090">%s</font></small>' % number
            line = '<tt>%s&nbsp;%s</tt>' % (number, pydoc.html.preformat(line))
            if i == lnum:
                line = '''
<table width="100%%" bgcolor="white" cellspacing=0 cellpadding=0 border=0>
<tr><td>%s</td></tr></table>''' % line
            excerpt.append('\n' + line)
            if i == lnum:
                excerpt.append(lvals)
            i = i + 1
        traceback.append('<p>' + level + string.join(excerpt, '\n'))

    traceback.reverse()

    exception = '<p><strong>%s</strong>: %s' % (str(etype), str(evalue))
    attribs = []
    if type(evalue) is types.InstanceType:
        for name in dir(evalue):
            value = pydoc.html.repr(getattr(evalue, name))
            attribs.append('<br>%s%s = %s' % (indent, name, value))

    return head + string.join(attribs) + string.join(traceback) + '<p>&nbsp;</p>'
def classify(X, y, verbose=False, nfolds=5, dim_red=None,
             n_components=[5, 10, 20], scale=True, fs=None, njobs=1,
             LR_C=[.01, .1, 1, 10, 100], LR_class_weight=[None, 'balanced'],
             SVC_C=[.01, .1, 1, 10, 100], SVC_class_weight=[None, 'balanced'],
             SVC_kernels=['rbf', 'linear', 'poly'],
             n_estimators=[10, 20, 30], max_features=['auto', 'log2', None],
             shuffle=False, **kwargs):
    # spit out to the screen the function parameters, for logging
    if verbose:
        import inspect
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        print 'function name "%s"' % inspect.getframeinfo(frame)[2]
        for i in args[2:]:
            print "    %s = %s" % (i, values[i])

    # prepare configuration for cross validation test harness
    num_instances = len(X)
    seed = 8

    # prepare models
    models = []
    # all these support multiclass:
    # http://scikit-learn.org/stable/modules/multiclass.html
    models.append(('LR',
                   LogisticRegression(multi_class='multinomial', solver='newton-cg'),
                   {"C": LR_C, "class_weight": LR_class_weight}))
    models.append(('LDA', LinearDiscriminantAnalysis(), {}))
    models.append(('RndFor', RandomForestClassifier(),
                   {'n_estimators': n_estimators, 'max_features': max_features}))
    models.append(('NB', GaussianNB(), {}))
    models.append(('SVC', SVC(),
                   {"C": SVC_C, "class_weight": SVC_class_weight,
                    'kernel': SVC_kernels}))
    models.append(('Most frequent', DummyClassifier(strategy='most_frequent'), {}))
    models.append(('Stratified', DummyClassifier(strategy='stratified'), {}))

    # spit out to the screen the parameters to be tried in each classifier
    if verbose:
        print 'Trying these parameters:'
        for m in models:
            print m[0], ':', m[2]

    # evaluate each model in turn
    results = []
    names = []
    scoring = 'accuracy'
    for name, model, params in models:
        # need to create the CV objects inside the loop because they get used
        # and not get reset!
        if shuffle:
            inner_cv = StratifiedShuffleSplit(n_splits=nfolds, test_size=.1,
                                              random_state=seed)
            outer_cv = StratifiedShuffleSplit(n_splits=nfolds, test_size=.1,
                                              random_state=seed)
        else:
            # do this if no shuffling is wanted
            inner_cv = StratifiedKFold(n_splits=nfolds, random_state=seed)
            outer_cv = StratifiedKFold(n_splits=nfolds, random_state=seed)

        steps = [('clf', model)]
        pipe_params = {}
        for key, val in params.iteritems():
            key_name = 'clf__%s' % key
            pipe_params[key_name] = val

        if fs == 'l1':
            lsvc = LinearSVC(C=0.1, penalty="l1", dual=False)
            fs = feature_selection.SelectFromModel(lsvc)
        elif fs == 'rfe':
            fs = feature_selection.RFE(estimator=model)
            pipe_params['feat_sel__n_features_to_select'] = n_components
        steps = [('feat_sel', fs)] + steps

        if dim_red is not None:
            if dim_red == 'pca':
                dr = decomposition.PCA()
                pipe_params['dim_red__n_components'] = n_components
            elif dim_red == 'ica':
                dr = decomposition.FastICA()
                pipe_params['dim_red__n_components'] = n_components
            steps = [('dim_red', dr)] + steps

        if scale:
            steps = [('scale', preprocessing.RobustScaler())] + steps

        pipe = Pipeline(steps)

        cv_results = []
        cnt = 0
        for train_idx, test_idx in outer_cv.split(X, y):
            X_train, X_test = X[train_idx], X[test_idx]
            y_train, y_test = y[train_idx], y[test_idx]
            opt_model = GridSearchCV(estimator=pipe, param_grid=pipe_params,
                                     verbose=0, n_jobs=njobs, cv=inner_cv)
            opt_model.fit(X_train, y_train)
            if verbose:
                if len(params.keys()) > 0:
                    print 'Best parameters for', name, \
                        ' (%d/%d):' % (cnt + 1, outer_cv.n_splits)
                    print opt_model.best_params_
            predictions = opt_model.predict(X_test)
            cv_results.append(metrics.accuracy_score(y_test, predictions))
            cnt += 1
        results.append(cv_results)
        names.append(name)

    if verbose:
        print '\n======'
        for model, res in zip(models, results):
            msg = "%s: %f (%f)" % (model[0], np.mean(res), np.std(res))
            print(msg)
        print 'Chance: %f' % (1 / float(len(np.unique(y))))
        print '======\n'
    return results, models
def month_ann(goal_type='atr', n_steps=10, delay=1, shape=[60, 1],
              train_alg="train_rprop", epochs=500, goal=0.000001,
              adapt=False, show=1):
    base_name = "train_anns/{}_{}_steps_{}_delay_{}_{}_adapt_{}".format(
        goal_type, n_steps, delay, train_alg, "_".join(map(str, shape)), adapt)
    base_path_name = "/".join([base_path, base_name])
    str_template = "{}/{}_{}_mes"
    if not os.path.exists(base_path_name):
        os.makedirs(base_path_name)
    goal_row = 9
    if goal_type == 'atr':
        data = ExcelDataReader("dados_usm_estruturados.xlsx", l1=1,
                               usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, goal_row))
    else:
        data = ExcelDataReader("dados_usm_estruturados.xlsx", l1=1,
                               usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 10))
    gen_train_sets = open_dataset(n_steps=n_steps, delay=delay, start=0, stop=396)
    validation_set = open_dataset(n_steps=n_steps, delay=delay,
                                  start=396 - n_steps, stop=-1, validation=True)
    for k, v in gen_train_sets.items():
        if len(validation_set[k]) > 0:
            mlp = TimeSeriesMLPMultivariate(shape, train_alg, error_function='sse')
            if train_alg != "train_ncg" and train_alg != "train_cg":
                tries = 0
                print(v)
                while tries < 5:
                    min_error = mlp.train(v, save_plot=True,
                                          filename=str_template.format(
                                              base_path_name, "train_stage", k),
                                          epochs=epochs, goal=goal, adapt=adapt,
                                          show=show)
                    if min_error[-1] < 0.01:
                        tries = 5
                    tries += 1
            else:
                min_error = mlp.train(v, save_plot=True,
                                      filename=str_template.format(
                                          base_path_name, "train_stage", k),
                                      epochs=epochs, goal=goal)
            sim = mlp.sim(x_label="{} estimado".format(goal_type.upper()),
                          y_label="{} real".format(goal_type.upper()),
                          save_plot=True,
                          filename=str_template.format(base_path_name,
                                                       "estimado_scatter", k))
            mlp.out(validation_set[k],
                    x_label="{} previsto".format(goal_type.upper()),
                    y_label="{} real".format(goal_type.upper()),
                    save_plot=True,
                    filename=str_template.format(base_path_name,
                                                 "previsto_scatter", k))
            predicted = mlp.out(validation_set[k],
                                x_label="{} previsto".format(goal_type.upper()),
                                y_label="{} real".format(goal_type.upper()),
                                plot_type='plot', save_plot=True,
                                filename=str_template.format(base_path_name,
                                                             "previsto_line", k))
            mlp.save(str_template.format(base_path_name, "ann", k))
            try:
                r_q_est = r_sqrt(v, sim)
                r_q = r_sqrt(v, predicted)
            except:
                r_q_est = 0
                r_q = 0
            with open("{}/{}".format(base_path_name,
                                     "params_{}_mes.txt".format(k)), "wb") as f:
                frame = inspect.currentframe()
                args, _, _, values = inspect.getargvalues(frame)
                for i in args:
                    f.write(bytes("{}: {}\n".format(i, values[i]), encoding='utf8'))
                f.write(bytes("minimum error: {}\n".format(min_error[-1]),
                              encoding='utf8'))
                f.write(bytes("r squared estimation: {}\n".format(r_q_est),
                              encoding='utf8'))
                f.write(bytes("r squared forecast: {}\n".format(r_q),
                              encoding='utf8'))
            with open("{}/{}".format(base_path_name,
                                     "r_squared_estimated{}_mes.txt".format(k)),
                      "wb") as f:
                f.write(bytes("{};\n".format(r_q_est), encoding='utf8'))
            with open("{}/{}".format(base_path_name,
                                     "r_squared_forecast{}_mes.txt".format(k)),
                      "wb") as f:
                f.write(bytes("{};\n".format(r_q), encoding='utf8'))
def print_verbose(self, frames, evalue=None, etype=None, context=5,
                  long_header=False, include_vars=True):
    """Return a nice text document describing the traceback."""
    # some locals
    try:
        etype = etype.__name__
    except AttributeError:
        pass
    Colors = self.Colors  # just a shorthand + quicker name lookup
    ColorsNormal = Colors.Normal  # used a lot
    col_scheme = self.color_scheme_table.active_scheme_name
    indent = ' ' * ultratb.INDENT_SIZE
    em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal)
    undefined = '%sundefined%s' % (Colors.em, ColorsNormal)
    exc = '%s%s%s' % (Colors.excName, etype, ColorsNormal)

    # some internal-use functions
    def text_repr(value):
        """Hopefully pretty robust repr equivalent."""
        # this is pretty horrible but should always return *something*
        try:
            return pydoc.text.repr(value)
        except KeyboardInterrupt:
            raise
        except:
            try:
                return repr(value)
            except KeyboardInterrupt:
                raise
            except:
                try:
                    # all still in an except block so we catch
                    # getattr raising
                    name = getattr(value, '__name__', None)
                    if name:
                        # ick, recursion
                        return text_repr(name)
                    klass = getattr(value, '__class__', None)
                    if klass:
                        return '%s instance' % text_repr(klass)
                except KeyboardInterrupt:
                    raise
                except:
                    return 'UNRECOVERABLE REPR FAILURE'

    def eqrepr(value, repr=text_repr):
        return '=%s' % repr(value)

    def nullrepr(value, repr=text_repr):
        return ''

    # meat of the code begins
    if long_header:
        # Header with the exception type, python version, and date
        pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
        date = time.ctime(time.time())
        if etype is not None:
            head = '%s%s%s\n%s%s%s\n%s' % (
                Colors.topline, '-' * 75, ColorsNormal, exc,
                ' ' * (75 - len(str(etype)) - len(pyver)), pyver,
                date.rjust(75))
            head += "\nA problem occurred executing Python code.  Here is the sequence of function"\
                    "\ncalls leading up to the error, with the most recent (innermost) call last."
        else:
            head = '%s%s\n%s%s%s\n%s' % (
                Colors.topline, '-' * 75, ColorsNormal,
                ' ' * (75 - len(pyver)), pyver, date.rjust(75))
    else:
        # Simplified header
        if etype is None:
            head = '%s%s\n%s%s' % (Colors.topline, '-' * 75, ColorsNormal,
                                   'Traceback (most recent call last)')
        else:
            head = '%s%s%s\n%s%s' % (
                Colors.topline, '-' * 75, ColorsNormal, exc,
                'Traceback (most recent call last)'.rjust(75 - len(str(etype))))
    frames_txt = []

    # build some color string templates outside these nested loops
    tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal)
    tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
    tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \
                    (Colors.vName, Colors.valEm, ColorsNormal)
    tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal)
    tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal,
                                             Colors.vName, ColorsNormal)
    tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal)
    tpl_line = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal)
    tpl_line_em = '%s%%s%s %%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)

    # now, loop over all records printing context and info
    abspath = os.path.abspath
    for frame in frames:
        file, lnum, func, lines, index = inspect.getframeinfo(frame, context=context)
        #print '*** record:',file,lnum,func,lines,index  # dbg
        if not file:
            file = '?'
        elif not (file.startswith(str("<")) and file.endswith(str(">"))):
            # Guess that filenames like <string> aren't real filenames, so
            # don't call abspath on them.
            try:
                file = abspath(file)
            except OSError:
                # Not sure if this can still happen: abspath now works with
                # file names like <string>
                pass
        file = py3compat.cast_unicode(file, util_path.fs_encoding)
        link = tpl_link % file
        args, varargs, varkw, locals = inspect.getargvalues(frame)

        if func == '?':
            call = ''
        else:
            # Decide whether to include variable details or not
            var_repr = include_vars and eqrepr or nullrepr
            try:
                call = tpl_call % (func, inspect.formatargvalues(
                    args, varargs, varkw, locals, formatvalue=var_repr))
            except KeyError:
                # This happens in situations like errors inside generator
                # expressions, where local variables are listed in the
                # line, but can't be extracted from the frame.  I'm not
                # 100% sure this isn't actually a bug in inspect itself,
                # but since there's no info for us to compute with, the
                # best we can do is report the failure and move on.  Here
                # we must *not* call any traceback construction again,
                # because that would mess up use of %debug later on.  So we
                # simply report the failure and move on.  The only
                # limitation will be that this frame won't have locals
                # listed in the call signature.  Quite subtle problem...
                # I can't think of a good way to validate this in a unit
                # test, but running a script consisting of:
                #  dict( (k,v.strip()) for (k,v) in range(10) )
                # will illustrate the error, if this exception catch is
                # disabled.
                call = tpl_call_fail % func

        # Don't attempt to tokenize binary files.
        if file.endswith(('.so', '.pyd', '.dll')):
            frames_txt.append('%s %s\n' % (link, call))
            continue
        elif file.endswith(('.pyc', '.pyo')):
            # Look up the corresponding source file.
            file = openpy.source_from_cache(file)

        def linereader(file=file, lnum=[lnum], getline=ulinecache.getline):
            line = getline(file, lnum[0])
            lnum[0] += 1
            return line

        # Build the list of names on this line of code where the exception
        # occurred.
        try:
            names = []
            name_cont = False

            for token_type, token, start, end, line in generate_tokens(linereader):
                # build composite names
                if token_type == tokenize.NAME and token not in keyword.kwlist:
                    if name_cont:
                        # Continuation of a dotted name
                        try:
                            names[-1].append(token)
                        except IndexError:
                            names.append([token])
                        name_cont = False
                    else:
                        # Regular new names.  We append everything, the caller
                        # will be responsible for pruning the list later.  It's
                        # very tricky to try to prune as we go, b/c composite
                        # names can fool us.  The pruning at the end is easy
                        # to do (or the caller can print a list with repeated
                        # names if so desired.
                        names.append([token])
                elif token == '.':
                    name_cont = True
                elif token_type == tokenize.NEWLINE:
                    break
        except (IndexError, UnicodeDecodeError):
            # signals exit of tokenizer
            pass
        except tokenize.TokenError as msg:
            _m = ("An unexpected error occurred while tokenizing input\n"
                  "The following traceback may be corrupted or invalid\n"
                  "The error message is: %s\n" % msg)
            error(_m)

        # Join composite names (e.g. "dict.fromkeys")
        names = ['.'.join(n) for n in names]
        # prune names list of duplicates, but keep the right order
        unique_names = uniq_stable(names)

        # Start loop over vars
        lvals = []
        if include_vars:
            for name_full in unique_names:
                name_base = name_full.split('.', 1)[0]
                if name_base in frame.f_code.co_varnames:
                    if name_base in locals:
                        try:
                            value = repr(eval(name_full, locals))
                        except:
                            value = undefined
                    else:
                        value = undefined
                    name = tpl_local_var % name_full
                else:
                    if name_base in frame.f_globals:
                        try:
                            value = repr(eval(name_full, frame.f_globals))
                        except:
                            value = undefined
                    else:
                        value = undefined
                    name = tpl_global_var % name_full
                lvals.append(tpl_name_val % (name, value))
        if lvals:
            lvals = '%s%s' % (indent, em_normal.join(lvals))
        else:
            lvals = ''

        level = '%s %s\n' % (link, call)

        if index is None:
            frames_txt.append(level)
        else:
            frames_txt.append('%s%s' % (level, ''.join(
                _format_traceback_lines(lnum, index, lines, Colors, lvals,
                                        col_scheme))))

    # Get (safely) a string form of the exception info
    if evalue is not None and etype is not None:
        try:
            etype_str, evalue_str = map(str, (etype, evalue))
        except:
            # User exception is improperly defined.
            etype, evalue = str, sys.exc_info()[:2]
            etype_str, evalue_str = map(str, (etype, evalue))
        # ... and format it
        exception = ['%s%s%s: %s' % (Colors.excName, etype_str, ColorsNormal,
                                     py3compat.cast_unicode(evalue_str))]
        if (not py3compat.PY3) and type(evalue) is types.InstanceType:
            try:
                names = [w for w in dir(evalue)
                         if isinstance(w, py3compat.string_types)]
            except:
                # Every now and then, an object with funny internals blows up
                # when dir() is called on it.  We do the best we can to report
                # the problem and continue
                _m = '%sException reporting error (object with broken dir())%s:'
                exception.append(_m % (Colors.excName, ColorsNormal))
                etype_str, evalue_str = map(str, sys.exc_info()[:2])
                exception.append('%s%s%s: %s' % (Colors.excName, etype_str,
                                                 ColorsNormal,
                                                 py3compat.cast_unicode(evalue_str)))
                names = []
            for name in names:
                value = text_repr(getattr(evalue, name))
                exception.append('\n%s%s = %s' % (indent, name, value))
        exp_list = [''.join(exception[0]), '\n']
    else:
        exp_list = []

    # vds: >>
    if frames:
        filepath, lnum, _, _, _ = inspect.getframeinfo(frames[-1])
        filepath = os.path.abspath(filepath)
        ipinst = get_ipython()
        if ipinst is not None:
            ipinst.hooks.synchronize_with_editor(filepath, lnum, 0)
    # vds: <<

    # return all our info assembled as a single string
    # return '%s\n\n%s\n%s' % (head,'\n'.join(frames_txt),''.join(exception[0]) )
    return self._output_list([head] + frames_txt + exp_list, sep='\n')
def html(einfo, context=5):
    """Return a nice HTML document describing a given traceback."""
    etype, evalue, etb = einfo
    if isinstance(etype, type):
        etype = etype.__name__
    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    date = time.ctime(time.time())
    head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
        '<big><big>%s</big></big>' % strong(pydoc.html.escape(str(etype))),
        '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
<p>A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.</p>'''

    indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
    frames = []
    records = inspect.getinnerframes(etb, context)
    for frame, file, lnum, func, lines, index in records:
        if file:
            file = os.path.abspath(file)
            link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
        else:
            file = link = '?'
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        call = ''
        if func != '?':
            call = 'in ' + strong(pydoc.html.escape(func))
            if func != "<module>":
                call += inspect.formatargvalues(
                    args, varargs, varkw, locals,
                    formatvalue=lambda value: '=' + pydoc.html.repr(value))

        highlight = {}

        def reader(lnum=[lnum]):
            highlight[lnum[0]] = 1
            try:
                return linecache.getline(file, lnum[0])
            finally:
                lnum[0] += 1

        vars = scanvars(reader, frame, locals)

        rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
                ('<big>&nbsp;</big>', link, call)]
        if index is not None:
            i = lnum - index
            for line in lines:
                num = small('&nbsp;' * (5 - len(str(i))) + str(i)) + '&nbsp;'
                if i in highlight:
                    line = '<tt>=&gt;%s%s</tt>' % (num, pydoc.html.preformat(line))
                    rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
                else:
                    line = '<tt>&nbsp;&nbsp;%s%s</tt>' % (num, pydoc.html.preformat(line))
                    rows.append('<tr><td>%s</td></tr>' % grey(line))
                i += 1

        done, dump = {}, []
        for name, where, value in vars:
            if name in done:
                continue
            done[name] = 1
            if value is not __UNDEF__:
                if where in ('global', 'builtin'):
                    name = ('<em>%s</em> ' % where) + strong(name)
                elif where == 'local':
                    name = strong(name)
                else:
                    name = where + strong(name.split('.')[-1])
                dump.append('%s = %s' % (name, pydoc.html.repr(value)))
            else:
                dump.append(name + ' <em>undefined</em>')

        rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
        frames.append('''
<table width="100%%" cellspacing=0 cellpadding=0 border=0>
%s</table>''' % '\n'.join(rows))

    exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
                                pydoc.html.escape(str(evalue)))]
    for name in dir(evalue):
        if name[:1] == '_':
            continue
        value = pydoc.html.repr(getattr(evalue, name))
        exception.append('\n<br>%s%s =\n%s' % (indent, name, value))

    return head + ''.join(frames) + ''.join(exception) + '''


<!-- The above is a description of an error in a Python program, formatted
     for a web browser because the 'cgitb' module was enabled.  In case you
     are not reading this in a web browser, here is the original traceback:

%s
-->
''' % pydoc.html.escape(''.join(traceback.format_exception(etype, evalue, etb)))
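This html() variant follows the standard library's cgitb.html closely. If the goal is simply an HTML traceback page rather than a custom formatter, the stdlib entry point already covers it; a minimal sketch:

import cgitb

# Install a handler that formats uncaught exceptions as HTML.
# cgitb.enable also accepts format='text' for plain-text output
# and logdir=... to write copies of the reports to disk.
cgitb.enable(format='html')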
def format_records(records):  # , print_globals=False):
    # Loop over all records printing context and info
    frames = []
    abspath = os.path.abspath
    for frame, file, lnum, func, lines, index in records:
        try:
            file = file and abspath(file) or '?'
        except OSError:
            # if file is '<console>' or something not in the filesystem,
            # the abspath call will throw an OSError.  Just ignore it and
            # keep the original file string.
            pass

        if file.endswith('.pyc'):
            file = file[:-4] + '.py'

        link = file
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        if func == '?':
            call = ''
        else:
            # Decide whether to include variable details or not
            try:
                call = 'in %s%s' % (func, inspect.formatargvalues(
                    args, varargs, varkw, locals, formatvalue=eq_repr))
            except KeyError:
                # Very odd crash from inspect.formatargvalues().  The
                # scenario under which it appeared was a call to
                # view(array,scale) in NumTut.view.view(), where scale had
                # been defined as a scalar (it should be a tuple). Somehow
                # inspect messes up resolving the argument list of view()
                # and barfs out. At some point I should dig into this one
                # and file a bug report about it.
                print("\nJoblib's exception reporting continues...\n")
                call = 'in %s(***failed resolving arguments***)' % func

        # Initialize a list of names on the current line, which the
        # tokenizer below will populate.
        names = []

        def tokeneater(token_type, token, start, end, line):
            """Stateful tokeneater which builds dotted names.

            The list of names it appends to (from the enclosing scope) can
            contain repeated composite names.  This is unavoidable, since
            there is no way to disambiguate partial dotted structures until
            the full list is known.  The caller is responsible for pruning
            the final list of duplicates before using it."""

            # build composite names
            if token == '.':
                try:
                    names[-1] += '.'
                    # store state so the next token is added for x.y.z names
                    tokeneater.name_cont = True
                    return
                except IndexError:
                    pass
            if token_type == tokenize.NAME and token not in keyword.kwlist:
                if tokeneater.name_cont:
                    # Dotted names
                    names[-1] += token
                    tokeneater.name_cont = False
                else:
                    # Regular new names.  We append everything, the caller
                    # will be responsible for pruning the list later.  It's
                    # very tricky to try to prune as we go, b/c composite
                    # names can fool us.  The pruning at the end is easy
                    # to do (or the caller can print a list with repeated
                    # names if so desired.
                    names.append(token)
            elif token_type == tokenize.NEWLINE:
                raise IndexError

        # we need to store a bit of state in the tokenizer to build
        # dotted names
        tokeneater.name_cont = False

        def linereader(file=file, lnum=[lnum], getline=linecache.getline):
            line = getline(file, lnum[0])
            lnum[0] += 1
            return line

        # Build the list of names on this line of code where the exception
        # occurred.
        try:
            # This builds the names list in-place by capturing it from the
            # enclosing scope.
            for token in generate_tokens(linereader):
                tokeneater(*token)
        except (IndexError, UnicodeDecodeError):
            # signals exit of tokenizer
            pass
        except tokenize.TokenError as msg:
            _m = ("An unexpected error occurred while tokenizing input file %s\n"
                  "The following traceback may be corrupted or invalid\n"
                  "The error message is: %s\n" % (file, msg))
            print(_m)

        # prune names list of duplicates, but keep the right order
        unique_names = uniq_stable(names)

        # Start loop over vars
        lvals = []
        for name_full in unique_names:
            name_base = name_full.split('.', 1)[0]
            if name_base in frame.f_code.co_varnames:
                if name_base in locals.keys():
                    try:
                        value = safe_repr(eval(name_full, locals))
                    except:
                        value = "undefined"
                else:
                    value = "undefined"
                name = name_full
                lvals.append('%s = %s' % (name, value))
            #elif print_globals:
            #    if frame.f_globals.has_key(name_base):
            #        try:
            #            value = safe_repr(eval(name_full, frame.f_globals))
            #        except:
            #            value = "undefined"
            #    else:
            #        value = "undefined"
            #    name = 'global %s' % name_full
            #    lvals.append('%s = %s' % (name, value))
        if lvals:
            lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals))
        else:
            lvals = ''

        level = '%s\n%s %s\n' % (75 * '.', link, call)

        if index is None:
            frames.append(level)
        else:
            frames.append('%s%s' % (level, ''.join(
                _format_traceback_lines(lnum, index, lines, lvals))))

    return frames
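The tokeneater/linereader machinery above boils down to scanning one line of source for (possibly dotted) names. Here is a standalone sketch of that idea using only the standard library; names_on_line is an illustrative helper, not part of joblib.

import io
import keyword
import tokenize

def names_on_line(source_line):
    """Collect dotted names (e.g. 'os.path.join') from one line of source."""
    names, name_cont = [], False
    tokens = tokenize.generate_tokens(io.StringIO(source_line).readline)
    try:
        for tok_type, tok, _, _, _ in tokens:
            if tok_type == tokenize.NAME and tok not in keyword.kwlist:
                if name_cont:
                    names[-1] += '.' + tok   # extend the dotted name
                    name_cont = False
                else:
                    names.append(tok)        # start a fresh name
            elif tok == '.':
                name_cont = True
    except tokenize.TokenError:
        pass  # incomplete line; keep whatever was collected
    return names

print(names_on_line('value = os.path.join(base, name)'))
# ['value', 'os.path.join', 'base', 'name']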
def _pymake_compile(
    srcfiles,
    target,
    fc,
    cc,
    expedite,
    dryrun,
    double,
    debug,
    fflags,
    cflags,
    syslibs,
    arch,
    intelwin,
    sharedobject,
    verbose,
):
    """Standard compile method.

    Parameters
    ----------
    srcfiles : list
        list of source file names
    target : str
        path for executable to create
    fc : str
        fortran compiler
    cc : str
        c or cpp compiler
    expedite : bool
        boolean indicating if only out of date source files will be
        compiled. Clean must not have been used on previous build.
    dryrun : bool
        boolean indicating if source files should be compiled. Files
        will be deleted, if makeclean is True.
    double : bool
        boolean indicating a compiler switch will be used to create an
        executable with double precision real variables.
    debug : bool
        boolean indicating if a debug executable will be built
    fflags : list
        user provided list of fortran compiler flags
    cflags : list
        user provided list of c or cpp compiler flags
    syslibs : list
        user provided syslibs
    arch : str
        architecture to use for Intel Compilers on Windows
        (default is intel64)
    intelwin : bool
        boolean indicating if pymake was used to compile source code on
        Windows using Intel compilers
    sharedobject : bool
        boolean indicating a shared object (.so or .dll) will be built
    verbose : bool
        boolean indicating if output will be printed to the terminal
    inplace : bool
        boolean indicating that the source files in srcdir, srcdir2, and
        defined in extrafiles will be used directly. If inplace is True,
        source files will be copied to a directory named srcdir_temp.
        (default is False)

    Returns
    -------
    returncode : int
        returncode
    """
    # write pymake settings
    if verbose:
        msg = ("\nPymake settings in {}\n".format(_pymake_compile.__name__)
               + 40 * "-")
        print(msg)
        frame = inspect.currentframe()
        fnargs, _, _, values = inspect.getargvalues(frame)
        for arg in fnargs:
            value = values[arg]
            if not value:
                value = "None"
            elif isinstance(value, list):
                value = ", ".join(value)
            print(" {}={}".format(arg, value))

    # initialize returncode
    returncode = 0

    # initialize ilink
    ilink = 0

    # set optimization levels
    optlevel = _get_optlevel(target, fc, cc, debug, fflags, cflags,
                             verbose=verbose)

    # get fortran and c compiler switches
    tfflags = _get_fortran_flags(
        target,
        fc,
        fflags,
        debug,
        double,
        sharedobject=sharedobject,
        verbose=verbose,
    )
    tcflags = _get_c_flags(
        target,
        cc,
        cflags,
        debug,
        srcfiles,
        sharedobject=sharedobject,
        verbose=verbose,
    )

    # get linker flags and syslibs
    lc, tlflags = _get_linker_flags(
        target,
        fc,
        cc,
        syslibs,
        srcfiles,
        sharedobject=sharedobject,
        verbose=verbose,
    )

    # clean exe prior to build so that test for exe below can return a
    # non-zero error code
    if os.path.isfile(target):
        if verbose:
            msg = "removing existing target with same name: {}".format(target)
            print(msg)
        os.remove(target)

    if intelwin:
        # update compiler names if necessary
        ext = ".exe"
        if fc is not None:
            if ext not in fc:
                fc += ext
        if cc is not None:
            if ext not in cc:
                cc += ext
        if ext not in lc:
            lc += ext

        # update target extension
        if sharedobject:
            program_path, ext = os.path.splitext(target)
            if ext.lower() != ".dll":
                target = program_path + ".dll"
        else:
            if ext not in target:
                target += ext

        # delete the batch file if it exists
        batchfile = "compile.bat"
        if os.path.isfile(batchfile):
            try:
                os.remove(batchfile)
            except:
                if verbose:
                    print("could not remove '{}'".format(batchfile))

        # Create target using a batch file on Windows
        try:
            _create_win_batch(
                batchfile,
                fc,
                cc,
                lc,
                optlevel,
                tfflags,
                tcflags,
                tlflags,
                srcfiles,
                target,
                arch,
                sharedobject,
            )

            # build the command list for the Windows batch file
            cmdlists = [
                batchfile,
            ]
        except:
            errmsg = "Could not make x64 target: {}\n".format(target)
            errmsg += traceback.format_exc()
            print(errmsg)
    else:
        if sharedobject:
            program_path, ext = os.path.splitext(target)
            if _get_osname() == "win32":
                if ext.lower() != ".dll":
                    target = program_path + ".dll"
            else:
                if ext.lower() != ".so":
                    target = program_path + ".so"

        # initialize the commands and object files list
        cmdlists = []
        objfiles = []

        # assume that header files may be in other folders, so make a list
        searchdir = []
        for f in srcfiles:
            dirname = os.path.dirname(f)
            if dirname not in searchdir:
                searchdir.append(dirname)

        # build the command for each source file and add to the
        # list of commands
        for srcfile in srcfiles:
            cmdlist = []
            iscfile = False
            ext = os.path.splitext(srcfile)[1].lower()
            if ext in [".c", ".cpp"]:  # mja
                iscfile = True
                cmdlist.append(cc)  # mja
                cmdlist.append(optlevel)
                for switch in tcflags:  # mja
                    cmdlist.append(switch)  # mja
            else:  # mja
                cmdlist.append(fc)
                cmdlist.append(optlevel)
                for switch in tfflags:
                    cmdlist.append(switch)

            # add search path for any c and c++ header files
            if iscfile:
                for sd in searchdir:
                    cmdlist.append("-I{}".format(sd))
            # put object files and module files in objdir_temp and moddir_temp
            else:
                cmdlist.append("-I{}".format(objdir_temp))
                if fc in ["ifort", "mpiifort"]:
                    cmdlist.append("-module")
                    cmdlist.append(moddir_temp + "/")
                else:
                    cmdlist.append("-J{}".format(moddir_temp))

            cmdlist.append("-c")
            cmdlist.append(srcfile)

            # object file name and location
            srcname, srcext = os.path.splitext(srcfile)
            srcname = srcname.split(os.path.sep)[-1]
            objfile = os.path.join(objdir_temp, srcname + ".o")
            cmdlist.append("-o")
            cmdlist.append(objfile)

            # Save the name of the object file for linker
            objfiles.append(objfile)

            # If expedited, then check if object file is out of date, if it
            # exists. No need to compile if object file is newer.
            compilefile = True
            if expedite:
                if not _check_out_of_date(srcfile, objfile):
                    compilefile = False

            if compilefile:
                cmdlists.append(cmdlist)

        # Build the link command and then link to create the executable
        ilink = len(cmdlists)
        if ilink > 0:
            cmdlist = [lc, optlevel]
            cmdlist.append("-o")
            cmdlist.append(target)
            for objfile in objfiles:
                cmdlist.append(objfile)

            # linker switches
            for switch in tlflags:
                cmdlist.append(switch)

            # add linker command to the commands list
            cmdlists.append(cmdlist)

    # execute each command in cmdlists
    if not dryrun:
        for idx, cmdlist in enumerate(cmdlists):
            if idx == 0:
                if intelwin:
                    msg = (
                        "\nCompiling '{}' ".format(os.path.basename(target))
                        + "for Windows using Intel compilers..."
                    )
                else:
                    msg = "\nCompiling object files for " + "'{}'...".format(
                        os.path.basename(target)
                    )
                print(msg)
            if idx > 0 and idx == ilink:
                msg = "\nLinking object files " + "to make '{}'...".format(
                    os.path.basename(target)
                )
                print(msg)

            # write the command to the terminal
            _process_Popen_command(False, cmdlist)

            # run the command using Popen
            proc = _process_Popen_initialize(cmdlist, intelwin)

            # write batch file execution to terminal
            if intelwin:
                _process_Popen_stdout(proc)
            # establish communicator to report errors
            else:
                _process_Popen_communicate(proc)

            # evaluate return code
            returncode = proc.returncode
            if returncode != 0:
                msg = "compilation failed on '{}'".format(" ".join(cmdlist))
                print(msg)
                break

    # print blank line separator after all commands in cmdlist are executed
    print("")

    # return
    return returncode
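The verbose block at the top of _pymake_compile is a reusable idiom: a function can enumerate and print all of its own parameters at entry. A minimal sketch of the same idea (log_own_args and build are illustrative names, not pymake API):

import inspect

def log_own_args():
    """Print name=value for every parameter of the function that called us."""
    frame = inspect.currentframe().f_back
    fnargs, _, _, values = inspect.getargvalues(frame)
    print("settings in {}".format(inspect.getframeinfo(frame)[2]))
    for arg in fnargs:
        print("  {}={}".format(arg, values[arg]))

def build(target, debug=False, flags=None):
    log_own_args()

build("app.exe", debug=True)
# settings in build
#   target=app.exe
#   debug=True
#   flags=None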
def read_feather(path, columns=None, use_threads: bool = True):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_feather(**kwargs))
def read_pickle(
    filepath_or_buffer: FilePathOrBuffer,
    compression: Optional[str] = "infer",
):
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=BaseFactory.read_pickle(**kwargs))
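The modin readers above all use the same trick: called at the very top of a function, inspect.getargvalues(inspect.currentframe()) returns the function's locals, which at that point are exactly its named parameters, ready to forward wholesale. A sketch of the pattern (frontend_sum and backend_sum are illustrative names); note the capture must happen before any other local is created, or the extra locals would be forwarded too.

import inspect

def backend_sum(a, b, scale):
    return (a + b) * scale

def frontend_sum(a, b, scale=1):
    # Must run before any other assignment: the frame's locals only hold
    # the parameters here, so the dict maps name -> argument value.
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return backend_sum(**kwargs)

print(frontend_sum(2, 3))  # 5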
def print_function_args_values(frame):
    args, _, _, values = inspect.getargvalues(frame)
    print('Function name "%s"' % inspect.getframeinfo(frame)[2])
    for arg in args:
        print("    %s = %s" % (arg, values[arg]))
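For example, a function can report its own arguments by handing its current frame to the helper above (greet is an illustrative caller):

import inspect

def greet(name, punctuation="!"):
    print_function_args_values(inspect.currentframe())

greet("Ada")
# Function name "greet"
#     name = Ada
#     punctuation = !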
def read_clipboard(sep=r"\s+", **kwargs):  # pragma: no cover
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    kwargs.update(kwargs.pop("kwargs", {}))
    return DataFrame(query_compiler=BaseFactory.read_clipboard(**kwargs))
def snapshot(info=None, context=5, code=None, environment=None):
    """Return a dict describing a given traceback (based on cgitb.text)."""
    import time
    import linecache
    import inspect
    import pydoc
    import cgitb

    # if no exception info given, get current:
    etype, evalue, etb = info or sys.exc_info()
    if isinstance(etype, ClassType):
        etype = etype.__name__

    # create a snapshot dict with some basic information
    s = {}
    s['pyver'] = 'Python ' + sys.version.split()[0] + ': ' + \
        sys.executable + ' (prefix: %s)' % sys.prefix
    s['date'] = time.ctime(time.time())

    # start to process frames
    records = inspect.getinnerframes(etb, context)
    del etb  # Prevent circular references that would cause memory leaks
    s['frames'] = []
    for frame, file, lnum, func, lines, index in records:
        file = file and os.path.abspath(file) or '?'
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        call = ''
        if func != '?':
            call = inspect.formatargvalues(
                args, varargs, varkw, locals,
                formatvalue=lambda value: '=' + pydoc.text.repr(value))

        # basic frame information
        f = {'file': file, 'func': func, 'call': call, 'lines': {},
             'lnum': lnum}

        highlight = {}

        def reader(lnum=[lnum]):
            highlight[lnum[0]] = 1
            try:
                return linecache.getline(file, lnum[0])
            finally:
                lnum[0] += 1

        vars = cgitb.scanvars(reader, frame, locals)

        # if it is a view, replace with generated code
        if file.endswith('html'):
            lmin = lnum > context and (lnum - context) or 0
            lmax = lnum + context
            lines = code.split("\n")[lmin:lmax]
            index = min(context, lnum) - 1

        if index is not None:
            i = lnum - index
            for line in lines:
                f['lines'][i] = line.rstrip()
                i += 1

        # dump local variables (referenced in current line only)
        f['dump'] = {}
        for name, where, value in vars:
            if name in f['dump']:
                continue
            if value is not cgitb.__UNDEF__:
                if where == 'global':
                    name = 'global ' + name
                elif where != 'local':
                    name = where + name.split('.')[-1]
                f['dump'][name] = pydoc.text.repr(value)
            else:
                f['dump'][name] = 'undefined'

        s['frames'].append(f)

    # add exception type, value and attributes
    s['etype'] = str(etype)
    s['evalue'] = str(evalue)
    s['exception'] = {}
    if isinstance(evalue, BaseException):
        for name in dir(evalue):
            value = pydoc.text.repr(getattr(evalue, name))
            s['exception'][name] = value

    # add all local values (of last frame) to the snapshot
    s['locals'] = {}
    for name, value in locals.items():
        s['locals'][name] = pydoc.text.repr(value)

    # add web2py environment variables
    for k, v in environment.items():
        if k in ('request', 'response', 'session'):
            s[k] = XML(str(BEAUTIFY(v)))

    return s
def _tb_error(self, exc_info):
    klass, exc, trace = exc_info
    frames = []
    cur_tb = trace
    while cur_tb:
        tb_frame = cur_tb.tb_frame
        cur_tb = cur_tb.tb_next
        if not isinstance(tb_frame, types.FrameType):
            continue

        cur_frame = {
            'filename': tb_frame.f_code.co_filename,
            'lineno': tb_frame.f_lineno,
            'method': tb_frame.f_code.co_name,
            'line': linecache.getline(tb_frame.f_code.co_filename,
                                      tb_frame.f_lineno),
        }

        argspec = None
        varargspec = None
        keywordspec = None
        _locals = {}

        try:
            arginfo = inspect.getargvalues(tb_frame)
            argspec = arginfo.args
            if arginfo.varargs is not None:
                varargspec = arginfo.varargs
                temp_varargs = list(arginfo.locals[varargspec])
                for i, arg in enumerate(temp_varargs):
                    temp_varargs[i] = '***'
                arginfo.locals[varargspec] = tuple(temp_varargs)
            if arginfo.keywords is not None:
                keywordspec = arginfo.keywords
            _locals.update(list(arginfo.locals.items()))
        except Exception:
            self.logger.critical(
                'Error while extracting arguments from frames.',
                exc_info=True)

        if argspec:
            cur_frame['argspec'] = argspec
        if varargspec:
            cur_frame['varargspec'] = varargspec
        if keywordspec:
            cur_frame['keywordspec'] = keywordspec
        if _locals:
            try:
                cur_frame['locals'] = {k: repr(v) for k, v in _locals.items()}
            except Exception:
                # repr() may fail since it may be one of the reasons
                # of the exception
                cur_frame['locals'] = {}

        frames.append(cur_frame)

    return {
        'class': klass.__name__,
        'frames': frames,
        'formatted': ''.join(traceback.format_exception(*exc_info)),
    }
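_tb_error leans on the fields of the ArgInfo named tuple that inspect.getargvalues returns: args, varargs, keywords, and locals. A small sketch of how those fields drive the scrubbing above (handler is an illustrative function):

import inspect

def handler(token, *extra, **opts):
    arginfo = inspect.getargvalues(inspect.currentframe())
    print(arginfo.args)      # ['token']
    print(arginfo.varargs)   # 'extra'
    print(arginfo.keywords)  # 'opts'
    # mask the *extra values the way _tb_error does before logging them
    if arginfo.varargs is not None:
        masked = tuple('***' for _ in arginfo.locals[arginfo.varargs])
        print(masked)        # ('***', '***')

handler('secret', 1, 2, retry=True)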
def classify_roc(X, y, verbose=False, nfolds=5, dim_red=None,
                 n_components=[5, 10, 20], scale=True, fs=None, njobs=1,
                 LR_C=[.01, .1, 1, 10, 100],
                 LR_class_weight=[None, 'balanced'],
                 SVC_C=[.01, .1, 1, 10, 100],
                 SVC_class_weight=[None, 'balanced'],
                 SVC_kernels=['rbf', 'linear', 'poly'],
                 n_estimators=[10, 20, 30],
                 max_features=['auto', 'log2', None],
                 shuffle=False, **kwargs):

    y_bin = label_binarize(y, classes=np.unique(y))
    n_classes = y_bin.shape[1]

    # spit out to the screen the function parameters, for logging
    if verbose:
        import inspect
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        print('function name "%s"' % inspect.getframeinfo(frame)[2])
        for i in args[2:]:
            print("    %s = %s" % (i, values[i]))

    # prepare configuration for cross validation test harness
    num_instances = len(X)
    seed = 8

    # prepare models
    models = []
    # all these support multiclass:
    # http://scikit-learn.org/stable/modules/multiclass.html
    models.append(('LR',
                   LogisticRegression(multi_class='ovr', solver='liblinear'),
                   {"C": LR_C, "class_weight": LR_class_weight,
                    "penalty": ['l1', 'l2']}))
    models.append(('LDA', LinearDiscriminantAnalysis(), {}))
    models.append(('RndFor', RandomForestClassifier(),
                   {'n_estimators': n_estimators,
                    'max_features': max_features}))
    models.append(('NB', GaussianNB(), {}))
    models.append(('SVC',
                   SVC(probability=True, decision_function_shape='ovr'),
                   {"C": SVC_C, "class_weight": SVC_class_weight,
                    'kernel': SVC_kernels}))
    models.append(('Most frequent',
                   DummyClassifier(strategy='most_frequent'), {}))
    models.append(('Stratified', DummyClassifier(strategy='stratified'), {}))

    # spit out to the screen the parameters to be tried in each classifier
    if verbose:
        print('Trying these parameters:')
        for m in models:
            print(m[0], ':', m[2])

    # evaluate each model in turn
    results = []
    names = []
    for name, model, params in models:
        # need to create the CV objects inside the loop because they get
        # used and do not get reset!
        if shuffle:
            inner_cv = StratifiedShuffleSplit(n_splits=nfolds, test_size=.1,
                                              random_state=seed)
            outer_cv = StratifiedShuffleSplit(n_splits=nfolds, test_size=.1,
                                              random_state=seed)
        else:
            # do this if no shuffling is wanted
            inner_cv = StratifiedKFold(n_splits=nfolds, random_state=seed)
            outer_cv = StratifiedKFold(n_splits=nfolds, random_state=seed)

        steps = [('clf', OneVsRestClassifier(model))]
        pipe_params = {}
        for key, val in params.items():
            key_name = 'clf__estimator__%s' % key
            pipe_params[key_name] = val

        if fs == 'l1':
            lsvc = LinearSVC(C=0.1, penalty="l1", dual=False)
            fs = feature_selection.SelectFromModel(lsvc)
        elif fs == 'rfe':
            fs = feature_selection.RFE(estimator=model)
            pipe_params['feat_sel__n_features_to_select'] = n_components
        steps = [('feat_sel', fs)] + steps

        if dim_red is not None:
            if dim_red == 'pca':
                dr = decomposition.PCA()
                pipe_params['dim_red__n_components'] = n_components
            elif dim_red == 'ica':
                dr = decomposition.FastICA()
                pipe_params['dim_red__n_components'] = n_components
            steps = [('dim_red', dr)] + steps

        if scale:
            steps = [('scale', preprocessing.RobustScaler())] + steps

        pipe = Pipeline(steps)

        cv_results = []
        cnt = 0
        for train_idx, test_idx in outer_cv.split(X, y):
            X_train, X_test = X[train_idx], X[test_idx]
            y_train, y_test = y_bin[train_idx], y_bin[test_idx]

            opt_model = GridSearchCV(estimator=pipe, param_grid=pipe_params,
                                     verbose=0, n_jobs=njobs,
                                     scoring='roc_auc')
            opt_model.fit(X_train, y_train)
            if verbose:
                if len(params.keys()) > 0:
                    print('Best parameters for', name,
                          ' (%d/%d):' % (cnt + 1, outer_cv.n_splits))
                    print(opt_model.best_params_)

            y_score = opt_model.predict_proba(X_test)

            fpr = dict()
            tpr = dict()
            roc_auc = dict()
            for i in range(n_classes):
                fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
                roc_auc[i] = auc(fpr[i], tpr[i])

            # Compute micro-average ROC curve and ROC area
            fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(),
                                                      y_score.ravel())
            roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

            cv_results.append({'fpr': fpr, 'tpr': tpr, 'roc_auc': roc_auc})
            cnt += 1

        results.append(cv_results)
        names.append(name)

    return results, models
def train(name="module",
          nof_iterations=100,
          learning_rate=0.0001,
          learning_rate_steps=1000,
          learning_rate_decay=0.5,
          load_module_name="module.ckpt",
          use_saved_module=False,
          batch_size=20,
          pred_pos_neg_ratio=10,
          lr_object_coeff=4,
          layers=[500, 500, 500],
          gpu=0):
    """
    Train SGP module given train parameters and module hyper-parameters

    :param name: name of the train session
    :param nof_iterations: number of epochs
    :param learning_rate:
    :param learning_rate_steps: decay after number of steps
    :param learning_rate_decay: the factor to decay the learning rate
    :param load_module_name: name of already trained module weights to load
    :param use_saved_module: start from an already trained module
    :param batch_size: number of images in each mini-batch
    :param pred_pos_neg_ratio: the loss ratio between positive and negative
                               (not labeled) predicates
    :param lr_object_coeff: the loss ratio between objects and predicates
    :param layers: list of sizes of the hidden layers of the predicate and
                   object classifier
    :param gpu: gpu number to use for the training
    :return: nothing
    """
    gpi_type = "Linguistic"
    including_object = True

    # get filesmanager
    filesmanager = FilesManager()

    # create logger
    logger_path = filesmanager.get_file_path("logs")
    logger_path = os.path.join(logger_path, name)
    logger = Logger(name, logger_path)

    # print train params
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    logger.log('function name "%s"' % inspect.getframeinfo(frame)[2])
    for i in args:
        logger.log("    %s = %s" % (i, values[i]))

    # set gpu
    if gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
        logger.log("os.environ[\"CUDA_VISIBLE_DEVICES\"] = " + str(gpu))

    # create module
    module = Module(gpi_type=gpi_type,
                    nof_predicates=NOF_PREDICATES,
                    nof_objects=NOF_OBJECTS,
                    is_train=True,
                    learning_rate=learning_rate,
                    learning_rate_steps=learning_rate_steps,
                    learning_rate_decay=learning_rate_decay,
                    lr_object_coeff=lr_object_coeff,
                    including_object=including_object,
                    layers=layers)

    ##
    # get module place holders
    #
    # get input place holders
    confidence_relation_ph, confidence_entity_ph, bb_ph, \
        word_embed_relations_ph, word_embed_entities_ph = module.get_in_ph()
    # get labels place holders
    labels_relation_ph, labels_entity_ph, labels_coeff_loss_ph = \
        module.get_labels_ph()
    # get loss and train step
    loss, gradients, grad_placeholder, train_step = module.get_module_loss()

    ##
    # get module output
    out_relation_probes, out_entity_probes = module.get_output()

    # Initialize the Computational Graph
    init = tf.global_variables_initializer()
    # Add ops to save and restore all the variables.
    variables = tf.contrib.slim.get_variables_to_restore()
    variables_to_restore = variables
    saver = tf.train.Saver(variables_to_restore)

    with tf.Session() as sess:
        # Restore variables from disk.
        module_path = filesmanager.get_file_path("sg_module.train.saver")
        module_path_load = os.path.join(module_path, load_module_name)
        if os.path.exists(module_path_load + ".index") and use_saved_module:
            saver.restore(sess, module_path_load)
            logger.log("Model restored.")
        else:
            sess.run(init)

        # train images
        vg_train_path = filesmanager.get_file_path("data.visual_genome.train")
        # list of train files
        train_files_list = list(range(2, 72))
        shuffle(train_files_list)

        # Actual validation is 5 files.
        # After tuning the hyper parameters, use just 2 files for early
        # stopping.
        validation_files_list = list(range(2))

        # create one hot vector for predicate_negative (i.e. not labeled)
        relation_neg = np.zeros(NOF_PREDICATES)
        relation_neg[NOF_PREDICATES - 1] = 1

        # object embedding
        embed_obj = FilesManager().load_file(
            "language_module.word2vec.object_embeddings")
        embed_pred = FilesManager().load_file(
            "language_module.word2vec.predicate_embeddings")
        embed_pred = np.concatenate(
            (embed_pred, np.zeros(embed_pred[:1].shape)),
            axis=0)  # concat negative representation

        # train module
        lr = learning_rate
        best_test_loss = -1
        baseline_path = filesmanager.get_file_path(
            "data.visual_genome.train_baseline")
        for epoch in range(1, nof_iterations):
            accum_results = None
            total_loss = 0
            steps = []
            # read data
            file_index = -1
            for file_name in train_files_list:
                file_index += 1

                # load data from file
                file_path = os.path.join(vg_train_path, str(file_name) + ".p")
                file_handle = open(file_path, "rb")
                train_images = pickle.load(file_handle)
                file_handle.close()
                shuffle(train_images)

                for image in train_images:
                    # load initial belief by baseline detector
                    file_path = os.path.join(baseline_path,
                                             str(image.image.id) + ".p")
                    if not os.path.exists(file_path):
                        continue
                    file_handle = open(file_path, "rb")
                    detector_data = pickle.load(file_handle)
                    file_handle.close()
                    image.predicates_outputs_with_no_activation = \
                        detector_data["rel_dist_mapped"]
                    image.objects_outputs_with_no_activations = \
                        detector_data["obj_dist_mapped"]

                    # set diagonal to be negative predicate (no relation for
                    # a single object)
                    indices = np.arange(
                        image.predicates_outputs_with_no_activation.shape[0])
                    image.predicates_outputs_with_no_activation[
                        indices, indices, :] = relation_neg
                    image.predicates_labels[indices, indices, :] = relation_neg

                    # spatial features
                    entity_bb = np.zeros((len(image.objects), 14))
                    for obj_id in range(len(image.objects)):
                        entity_bb[obj_id][0] = image.objects[obj_id].x / 1200.0
                        entity_bb[obj_id][1] = image.objects[obj_id].y / 1200.0
                        entity_bb[obj_id][2] = \
                            (image.objects[obj_id].x +
                             image.objects[obj_id].width) / 1200.0
                        entity_bb[obj_id][3] = \
                            (image.objects[obj_id].y +
                             image.objects[obj_id].height) / 1200.0
                        entity_bb[obj_id][4] = image.objects[obj_id].x
                        entity_bb[obj_id][5] = -1 * image.objects[obj_id].x
                        entity_bb[obj_id][6] = image.objects[obj_id].y
                        entity_bb[obj_id][7] = -1 * image.objects[obj_id].y
                        entity_bb[obj_id][8] = \
                            image.objects[obj_id].width * \
                            image.objects[obj_id].height
                        entity_bb[obj_id][9] = \
                            -1 * image.objects[obj_id].width * \
                            image.objects[obj_id].height
                    entity_bb[:, 4] = np.argsort(entity_bb[:, 4])
                    entity_bb[:, 5] = np.argsort(entity_bb[:, 5])
                    entity_bb[:, 6] = np.argsort(entity_bb[:, 6])
                    entity_bb[:, 7] = np.argsort(entity_bb[:, 7])
                    entity_bb[:, 8] = np.argsort(entity_bb[:, 8])
                    entity_bb[:, 9] = np.argsort(entity_bb[:, 9])
                    entity_bb[:, 10] = np.argsort(
                        np.max(image.objects_outputs_with_no_activations,
                               axis=1))
                    entity_bb[:, 11] = np.argsort(
                        -1 * np.max(image.objects_outputs_with_no_activations,
                                    axis=1))
                    entity_bb[:, 12] = np.arange(entity_bb.shape[0])
                    entity_bb[:, 13] = np.arange(entity_bb.shape[0], 0, -1)

                    # filter non mixed cases
                    relations_neg_labels = \
                        image.predicates_labels[:, :, NOF_PREDICATES - 1:]
                    if np.sum(image.predicates_labels[
                            :, :, :NOF_PREDICATES - 1]) == 0:
                        continue

                    if including_object:
                        in_entity_confidence = \
                            image.objects_outputs_with_no_activations
                    else:
                        in_entity_confidence = image.objects_labels * 1000

                    # give lower weight to negatives
                    coeff_factor = np.ones(relations_neg_labels.shape)
                    factor = float(np.sum(image.predicates_labels[
                        :, :, :NOF_PREDICATES - 2])) / \
                        np.sum(relations_neg_labels) / pred_pos_neg_ratio
                    coeff_factor[relations_neg_labels == 1] *= factor
                    coeff_factor[indices, indices] = 0

                    # create the feed dictionary
                    feed_dict = {
                        confidence_relation_ph:
                            image.predicates_outputs_with_no_activation,
                        confidence_entity_ph: in_entity_confidence,
                        bb_ph: entity_bb,
                        module.phase_ph: True,
                        word_embed_entities_ph: embed_obj,
                        word_embed_relations_ph: embed_pred,
                        labels_relation_ph: image.predicates_labels,
                        labels_entity_ph: image.objects_labels,
                        labels_coeff_loss_ph: coeff_factor.reshape((-1)),
                        module.lr_ph: lr
                    }

                    # run the network
                    out_relation_probes_val, out_entity_probes_val, loss_val, \
                        gradients_val = sess.run(
                            [out_relation_probes, out_entity_probes, loss,
                             gradients],
                            feed_dict=feed_dict)

                    if math.isnan(loss_val):
                        print("NAN")
                        continue

                    # set diagonal to be neg (in order not to take into
                    # account in statistics)
                    out_relation_probes_val[indices, indices, :] = relation_neg

                    # append gradient to list (will be applied as a batch of
                    # entities)
                    steps.append(gradients_val)

                    # statistic
                    total_loss += loss_val

                    results = test(image.predicates_labels,
                                   image.objects_labels,
                                   out_relation_probes_val,
                                   out_entity_probes_val)

                    # accumulate results
                    if accum_results is None:
                        accum_results = results
                    else:
                        for key in results:
                            accum_results[key] += results[key]

                    if len(steps) == batch_size:
                        # apply steps
                        step = steps[0]
                        feed_grad_apply_dict = {
                            grad_placeholder[j][0]: step[j][0]
                            for j in range(len(grad_placeholder))
                        }
                        for i in range(1, len(steps)):
                            step = steps[i]
                            for j in range(len(grad_placeholder)):
                                feed_grad_apply_dict[
                                    grad_placeholder[j][0]] += step[j][0]
                        feed_grad_apply_dict[module.lr_ph] = lr
                        sess.run([train_step], feed_dict=feed_grad_apply_dict)
                        steps = []

                # print stat - per file just for the first epoch - disabled!!
                if epoch == 1:
                    obj_accuracy = float(accum_results['entity_correct']) / \
                        accum_results['entity_total']
                    predicate_pos_accuracy = \
                        float(accum_results['relations_pos_correct']) / \
                        accum_results['relations_pos_total']
                    relationships_pos_accuracy = \
                        float(accum_results['relationships_pos_correct']) / \
                        accum_results['relations_pos_total']
                    logger.log("iter %d.%d - obj %f - pred %f - relation %f" %
                               (epoch, file_index, obj_accuracy,
                                predicate_pos_accuracy,
                                relationships_pos_accuracy))

            # print stat per epoch
            obj_accuracy = float(accum_results['entity_correct']) / \
                accum_results['entity_total']
            predicate_pos_accuracy = \
                float(accum_results['relations_pos_correct']) / \
                accum_results['relations_pos_total']
            predicate_all_accuracy = \
                float(accum_results['relations_correct']) / \
                accum_results['relations_total']
            relationships_pos_accuracy = \
                float(accum_results['relationships_pos_correct']) / \
                accum_results['relations_pos_total']
            relationships_all_accuracy = \
                float(accum_results['relationships_correct']) / \
                accum_results['relations_total']

            logger.log("iter %d - loss %f - obj %f - pred %f - rela %f - "
                       "all_pred %f - all rela %f - lr %f" %
                       (epoch, total_loss, obj_accuracy,
                        predicate_pos_accuracy, relationships_pos_accuracy,
                        predicate_all_accuracy, relationships_all_accuracy,
                        lr))

            # run validation
            if epoch % TEST_ITERATIONS == 0:
                total_test_loss = 0
                accum_test_results = None
                for file_name in validation_files_list:
                    # load data from file
                    file_path = os.path.join(vg_train_path,
                                             str(file_name) + ".p")
                    file_handle = open(file_path, "rb")
                    validation_images = pickle.load(file_handle)
                    file_handle.close()

                    for image in validation_images:
                        file_path = os.path.join(baseline_path,
                                                 str(image.image.id) + ".p")
                        if not os.path.exists(file_path):
                            continue
                        file_handle = open(file_path, "rb")
                        detector_data = pickle.load(file_handle)
                        file_handle.close()
                        image.predicates_outputs_with_no_activation = \
                            detector_data["rel_dist_mapped"]
                        image.objects_outputs_with_no_activations = \
                            detector_data["obj_dist_mapped"]

                        # set diagonal to be neg
                        indices = np.arange(
                            image.predicates_outputs_with_no_activation
                            .shape[0])
                        image.predicates_outputs_with_no_activation[
                            indices, indices, :] = relation_neg
                        image.predicates_labels[indices, indices, :] = \
                            relation_neg

                        # get shape of extended object to be used by the
                        # module
                        extended_confidence_object_shape = np.asarray(
                            image.predicates_outputs_with_no_activation.shape)
                        extended_confidence_object_shape[2] = NOF_OBJECTS

                        # spatial features
                        entity_bb = np.zeros((len(image.objects), 14))
                        for obj_id in range(len(image.objects)):
                            entity_bb[obj_id][0] = \
                                image.objects[obj_id].x / 1200.0
                            entity_bb[obj_id][1] = \
                                image.objects[obj_id].y / 1200.0
                            entity_bb[obj_id][2] = \
                                (image.objects[obj_id].x +
                                 image.objects[obj_id].width) / 1200.0
                            entity_bb[obj_id][3] = \
                                (image.objects[obj_id].y +
                                 image.objects[obj_id].height) / 1200.0
                            entity_bb[obj_id][4] = image.objects[obj_id].x
                            entity_bb[obj_id][5] = \
                                -1 * image.objects[obj_id].x
                            entity_bb[obj_id][6] = image.objects[obj_id].y
                            entity_bb[obj_id][7] = \
                                -1 * image.objects[obj_id].y
                            entity_bb[obj_id][8] = \
                                image.objects[obj_id].width * \
                                image.objects[obj_id].height
                            entity_bb[obj_id][9] = \
                                -1 * image.objects[obj_id].width * \
                                image.objects[obj_id].height
                        entity_bb[:, 4] = np.argsort(entity_bb[:, 4])
                        entity_bb[:, 5] = np.argsort(entity_bb[:, 5])
                        entity_bb[:, 6] = np.argsort(entity_bb[:, 6])
                        entity_bb[:, 7] = np.argsort(entity_bb[:, 7])
                        entity_bb[:, 8] = np.argsort(entity_bb[:, 8])
                        entity_bb[:, 9] = np.argsort(entity_bb[:, 9])
                        entity_bb[:, 10] = np.argsort(
                            np.max(image.objects_outputs_with_no_activations,
                                   axis=1))
                        entity_bb[:, 11] = np.argsort(-1 * np.max(
                            image.objects_outputs_with_no_activations,
                            axis=1))
                        entity_bb[:, 12] = np.arange(entity_bb.shape[0])
                        entity_bb[:, 13] = np.arange(entity_bb.shape[0], 0, -1)

                        # filter non mixed cases
                        relations_neg_labels = \
                            image.predicates_labels[:, :, NOF_PREDICATES - 1:]
                        if np.sum(image.predicates_labels[
                                :, :, :NOF_PREDICATES - 1]) == 0:
                            continue

                        # give lower weight to negatives
                        coeff_factor = np.ones(relations_neg_labels.shape)
                        factor = float(np.sum(image.predicates_labels[
                            :, :, :NOF_PREDICATES - 2])) / \
                            np.sum(relations_neg_labels) / pred_pos_neg_ratio
                        coeff_factor[relations_neg_labels == 1] *= factor
                        coeff_factor[indices, indices] = 0
                        coeff_factor[relations_neg_labels == 1] = 0

                        if including_object:
                            in_entity_confidence = \
                                image.objects_outputs_with_no_activations
                        else:
                            in_entity_confidence = image.objects_labels * 1000

                        # create the feed dictionary
                        feed_dict = {
                            confidence_relation_ph:
                                image.predicates_outputs_with_no_activation,
                            confidence_entity_ph: in_entity_confidence,
                            module.entity_bb_ph: entity_bb,
                            module.word_embed_entities_ph: embed_obj,
                            module.phase_ph: False,
                            module.word_embed_relations_ph: embed_pred,
                            labels_relation_ph: image.predicates_labels,
                            labels_entity_ph: image.objects_labels,
                            labels_coeff_loss_ph: coeff_factor.reshape((-1))
                        }

                        # run the network
                        out_relation_probes_val, out_entity_probes_val, \
                            loss_val = sess.run(
                                [out_relation_probes, out_entity_probes,
                                 loss],
                                feed_dict=feed_dict)

                        # set diagonal to be neg (in order not to take into
                        # account in statistics)
                        out_relation_probes_val[indices, indices, :] = \
                            relation_neg

                        # statistic
                        total_test_loss += loss_val

                        # statistics
                        results = test(image.predicates_labels,
                                       image.objects_labels,
                                       out_relation_probes_val,
                                       out_entity_probes_val)

                        # accumulate results
                        if accum_test_results is None:
                            accum_test_results = results
                        else:
                            for key in results:
                                accum_test_results[key] += results[key]

                # print stat
                obj_accuracy = float(accum_test_results['entity_correct']) / \
                    accum_test_results['entity_total']
                predicate_pos_accuracy = \
                    float(accum_test_results['relations_pos_correct']) / \
                    accum_test_results['relations_pos_total']
                predicate_all_accuracy = \
                    float(accum_test_results['relations_correct']) / \
                    accum_test_results['relations_total']
                relationships_pos_accuracy = \
                    float(accum_test_results['relationships_pos_correct']) / \
                    accum_test_results['relations_pos_total']
                relationships_all_accuracy = \
                    float(accum_test_results['relationships_correct']) / \
                    accum_test_results['relations_total']

                logger.log("VALIDATION - loss %f - obj %f - pred %f - "
                           "rela %f - all_pred %f - all rela %f" %
                           (total_test_loss, obj_accuracy,
                            predicate_pos_accuracy,
                            relationships_pos_accuracy,
                            predicate_all_accuracy,
                            relationships_all_accuracy))

                # save best module so far
                if best_test_loss == -1 or total_test_loss < best_test_loss:
                    module_path_save = os.path.join(
                        module_path, name + "_best_module.ckpt")
                    save_path = saver.save(sess, module_path_save)
                    logger.log("Model saved in file: %s" % save_path)
                    best_test_loss = total_test_loss

            # save module
            if epoch % SAVE_MODEL_ITERATIONS == 0:
                module_path_save = os.path.join(module_path,
                                                name + "_module.ckpt")
                save_path = saver.save(sess, module_path_save)
                logger.log("Model saved in file: %s" % save_path)

            # learning rate decay
            if (epoch % learning_rate_steps) == 0:
                lr *= learning_rate_decay
def CartClear(Cart, ResponseGroup=None, AWSAccessKeyId=None):
    '''CartClear in ECS'''
    argv = inspect.getargvalues(inspect.currentframe())[-1]
    return __cartOperation(XMLCartClear, argv)
def CartModify(Cart, Items, Actions, ResponseGroup=None, AWSAccessKeyId=None):
    '''CartModify in ECS'''
    argv = inspect.getargvalues(inspect.currentframe())[-1]
    return __cartOperation(XMLCartModify, argv)
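Both cart helpers rely on indexing the ArgInfo tuple: since it is (args, varargs, keywords, locals), [-1] is the locals dict, which at function entry maps every parameter name to its value. A sketch with an illustrative CartGet stand-in (not the real ECS API):

import inspect

def CartGet(Cart, ResponseGroup=None, AWSAccessKeyId=None):
    # ArgInfo is (args, varargs, keywords, locals); [-1] is the locals
    # dict, so argv maps every parameter name to its current value.
    argv = inspect.getargvalues(inspect.currentframe())[-1]
    return argv

print(CartGet('cart-1'))
# {'Cart': 'cart-1', 'ResponseGroup': None, 'AWSAccessKeyId': None}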
def caller_args():
    import inspect
    caller = inspect.stack()[2][0]
    args, _, _, values = inspect.getargvalues(caller)
    return dict([(i, values[i]) for i in args])
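Because caller_args indexes inspect.stack()[2], it is meant to be invoked from inside another helper, two frames below the function being inspected. For example (audit and transfer are illustrative names):

def audit(action):
    # stack()[2] inside caller_args skips caller_args() itself and audit(),
    # landing on transfer()'s frame.
    print(action, caller_args())

def transfer(src, dst, amount):
    audit('transfer')

transfer('alice', 'bob', 42)
# transfer {'src': 'alice', 'dst': 'bob', 'amount': 42}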
def open_mdsdataset(data_dir, grid_dir=None,
                    iters='all', prefix=None, read_grid=True,
                    delta_t=1, ref_date=None, calendar='gregorian',
                    geometry='sphericalpolar',
                    grid_vars_to_coords=True, swap_dims=None,
                    endian=">", chunks=None,
                    ignore_unknown_vars=False, default_dtype=None,
                    nx=None, ny=None, nz=None, llc_method="smallchunks"):
    """Open MITgcm-style mds (.data / .meta) file output as an xarray dataset.

    Parameters
    ----------
    data_dir : string
        Path to the directory where the mds .data and .meta files are stored
    grid_dir : string, optional
        Path to the directory where the mds .data and .meta files are stored,
        if different from ``data_dir``.
    iters : list, optional
        The iteration numbers of the files to be read. If ``None``, no data
        files will be read. If ``'all'`` (default), all iterations will be
        read.
    prefix : list, optional
        List of different filename prefixes to read. Default (``None``) is
        to read all available files.
    read_grid : bool, optional
        Whether to read the grid data
    delta_t : number, optional
        The timestep used in the model. (Can't be inferred.)
    ref_date : string, optional
        An ISO date string corresponding to the zero timestep,
        e.g. "1990-1-1 0:0:0" (See CF conventions [1]_)
    calendar : string, optional
        A calendar allowed by CF conventions [1]_
    geometry : {'sphericalpolar', 'cartesian', 'llc', 'curvilinear'}
        MITgcm grid geometry specifier
    grid_vars_to_coords : boolean, optional
        Whether to promote grid variables to coordinate status
    swap_dims : boolean, optional
        Whether to swap the logical dimensions for physical ones. If ``None``,
        will be set to ``False`` for ``geometry==llc`` and ``True`` otherwise.
    endian : {'=', '>', '<'}, optional
        Endianness of variables. Default for MITgcm is ">" (big endian)
    chunks : int or dict, optional
        If chunks is provided, it is used to load the new dataset into dask
        arrays.
    ignore_unknown_vars : boolean, optional
        Don't raise an error if unknown variables are encountered while
        reading the dataset.
    default_dtype : numpy.dtype, optional
        A datatype to fall back on if the metadata can't be read.
    nx, ny, nz : int, optional
        The numerical dimensions of the model. These will be inferred from
        ``XC.meta`` and ``RC.meta`` if they are not specified. If
        ``geometry==llc``, ``ny`` does not have to be specified.
    llc_method : {"smallchunks", "bigchunks"}, optional
        Which routine to use for reading LLC data. "smallchunks" splits the
        file into an individual dask chunk of size (nx x nx) for each face
        of each level (i.e. the total number of chunks is 13 * nz).
        "bigchunks" loads the whole raw data file (either into memory or as
        a numpy.memmap), splits it into faces, and concatenates those faces
        together using ``dask.array.concatenate``. The different methods
        will have different memory and i/o performance depending on the
        details of the system configuration.

    Returns
    -------
    dset : xarray.Dataset
        Dataset object containing all coordinates and variables.

    References
    ----------
    .. [1] http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/build/ch04s04.html
    """
    # get frame info for history
    frame = inspect.currentframe()
    _, _, _, arg_values = inspect.getargvalues(frame)
    del arg_values['frame']
    function_name = inspect.getframeinfo(frame)[2]

    # auto-detect whether to swap dims
    if swap_dims is None:
        if read_grid == False:
            swap_dims = False
        else:
            swap_dims = False if geometry in ('llc', 'curvilinear') else True

    # some checks for argument consistency
    if swap_dims and not read_grid:
        raise ValueError("If swap_dims==True, read_grid must be True.")

    # if prefix is passed as a string, force it to be a list
    if type(prefix) in stringtypes:
        prefix = [prefix]
    else:
        pass

    # We either have a single iter, in which case we create a fresh store,
    # or a list of iters, in which case we combine.
    if iters == 'all':
        iters = _get_all_iternums(data_dir, file_prefixes=prefix)
    if iters is None:
        iternum = None
    else:
        try:
            iternum = int(iters)
        # if not we probably have some kind of list
        except TypeError:
            if len(iters) == 1:
                iternum = int(iters[0])
            else:
                # We have to check to make sure we have the same prefixes at
                # each timestep...otherwise we can't combine the datasets.
                first_prefixes = prefix or _get_all_matching_prefixes(
                    data_dir, iters[0])
                for iternum in iters:
                    these_prefixes = _get_all_matching_prefixes(
                        data_dir, iternum, prefix)
                    # don't care about order
                    if set(these_prefixes) != set(first_prefixes):
                        raise IOError("Could not find the expected file "
                                      "prefixes %s at iternum %g. (Instead "
                                      "found %s)" % (repr(first_prefixes),
                                                     iternum,
                                                     repr(these_prefixes)))

                # chunk at least by time
                chunks = chunks or {}

                # recursively open each dataset at a time
                kwargs = dict(
                    grid_dir=grid_dir, delta_t=delta_t, swap_dims=False,
                    prefix=prefix, ref_date=ref_date, calendar=calendar,
                    geometry=geometry,
                    grid_vars_to_coords=False,
                    endian=endian, chunks=chunks,
                    ignore_unknown_vars=ignore_unknown_vars,
                    default_dtype=default_dtype,
                    nx=nx, ny=ny, nz=nz, llc_method=llc_method)
                datasets = [open_mdsdataset(
                    data_dir, iters=iternum, read_grid=False, **kwargs)
                    for iternum in iters]
                # now add the grid
                if read_grid:
                    if 'iters' in kwargs:
                        kwargs.pop('iters')
                    if 'read_grid' in kwargs:
                        kwargs.pop('read_grid')
                    datasets.insert(0, open_mdsdataset(
                        data_dir, iters=None, read_grid=True, **kwargs))
                # apply chunking
                ds = xr.auto_combine(datasets)
                if swap_dims:
                    ds = _swap_dimensions(ds, geometry)
                if grid_vars_to_coords:
                    ds = _set_coords(ds)
                return ds

    store = _MDSDataStore(data_dir, grid_dir, iternum, delta_t, read_grid,
                          prefix, ref_date, calendar,
                          geometry, endian,
                          ignore_unknown_vars=ignore_unknown_vars,
                          default_dtype=default_dtype,
                          nx=nx, ny=ny, nz=nz, llc_method=llc_method)
    ds = xr.Dataset.load_store(store)

    if swap_dims:
        ds = _swap_dimensions(ds, geometry)
    if grid_vars_to_coords:
        ds = _set_coords(ds)

    if ref_date and 'time' in ds:
        # our own little hack for decoding cf datetimes
        encoding = {}
        units = ds.time.attrs.get('units') or ''
        if 'since' in units:
            calendar = ds.time.attrs.get('calendar')
            encoding['units'] = units
            del ds.time.attrs['units']
            if calendar:
                encoding['calendar'] = calendar
                del ds.time.attrs['calendar']
            ds.time.data = (xr.coding.times.decode_cf_datetime(
                ds.time.data, units=units, calendar=calendar))
            # this doesn't seem to have any effect, so we remove it
            #ds.time.encoding = encoding

    # do we need more fancy logic (like open_dataset), or is this enough
    if chunks is not None:
        ds = ds.chunk(chunks)

    # set attributes for CF conventions
    ds.attrs['Conventions'] = "CF-1.6"
    ds.attrs['title'] = "netCDF wrapper of MITgcm MDS binary data"
    ds.attrs['source'] = "MITgcm"
    arg_string = ', '.join(['%s=%s' % (str(k), repr(v))
                            for (k, v) in arg_values.items()])
    ds.attrs['history'] = ('Created by calling '
                           '`%s(%s)`' % (function_name, arg_string))

    return ds
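The frame captured at the top of open_mdsdataset serves a provenance purpose: the full argument list is replayed into the dataset's history attribute. The same trick in isolation (load_data is an illustrative stand-in, not xmitgcm API):

import inspect

def load_data(path, scale=1.0, verbose=False):
    frame = inspect.currentframe()
    _, _, _, arg_values = inspect.getargvalues(frame)
    del arg_values['frame']  # drop the helper local, keep only parameters
    call = '%s(%s)' % (inspect.getframeinfo(frame)[2],
                       ', '.join('%s=%r' % kv for kv in arg_values.items()))
    return {'history': 'Created by calling `%s`' % call}

print(load_data('run1', scale=2.0)['history'])
# Created by calling `load_data(path='run1', scale=2.0, verbose=False)`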