Example #1
    def handle_data(self, data):
        """Function for work with data between tags"""

        global title
        global raw_content
        
        try:
            if data is None or data == '':
                return
        
            ### Title tag ###
            if is_title == 1:
                title = data + "\n(" + header_date + ")\n" + ('-' * 31) + "\n\n"
            else:
                if skip == 1:
                    return

                if is_body == 1 and data and data != ' ':
                    raw_content += data
        except Exception as e:
            sys.stderr.write("ERROR ({},{}): Unable to handle data:\n\t{}: {}\n".
                format(getframeinfo(currentframe()).filename,
                       getframeinfo(currentframe()).lineno,
                       str(sys.exc_info()[0]), e))
            return 1
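Many of the examples below use this same error-location idiom. A minimal, self-contained sketch of the pattern (hypothetical function name, standard library only):

import sys
from inspect import currentframe, getframeinfo

def report_error(message):
    # getframeinfo(currentframe()) describes this helper's own frame, so the
    # filename/lineno pair pinpoints where the report was generated.
    info = getframeinfo(currentframe())
    sys.stderr.write("ERROR ({}:{}): {}\n".format(info.filename, info.lineno, message))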
Example #2
 def logFunctionAndArgs(self):
     frame = inspect.getouterframes(inspect.currentframe())[1][0]
     args, _, _, values = inspect.getargvalues(frame)
     frameinfo = inspect.getframeinfo(frame)
     functionName=inspect.getframeinfo(frame)[2]
     output = ""
     for arg in args[1:]: #[1:] skip the first argument 'self'
         value = values[arg]
         if isinstance(value, str):
             #add apostrophes for string values
             value = "\'"+value+"\'"
         elif isinstance(value, int):
             value = '%02X' % value
         else:
             newValue = ""
             for i in value:
                 if isinstance(i, int):
                     newValue += '%02X' % i
                 else:
                     newValue += str(i)
             value = newValue
         output += arg + '=' + value
         if arg != args[-1]:
             #add comma if not the last element
             output +=','
     # do not print "\n" as a newline
     output = output.replace("\n","\\n")
     self.logging.info("--> "+functionName+'('+output+')')
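A self-contained sketch of the same idea, with hypothetical names: a helper that reports its caller's function name and arguments, standard library only.

import inspect

def log_caller_args():
    # Frame index 1 of getouterframes() is whoever called this helper.
    frame = inspect.getouterframes(inspect.currentframe())[1][0]
    args, _, _, values = inspect.getargvalues(frame)
    name = inspect.getframeinfo(frame).function
    print("--> %s(%s)" % (name, ", ".join("%s=%r" % (a, values[a]) for a in args)))

def write_register(addr, data):
    log_caller_args()

write_register(0x0A, 0x7F)   # prints: --> write_register(addr=10, data=127)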
Example #3
def hello():
    print 'hello'
    frame_me = inspect.currentframe()
    print inspect.currentframe()
    print inspect.getframeinfo(frame_me)
    print inspect.stack()[0]
    print globals()['hello']
Example #4
def find_nondebug_frame(obj, f):
    """Find the first frame that isn't a debugger frame.
    Generally we want traceback information without polluting
    it with debugger information.
    """
    if obj.dbg_pydb: return f

    f = obj.is_in_dbg(f)

    ### FIXME: would like a routine like is_in_threaddb_dispatch
    ### but works with threading instead. Decorating or subclassing
    ### threading might do the trick.
    (filename, line_no, routine) = inspect.getframeinfo(f)[0:3]
    (path, basename)=os.path.split(filename)
    while (basename.startswith('threading.py') or
           basename.startswith('gdb.py') or
           basename.startswith('threaddbg.py') or
           basename.startswith('subcmd.py') or
           basename.startswith('pydb.py') or
           routine == 'trace_dispatch_gdb') and f.f_back:
        f = f.f_back
        (filename, line_no, routine) = \
                   inspect.getframeinfo(f)[0:3]
        (path, basename)=os.path.split(filename)
    return f
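The loop above is one instance of a general pattern: walk f_back until the current frame no longer belongs to a file you want to hide. A minimal sketch of that pattern (hypothetical helper name, standard library only):

import inspect
import os.path

def skip_frames_from(frame, hidden_basenames):
    # Walk outward toward the caller while the frame comes from a hidden file.
    while frame.f_back is not None:
        base = os.path.basename(inspect.getframeinfo(frame).filename)
        if base not in hidden_basenames:
            break
        frame = frame.f_back
    return frame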
Example #5
 def run(self):
     """
     Initialization of the connections and accepting incoming communications
     """
     self.slave_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
     self.cmd_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
     self.slave_connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1);
     self.cmd_connection.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1);
     self.slave_connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
     self.cmd_connection.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
     s_port = self._parser.getValueFromSection(MASTER_CONF_LISTEN_SECTION, MASTER_CONF_LISTEN_PORT_SLAVE_ENTRY);
     c_port = self._parser.getValueFromSection(MASTER_CONF_LISTEN_SECTION, MASTER_CONF_LISTEN_PORT_CMD_ENTRY);
     if not s_port:
         frameinfo = getframeinfo(currentframe());
         self.logger.error('in run: No slave listening port defined in '+MASTER_CONF_FILE);
         sys.exit(1);
     if not c_port:
         frameinfo = getframeinfo(currentframe());
         self.logger.error('in run: No command listening port defined in '+MASTER_CONF_FILE);
         sys.exit(1);
     self.slave_connection.bind(('', int(s_port)));
     self.slave_connection.listen(MAX_SLAVES);
     self.cmd_connection.bind(('', int(c_port)));
     self.cmd_connection.listen(MAX_CMDS);
     self.loop();
Example #6
def f1():

    myid = mpi_comm_rank(MPI_COMM_WORLD)
    program_state_stack(locals(), getframeinfo(currentframe()), "my_state.json")

    for local_var_i in range(2):
        for local_var_j in range(2):
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()
                a(local_var_i, local_var_j)
            if program_state_stack(locals(), getframeinfo(currentframe())):
                my_s = "%s_f1_%d_%d_%03d_%03d.txt" % (
                    mydir,
                    local_var_i,
                    local_var_j,
                    myid,
                    getframeinfo(currentframe()).lineno,
                )
                f = open(my_s, "w")
                f.write(my_s[5:])
                f.flush()
                f.close()

    program_state_stack(locals(), getframeinfo(currentframe()), last_call="LastCall")

    mpi_finalize()
Example #7
	def test_mocking_inspect_getframeinfo(self):
		orig = inspect.getframeinfo
		when(inspect).getframeinfo.then_call(orig)

		# just check this doesn't cause infinite recursion
		f = inspect.currentframe()
		inspect.getframeinfo(f)
Example #8
 def set_next_execution(self):
     """
     Set self.next_execution to the next time the import is going to be run. If run_immediately
     is set, time is set to now()
     """
     debug_enter(who_am_i(), who_is_parent(), getframeinfo(currentframe()).lineno)
     time_adds = {
         self.EVERY_TWO_MINUTES: timedelta(seconds=120), # used for testing
         self.HOURLY: timedelta(hours=1),
         self.EVERY_SIX_HOURS: timedelta(hours=6),
         self.DAILY: timedelta(days=1),
         self.EVERY_THREE_DAYS: timedelta(days=3),
         self.WEEKLY: timedelta(weeks=1),
         self.BI_WEEKLY: timedelta(weeks=2),
         self.EVERY_FOUR_WEEKS: timedelta(weeks=4),
     }
     if self.frequency:
         if not self.next_execution:
             self.next_execution = datetime.now() + time_adds[self.frequency]
         else:
             self.next_execution += time_adds[self.frequency]
     else:
         self.next_execution = None
     self.save()
     debug_exit(who_am_i(), getframeinfo(currentframe()).lineno)
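who_am_i, who_is_parent, and debug_enter are project helpers that do not appear in this listing; a plausible minimal implementation of the first two with inspect would be (an assumption, not the project's actual code):

import inspect

def who_am_i():
    # inspect.stack()[1] is the caller's frame record; field 3 is the function name.
    return inspect.stack()[1][3]

def who_is_parent():
    # One level further out: the caller's caller.
    return inspect.stack()[2][3]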
Example #9
def formatCode(frame, stream):
    _updatecache = linecache.updatecache

    def updatecache(*args):
        # linecache.updatecache looks in the module search path for
        # files that match the module name. This is a problem if you
        # have a file without source with the same name as a python
        # standard library module. We'll just check to see if the file
        # exists first and require exact path matches.
        if not os.access(args[0], os.R_OK):
            return []
        return _updatecache(*args)
    linecache.updatecache = updatecache
    try:
        try:
            frameInfo = inspect.getframeinfo(frame, context=1)
        except:
            frameInfo = inspect.getframeinfo(frame, context=0)
        fileName, lineNo, funcName, text, idx = frameInfo

        stream.write('  File "%s", line %d, in %s\n' %
                     (fileName, lineNo, funcName))
        if text is not None and len(text) > idx:
            # If the source file is not available, we may not be able to get
            # the line
            stream.write('    %s\n' % text[idx].strip())
    finally:
        linecache.updatecache = _updatecache
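Usage is a one-liner: pass any frame plus a writable stream, and one traceback-style entry is emitted for it (a sketch, assuming formatCode as defined above):

import sys
import inspect

formatCode(inspect.currentframe(), sys.stdout)
# prints something like:
#   File "demo.py", line 4, in <module>
#     formatCode(inspect.currentframe(), sys.stdout)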
Example #10
        def handler(signum, frame):
            import inspect
            print inspect.getframeinfo(frame)
            print inspect.trace()
            while 1:
                s = raw_input(\
"""
 
 Enter sense switch.

    sol:  Print current best solution.
    cont: Continue calculation.
    call: Executes sigint_callback [%s].
    exit: Exits with current best solution.

 >>> """ % sigint_callback)
                if s.lower() == 'sol': 
                    print self.bestSolution
                elif s.lower() == 'cont': 
                    return
                elif s.lower() == 'call': 
                    # sigint call_back
                    if sigint_callback is not None:
                        sigint_callback(self.bestSolution)
                elif s.lower() == 'exit': 
                    self._EARLYEXIT = True
                    return
                else:
                    print "unknown option : %s" % s
            return
Example #11
def logout(request):

	# *************************************************************************
	# *************************************************************************
	# *************************************************************************
	#
	#
	# Logout action
	#
	#
	# *************************************************************************
	# *************************************************************************
	# *************************************************************************
	logger.info('<' + __name__ + '.' + inspect.getframeinfo(inspect.currentframe()).function + '()> $$$ start $$$');
	
	# =========================================================================
	# setup	
	# =========================================================================	

	# =========================================================================
	# validation	
	# =========================================================================	

	# =========================================================================
	# process
	# =========================================================================
	_logout(request)

	# =========================================================================
	# contents
	# =========================================================================
	logger.info('<' + __name__ + '.' + inspect.getframeinfo(inspect.currentframe()).function + '()> --- end ---');
	return django.http.HttpResponseRedirect('/')
Example #12
File: tw2.py Project: caitouwh/kod
    def set_up_data(self, data_gross=None, data_costs=None, weight_matrix=None):
        if weight_matrix is not None:
            setattr(self, "data", weight_matrix.ffill())
            return None
        
        log=self.log
        frequency=self.frequency
        equalise_gross = self.equalise_gross
        cost_multiplier = self.cost_multiplier
        annualisation = self.annualisation
        period_target_SR = self.period_target_SR

        data_gross = [data_item.cumsum().resample(frequency, how="last").diff() for
                       data_item in data_gross]
        
        data_costs = [data_item.cumsum().resample(frequency, how="last").diff() for
                      data_item in data_costs]

        data_gross=df_from_list(data_gross)    
        data_costs=df_from_list(data_costs)    

        if equalise_gross:
            print(__file__ + ":" + str(inspect.getframeinfo(inspect.currentframe())[:3][1]) + ":" +"Setting all gross returns to be identical - optimisation driven only by costs")
        if cost_multiplier!=1.0:
            print(__file__ + ":" + str(inspect.getframeinfo(inspect.currentframe())[:3][1]) + ":" +"Using cost multiplier on optimisation of %.2f" % cost_multiplier)                
        data = work_out_net(data_gross, data_costs, annualisation=annualisation,
                            equalise_gross=equalise_gross, cost_multiplier=cost_multiplier,
                            period_target_SR=period_target_SR)                    
        setattr(self, "data", data)
Example #13
def show(request):

	###########################################################################
	# View that displays the netstat status
	###########################################################################

	logger.info('<' + __name__ + '.' + inspect.getframeinfo(inspect.currentframe()).function + '()> $$$ start $$$');

	# =========================================================================
	# setup	
	# =========================================================================	

	# =========================================================================
	# validation	
	# =========================================================================	

	# =========================================================================
	# process
	# =========================================================================

	# =========================================================================
	# contents
	# =========================================================================
	fields = {}
	util.fill_menu_items(request, fields)
	context = django.template.RequestContext(request, fields)
	template = django.template.loader.get_template('listeners/show.html')
	logger.info('<' + __name__ + '.' + inspect.getframeinfo(inspect.currentframe()).function + '()> --- end ---');
	return django.http.HttpResponse(template.render(context))
Example #14
 def loop(self):
     """
     Main loop. Waits for new connections.
     """
     self.run = True;
     while self.run:
         try:
             rlist, wlist, elist = select.select([self.slave_connection], [], [], SELECT_TIMEOUT);
             for connection in rlist:
                 self.accept_new_slave_connection(connection);
             rlist, wlist, elist = select.select([self.cmd_connection], [], [], SELECT_TIMEOUT);
             for connection in rlist:
                 self.accept_new_cmd_connection(connection);
         except KeyboardInterrupt as e:
             frameinfo = getframeinfo(currentframe());
             self.logger.info('in loop: Keyboard interrupt: leaving program');
             print("[ MASTER DAEMON " + frameinfo.filename + ":" + str(frameinfo.lineno) + " ]: Keyboard Interrupt");
             self.stop();
             sys.exit(0);
         except ValueError as e:
             frameinfo = getframeinfo(currentframe());
             self.logger.error('in loop: Value error: ' + str(e));
             print("[ MASTER DAEMON " + frameinfo.filename + ":" + str(frameinfo.lineno) + "]: Value Error");
             print(e);
             pass;
Example #15
	def get_catch_exception_paraname( self ):
		paraname = ''
		lines = self.ori_block.split( self.split_by )
		for line in lines:
			if SSW( line , "}" ) and SEW( line , "{" ) \
				and line.find( "catch" ) > 0 :
				int_l =  line.find( "(" )
				int_r = line.find( ")" )
				if int_l > 0 and int_r > int_l :
					str_a = line[ int_l + 1 : int_r ]
					str_a=str_a.replace( "final" , "" )
					str_a = RemoveJavaCommentsInLine( str_a )
					str_a = str_a.strip()
					str_a = MSTOO( str_a )
					strs = str_a.split( ' ' )
					if 2 != len( strs ):
						print " Catch parameters variable is not right "
						print self.ori_block
						print line
						frameinfo = getframeinfo(currentframe())
						print frameinfo.filename, frameinfo.lineno						
						sys.exit( 0 )
					paraname = strs[ 1 ]
		if '' == paraname:
			print " Catch parameters variable is not right "
			print self.ori_block
			frameinfo = getframeinfo(currentframe())
			print frameinfo.filename, frameinfo.lineno						
			sys.exit( 0 )			
		return paraname
Example #16
 def connect_to_masters(self):
     """
     Stores every device on the network whose hostname begins with "MD3" in the
     self.connected_masters dict(), with hostnames as keys and freshly opened sockets as values.
     """
     hostname = socket.gethostname()
     self.connected_masters = {};
     for host in self._hostlist:
         if MASTER_NAME_PREFIX in host._Hostname or str(host._IpAddr) == '127.0.0.1':
             port = self._parser.getValueFromSection(SLAVE_CONF_CONNECT_SECTION, SLAVE_CONF_CONNECT_PORT_ENTRY);
             if not port:
                 self.logger.error('in connect_to_masters: No ' + SLAVE_CONF_CONNECT_PORT_ENTRY + ' in ' + SLAVE_CONF_CONNECT_SECTION + ' section or maybe no such ' + SLAVE_CONF_CONNECT_SECTION + ' defined');
                 sys.exit(1);
             try:
                 self.logger.debug('Connecting to ' + str(host._IpAddr) + ':' + str(port));
                 sock = socket.create_connection((host._IpAddr, port));
                 hostname = host._Hostname.split('.')[0];
                 self.connected_masters[host._Hostname] = sock;
             except Exception as e:
                 frameinfo = getframeinfo(currentframe());
                 self.logger.error('in connect_to_masters: ' + str(e));
                 pass;
     if SLAVE_NAME_PREFIX in hostname:
         port = self._parser.getValueFromSection(SLAVE_CONF_CONNECT_SECTION, SLAVE_CONF_CONNECT_PORT_ENTRY);
         if not port:
             self.logger.error('in connect_to_masters: No ' + SLAVE_CONF_CONNECT_PORT_ENTRY + ' in ' + SLAVE_CONF_CONNECT_SECTION + ' section or maybe no such ' + SLAVE_CONF_CONNECT_SECTION + ' defined');
             sys.exit(1);
         try:
             self.logger.debug('Connecting to 127.0.0.1:' + str(port));
             sock = socket.create_connection(('127.0.0.1', port));
             self.connected_masters[hostname] = sock;
         except Exception as e:
             frameinfo = getframeinfo(currentframe());
             self.logger.error('in connect_to_masters: ' + str(e));
             pass;
Example #17
def parse_module_or_class(mod_class, parser, in_args, parse_type = 'class'):
    if DEBUG_PARSING: print 'LINE:',inspect.getframeinfo(inspect.currentframe()).lineno,'parse_module_or_class'
    global single_function
    p = parser
    non_help_in_args = [ it for it in in_args if not it == '--help' ]
    # function is being pulled from command line
    in_args = in_args if in_args == ['--help'] else in_args[1:]
    THIS_ARG = '' if not non_help_in_args else non_help_in_args[0]
    p.description = mod_class.__doc__
    p.usage = p.format_usage().rstrip(' [--help] \n').replace('usage: ', '') + ' ' + THIS_ARG + ' [--help]\n'
    if parse_type == 'class':
        fxs = [ name for name, fx in inspect.getmembers(mod_class, inspect.ismethod) if hasattr(fx, 'argh_args') ]
    elif parse_type == 'module':
        fxs = [ name for name, fx in inspect.getmembers(mod_class, inspect.isfunction) if hasattr(fx, 'argh_args') ]
        # fxs                                 =   [name for name,fx in inspect.getmembers(mod_class,inspect.isfunction)]
    # If single-class, clean usage
    if no_class or single_class:
        p.usage = p.usage.replace(' %s ' % THIS_ARG, ' ')
    # If Class only has one function, this function is called by default
    if len(fxs) == 1:
        single_function = fxs[0]
        if not in_args == ['--help']:
            in_args.insert(0, single_function)
    else:
        single_function = False
    # Parsing Class Ends Here
    if not in_args or in_args == ['--help']:
        sp = p.add_subparsers()
        for it in fxs:
            sp.add_parser(it, help=getattr(mod_class, it).__doc__)
        # if os.environ.has_key('COMP_LINE'):
        #     os.system(                         "logger -t 'PY_PARSE_C1' '%s'" % str('>>'+os.environ['COMP_LINE']+'<<'))
        # else:
        #     os.system(                         "logger -t 'PY_PARSE_C1' '%s'" % str('>>'+str(in_args)+'<<'))
        p.usage = '\n\t' + p.format_usage().replace('usage: ', '', 1).replace('[--help]', '').replace(' ... ', '').rstrip(' \n')
        sp.metavar = 'FUNCTION'
        p.usage += ' %s [--help]' % sp.metavar
        argcomplete.autocomplete(p)
        if DEBUG_PARSING: print 'LINE:',inspect.getframeinfo(inspect.currentframe()).lineno
        return p.parse_args()
    # Unrecognized Parse instruction
    if in_args and not fxs.count(in_args[0]):
        argcomplete.autocomplete(p)
        print 'Unrecognized <function>'
        print in_args
        p.print_help()
        if DEBUG_PARSING: print 'LINE:',inspect.getframeinfo(inspect.currentframe()).lineno
        exit(1)
    # Execute Parse instruction
    else:
        # if os.environ.has_key('COMP_LINE'):
        #     os.system(                         "logger -t 'PY_PARSE_C3' '%s'" % str('>>'+os.environ['COMP_LINE']+'<<'))
        # else:
        #     os.system(                         "logger -t 'PY_PARSE_C3' '%s'" % str('>>'+str(in_args)+'<<'))
        #     os.system(                         "logger -t 'PY_PARSE_C3' '%s'" % str('>>'+str(p.usage)+'<<'))
        p_help = argparse.SUPPRESS if [ it for it in in_args if not it == '--help' ] else mod_class.__doc__
        p.add_argument('_func', choices=fxs, help=p_help)
        if DEBUG_PARSING: print 'LINE:',inspect.getframeinfo(inspect.currentframe()).lineno
        return parse_function(getattr(mod_class, in_args[0]), p, in_args)
Example #18
def main ():
  setup_logging()
  _monkeypatch_console()
  try:
    if doLaunch():
      post_startup()
      core.goUp()
    else:
      return

  except SystemExit:
    return
  except:
    import traceback
    traceback.print_exc()
    return

  if cli:
    print "This program comes with ABSOLUTELY NO WARRANTY.  This program is " \
          "free software,"
    print "and you are welcome to redistribute it under certain conditions."
    print "Type 'help(pox.license)' for details."
    import pox.license
    import time
    time.sleep(1)
    import code
    import sys
    sys.ps1 = "POX> "
    sys.ps2 = " ... "
    l = dict(locals())
    l['core'] = core
    code.interact('Ready.', local=l)
  else:
    try:
      import traceback
      import time
      import sys
      import inspect
      
      while True:
        if 'deadlock' in globals()['options'] and globals()['options']['deadlock']:
          frames = sys._current_frames()
          for key in frames:
            frame = frames[key]
            print inspect.getframeinfo(frame)
            outer_frames = inspect.getouterframes(frame)
            for i in range(0, len(outer_frames)): 
              print "     " + str(inspect.getframeinfo(outer_frames[i][0]))

        time.sleep(5)
    except:
      if 'deadlock' in globals()['options'] and globals()['options']['deadlock']:
        traceback.print_exc(file=sys.stdout)
    #core.scheduler._thread.join() # Sleazy

  try:
    pox.core.core.quit()
  except:
    pass
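The deadlock branch above is a useful pattern by itself: sys._current_frames() maps each thread id to its currently executing frame, and inspect.getouterframes() walks outward from there. A compact sketch (standard library only):

import sys
import inspect

def dump_all_threads():
    for thread_id, frame in sys._current_frames().items():
        print("Thread %s:" % thread_id)
        for outer in inspect.getouterframes(frame):
            info = inspect.getframeinfo(outer[0])
            print("    %s:%d in %s" % (info.filename, info.lineno, info.function))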
Example #19
    def optimise(self, ann_SR_costs=None):

        """
    
        Optimise weights over some returns data
        
        """
        log=self.log
        date_method = self.date_method
        rollyears = self.rollyears
        optimiser = self.optimiser
        cleaning = self.cleaning
        apply_cost_weight = self.apply_cost_weight
        
        data=getattr(self, "data", None)
        if data is None:
            log.critical("You need to run .set_up_data() before .optimise()")
        
        fit_dates = generate_fitting_dates(data, date_method=date_method, rollyears=rollyears)
        setattr(self, "fit_dates", fit_dates)
    
        ## Now for each time period, estimate weights
        ## create a list of weight vectors
        weight_list=[]
        
        ## create a class object for each period
        opt_results=[]
        
        print(__file__ + ":" + str(inspect.getframeinfo(inspect.currentframe())[:3][1]) + ":" +"Optimising...")
        
        for fit_period in fit_dates:            
            print(__file__ + ":" + str(inspect.getframeinfo(inspect.currentframe())[:3][1]) + ":" +"Optimising for data from %s to %s" % (str(fit_period.period_start), str(fit_period.period_end)))
            ## Do the optimisation for one period, using a particular optimiser instance
            results_this_period=optSinglePeriod(self, data, fit_period, optimiser, cleaning)

            opt_results.append(results_this_period)

            weights=results_this_period.weights
            
            ## We adjust dates slightly to ensure no overlaps
            dindex=[fit_period.period_start+datetime.timedelta(days=1), 
                    fit_period.period_end-datetime.timedelta(days=1)]
            
            ## create a double row to delineate start and end of test period
            weight_row=pd.DataFrame([weights]*2, index=dindex, columns=data.columns)
            weight_list.append(weight_row)

        ## Stack everything up    
        raw_weight_df=pd.concat(weight_list, axis=0)
        
        if apply_cost_weight:
            print(__file__ + ":" + str(inspect.getframeinfo(inspect.currentframe())[:3][1]) + ":" +"Applying cost weighting to optimisation results")
            weight_df = apply_cost_weighting(raw_weight_df, ann_SR_costs)
        else:
            weight_df =raw_weight_df 
        
        setattr(self, "results", opt_results)
        setattr(self, "weights", weight_df)
        setattr(self, "raw_weights", raw_weight_df)
Example #20
def display_function_info():

    caller = inspect.getframeinfo(inspect.currentframe().f_back)[2]

    print ""
    print term_color.OKGREEN+"{0}:{1}".format(inspect.getframeinfo(inspect.currentframe().f_back)[0], caller) +term_color.ENDC

    return
Example #21
    def __init__(self, a_log_flag=False):
        self.log_flag = a_log_flag

        callerframerecord = inspect.stack()[1]
        frame = callerframerecord[0]
        self.file = inspect.getframeinfo(frame).filename.split("/").pop()
        self.func = inspect.getframeinfo(frame).function
        self.line = inspect.getframeinfo(frame).lineno
Example #22
 def get_caller(self):
   fm = inspect.currentframe()
   fm_info = inspect.getframeinfo(fm)
   curr_filename = fm_info.filename
   while fm_info.filename == curr_filename:
     fm = fm.f_back
     fm_info = inspect.getframeinfo(fm)
   
   return (fm, fm_info)
Example #23
def trace_calls_and_returns(frame, event, arg):
	co = frame.f_code
	filename = co.co_filename
	func_name = co.co_name
	code = inspect.getframeinfo(frame)[3]
	global current_trace
	
	'''if sys==None:
		return
	if func_name == 'write':
		# Ignore write() calls from print statements
		return
	'''
	if filename=='/usr/lib/python2.7/socket.py': # use socket.__file__ to get this
		#print filename + '/' + func_name
		#traceback.print_stack(frame)
		previous_frames = inspect.getouterframes(frame)
		for parent in previous_frames:
			frame_details = inspect.getframeinfo(parent[0])
			if frame_details[2] in functions_being_traced:
				#print frame_details
				#TODO if already in the list, don't add a duplicate
				current_trace.setdefault(frame_details[2],{}).setdefault('per_line_dependencies',{}).setdefault(frame_details[1],[]).append('network')
	if code is not None:
		if string.find(' '.join(code),' open(')!=-1:
			previous_frames = inspect.getouterframes(frame)
			for parent in previous_frames:
				frame_details = inspect.getframeinfo(parent[0])
				if frame_details[2] in functions_being_traced:
					#print frame_details
					current_trace.setdefault(frame_details[2],{}).setdefault('per_line_dependencies',{}).setdefault(frame_details[1],[]).append('filesystem')
	#TODO want to trace child functions as well... but for that, need to know which files to ignore/not ignore
	#figure out based on analysis of previous_frames

	#return
	#'kw'
	#line_no = frame.f_lineno

	# ignore everything outside of these files, as built-in or third-party
	if func_name in functions_being_traced:#filename in files_to_trace and 
		'''print "%s: %s %s[%s]" % (
                event,
                frame.f_code.co_name,
                frame.f_code.co_filename,
                frame.f_lineno,
            )'''
		if event=='call':
			#get the input parameters
			#print str(frame.f_locals)
			current_trace.setdefault(func_name,{})['function_name'] = func_name
			current_trace.setdefault(func_name,{})['arg'] = copy.copy(frame.f_locals)
			#current_trace.setdefault(str(f.__name__),{})['kw'] = kw
			return trace_calls_and_returns
		elif event=='line': # also gets called for return events, too!
			return trace_lines
	else:
		return
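For reference, a tracer like this is installed with sys.settrace before the code under observation runs (a sketch; function_being_traced is a placeholder):

import sys

sys.settrace(trace_calls_and_returns)
function_being_traced()   # call and line events are now routed to the tracer
sys.settrace(None)        # uninstall the tracer when done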
Example #24
def Test_Start(message=""):
    print "_______________________________________________"
    caller = inspect.getframeinfo(inspect.currentframe().f_back)[2]
    print "Test Harness:" + inspect.getframeinfo(inspect.currentframe().f_back)[0]
    print term_color.OKGREEN + "Entering:" + caller + term_color.ENDC
    if message != "":
        print "Notes:"+message
    print "------------------------------------------------"
    return
Example #25
def handle_stackframe_without_leak():
  frame = inspect.currentframe()
  print "stack:"
  try:      
    while frame is not None:
      print inspect.getframeinfo(frame)
      frame=frame.f_back
  finally:      
    del frame
  sys.stdout.flush()
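The `del frame` in the finally block follows the advice in the inspect documentation: a local variable referencing a frame creates a reference cycle (the frame holds the local, the local holds the frame), so dropping the reference promptly keeps the whole stack from being kept alive. The same walk in Python 3 style (a sketch):

import inspect

def print_stack_without_leak():
    frame = inspect.currentframe()
    try:
        while frame is not None:
            print(inspect.getframeinfo(frame))
            frame = frame.f_back
    finally:
        del frame   # break the frame <-> local reference cycle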
Example #26
def bootstrap_portfolio(subset_data, moments_estimator,
                cleaning, must_haves,
                  monte_runs=100, bootstrap_length=50,
                  **other_opt_args):
    """
    Given dataframe of returns; returns_to_bs, performs a bootstrap optimisation
    
    We run monte_carlo numbers of bootstraps
    Each one contains monte_length days drawn randomly, with replacement 
    (so *not* block bootstrapping)
    
    The other arguments are passed to the optimisation function markosolver
    
    :param subset_data: The data to optimise over
    :type subset_data: pd.DataFrame TxN

    :param cleaning: Should we clean correlations so can use incomplete data?
    :type cleaning: bool

    :param must_haves: The indices of things we must have weights for when cleaning
    :type must_haves: list of bool

    :param monte_runs: The number of bootstraps to do
    :type monte_runs: int

    :param bootstrap_length: Number of periods in each bootstrap
    :type bootstrap_length: int

    *_params passed through to data estimation functions

    **other_opt_args passed to single period optimiser

    :returns: float
    
    """
    print(__file__ + ":" + str(inspect.getframeinfo(inspect.currentframe())[:3][1]) + ":" + "bootstrap_length=" + str(bootstrap_length))
    print(__file__ + ":" + str(inspect.getframeinfo(inspect.currentframe())[:3][1]) + ":" + "bootstrap_length=" + str(type(moments_estimator)))

    all_results=[bs_one_time(subset_data, moments_estimator,
                            cleaning, must_haves, 
                            bootstrap_length,
                            **other_opt_args)
                                for unused_index in range(monte_runs)]
        
    ### We can take an average here; only because our weights always add up to 1. If that isn't true
    ###    then you will need to do some kind of renormalisation

    weightlist=np.array([x[0] for x in all_results], ndmin=2)
    diaglist=[x[1] for x in all_results]
         
    theweights_mean=list(np.mean(weightlist, axis=0))
    
    diag=dict(bootstraps=diaglist)
    
    return (theweights_mean, diag)
Example #27
def _caller(depth=1):
    ''' Get caller of current frame '''
    cf = inspect.currentframe()

    for i in range(depth + 1):
        cf = cf.f_back

    try:
        return '[%s] ' % inspect.getframeinfo(cf).function
    except AttributeError:
        return '[%s] ' % inspect.getframeinfo(inspect.currentframe()).function
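Usage sketch for the helper above (hypothetical caller names): with the default depth=1, the tag names the caller of whichever function invokes _caller().

def fetch():
    print(_caller() + 'starting download')   # prints: [main] starting download

def main():
    fetch()

main()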
Example #28
def ParseCommandLine():
    parser = argparse.ArgumentParser(description='A tool to compare two directories and move files')
    parser.add_argument('-debug',help='Enable debugging',action='store_true')
    args = parser.parse_args()

    if args.debug:
        import pdb
        pdb.set_trace()
        Logger('debug is on', getframeinfo(currentframe()))
    else:
        Logger('debug is off', getframeinfo(currentframe()))
Example #29
def handle_stackframe_without_leak():

    cat = ["meow"]

    frame = sys._getframe(0)  # sys.getframe does not exist; _getframe is the CPython accessor

    print frame
    try:
        print inspect.getframeinfo( frame )
        # do something with the frame
    finally:
        del frame
Example #30
    def reportError(msg):
        if not jcl_islogging:
            return
        frame = inspect.currentframe()
        # TODO: Need a more reliable approach to get the external caller's frame
        while inspect.getframeinfo(frame).filename.endswith('jclib.py'):
            frame = frame.f_back

        print(msg)
        while frame:
            frameinfo = inspect.getframeinfo(frame)
            print("\t" + frameinfo.filename + "(" + str(frameinfo.lineno) + ")")
            frame = frame.f_back
Example #31
    isBlock = isBlockDevice(args["device"])
    if (isBlock):
        if (args["image"] != "") and (args[""]):
            None
    else:
        print(
            "'" + args["device"] +
            "' is not a valid device, check if it exists", 2)
        print("could not find: " + args["device"], -1)


# declaring useful global variables
home = os.path.expanduser("~")
name = os.path.basename(sys.argv[0])

fileAndPath = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(fileAndPath))

prePend = "[ " + name + " ] "
description = name + "; " + "Python script entry point for RavenNucleon\
 so that archlinux may reign supreme on whatever you choose for Nucleon\
to install it on."

dependancies = ["https://github.com/DreamingRaven/RavenPythonLib"]

# capture arguments in dict then put into json for bash
args = argz(sys.argv[1:], description=description)
args_json = json.loads(json.dumps(args))

# setting fallback logger here pre-update
log = Log(logLevel=args["loglevel"])
Example #32
def _current_postion_info():
    cf = currentframe()
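    # Note: the filename comes from this helper's own frame, while the line
    # number comes from the caller's frame (cf.f_back); the two only agree
    # when helper and caller live in the same file.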
    frameinfo = " (at {}:{})".format(
        os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
    )
    return frameinfo
Example #33
def LogCaller():
    fi = getframeinfo(currentframe().f_back.f_back)
    msg = '[{}] Called from: {}:{}'.format(g.__plugin__, opb(fi.filename),
                                           fi.lineno)
    xbmc.log(msg, Log.NOTICE)
Example #34
    rospy.init_node(name='lidar', log_level=rospy.DEBUG)

    while True:
        countLog += 1

        if (countLog % 5 == 1):
            rospy.logdebug("This is a debug message " + str(countLog))

        if (countLog % 5 == 2):
            try:
                a = 1 / 0
            except ZeroDivisionError:

                rospy.loginfo("Cannot divide by zero  lineno - " +
                              str(getframeinfo(currentframe()).lineno) +
                              " count -" + str(countLog))

        if (countLog % 5 == 3):
            try:
                raise IOError
            except IOError:
                rospy.logerr("IO Error has occurred lineno - " +
                             str(getframeinfo(currentframe()).lineno) +
                             " count -" + str(countLog))

        if (countLog % 5 == 4):
            try:
                raise EOFError
            except EOFError:
                rospy.logwarn("Reached End-Of-File warning lineno - " +
Example #35
    def check_total_irradiation_other_source(self,
                                             column,
                                             other_radiation,
                                             threshold_pct,
                                             label_other='_other'):

        name_check_function = inspect.getframeinfo(
            inspect.currentframe()).function

        other_radiation_copy = other_radiation.copy()

        other_radiation_copy.name += label_other
        column_other = other_radiation_copy.name

        df_joined = self.df.join(other_radiation_copy, how='inner')

        irradiation = mc_solar.daily_irradiation(
            df_joined[column], samples_per_hour=self.samples_per_hour)
        irradiation_other = mc_solar.daily_irradiation(
            df_joined[column_other], samples_per_hour=self.samples_per_hour)

        if irradiation < DAILY_IRRADIATION_THRESHOLD:  # Avoids future errors
            return None

        diff_radiation = abs(irradiation -
                             irradiation_other) / irradiation * 100

        condition_list = diff_radiation < threshold_pct

        buffer = None

        if not (condition_list):
            plt.figure()
            df_joined[column].plot(style='k.')
            df_joined[column_other].plot(style='r.')
            plt.legend([column, column_other])
            plt.title(name_check_function + ':' + column)
            plt.suptitle(self.type_data_station, fontsize=18)

            buffer = io.BytesIO()
            plt.savefig(buffer)
            buffer.seek(0)

        irrad_filt = self.df[column][lambda m: m > DNI_RADIATION_THRESHOLD]
        num_radiation_transitions_value = mc_solar.num_radiation_transitions(
            irrad_filt)

        if num_radiation_transitions_value < NUM_RADIATION_TRANSITIONS_THRESHOLD:
            self.assertion_base(
                condition=condition_list,
                error_message=
                'Total irradiation from {} is different to {} in more than {}%. It is {:.1f}% while DAILY_IRRADIATION_THRESHOLD is {:.2} kWh/(m2·day)'
                .format(column, column_other, threshold_pct, diff_radiation,
                        DAILY_IRRADIATION_THRESHOLD),
                check_type=name_check_function,
                figure=buffer)
        else:
            self.assertion_base(
                condition=False,
                error_message=
                'Comparison of total irradiations {} and {} not checked because the number of cloudy moments={} [with a DRADIATION_DT={}] is higher than threshold={}'
                .format(column, column_other, num_radiation_transitions_value,
                        DRADIATION_DT, NUM_RADIATION_TRANSITIONS_THRESHOLD),
                error_level='INFO',
                check_type=name_check_function,
                figure=buffer)
Example #36
    def check_radiation_other_source(self,
                                     column,
                                     other_radiation,
                                     threshold_pct,
                                     radiation_threshold=None,
                                     label_other='_other'):

        name_check_function = inspect.getframeinfo(
            inspect.currentframe()).function

        if radiation_threshold is None:
            radiation_threshold = DNI_RADIATION_THRESHOLD

        other_radiation_copy = other_radiation.copy()

        other_radiation_copy.name += label_other
        column_other = other_radiation_copy.name

        df_joined = self.df.join(other_radiation_copy, how='inner')

        df_filt = df_joined[df_joined[column] > radiation_threshold]

        if len(df_filt) == 0:  # Avoids future errors
            return None

        condition_list = (df_filt[column] - df_filt[column_other]
                          ).abs() / df_filt[column_other] * 100 < threshold_pct

        buffer = None
        if not condition_list.all():
            plt.figure()
            df_filt[column].plot(style='.')
            df_filt[column_other].plot(style='.')
            df_filt[column][~condition_list].plot(style='rP')
            plt.legend([column, column_other])
            plt.title(name_check_function + ':' + column)
            plt.suptitle(self.type_data_station, fontsize=18)

            buffer = io.BytesIO()
            plt.savefig(buffer)
            buffer.seek(0)

        irrad_filt = self.df[column][lambda m: m > DNI_RADIATION_THRESHOLD]
        num_radiation_transitions_value = mc_solar.num_radiation_transitions(
            irrad_filt)

        if num_radiation_transitions_value < NUM_RADIATION_TRANSITIONS_THRESHOLD:
            self.assertion_base(
                condition=condition_list.all(),
                error_message=
                'No coherence between {} and {} radiation sources considering a percentage THRESHOLD of {} % in {}'
                .format(column, column_other, threshold_pct,
                        df_filt[column][~condition_list].index),
                check_type=name_check_function,
                figure=buffer)
        else:
            self.assertion_base(
                condition=False,
                error_message=
                'Comparison of radiation {} and {} from different sources not checked because the number of cloudy moments={} [with a DRADIATION_DT={}] is higher than threshold={}'
                .format(column, column_other, num_radiation_transitions_value,
                        DRADIATION_DT, NUM_RADIATION_TRANSITIONS_THRESHOLD),
                error_level='INFO',
                check_type=name_check_function,
                figure=buffer)
Example #37
    def reset_default_config(self):
        # This is the only variable that we should change, the top directory
        topdir = self.get_config(param=Config.PARAM_TOPDIR)
        if not os.path.isdir(topdir):
            errmsg = str(self.__class__) + ' ' + str(getframeinfo(currentframe()).lineno) \
                     + ': Fatal error initializing config, "' + str(topdir) + '" is not a directory!'
            lg.Log.critical(errmsg)
            raise Exception(errmsg)

        param_values_to_set = {
            # Top directory
            Config.PARAM_TOPDIR:
            Config.DEFVAL_TOPDIR,
            # Logging
            Config.PARAM_LOG_LEVEL:
            Config.DEFVAL_LOGLEVEL,
            #######################################################################
            # Intent Server Stuff
            #######################################################################
            Config.PARAM_DO_PROFILING:
            Config.DEFVAL_DO_PROFILING,
            #######################################################################
            # Models Stuff
            #######################################################################
            Config.PARAM_MODEL_DIR:
            Config.DEFVAL_MODEL_DIR,
            Config.PARAM_MODEL_NAME:
            Config.DEFVAL_MODEL_NAME,
            Config.PARAM_MODEL_LANG:
            Config.DEFVAL_MODEL_LANG,
            Config.PARAM_MODEL_IDENTIFIER:
            Config.DEFVAL_MODEL_IDENTIFIER,
            #######################################################################
            # NLP Stuff
            #######################################################################
            # Word lists
            Config.PARAM_NLP_DIR_WORDLIST:
            Config.DEFVAL_NLP_DIR_WORDLIST,
            Config.PARAM_NLP_DIR_APP_WORDLIST:
            Config.DEFVAL_NLP_DIR_APP_WORDLIST,
            Config.PARAM_NLP_POSTFIX_WORDLIST:
            Config.DEFVAL_NLP_POSTFIX_WORDLIST,
            Config.PARAM_NLP_POSTFIX_APP_WORDLIST:
            Config.DEFVAL_NLP_POSTFIX_APP_WORDLIST,
            # Synonym lists
            Config.PARAM_NLP_DIR_SYNONYMLIST:
            Config.DEFVAL_NLP_DIR_SYNONYMLIST,
            Config.PARAM_NLP_POSTFIX_SYNONYMLIST:
            Config.DEFVAL_NLP_POSTFIX_SYNONYMLIST,
            # Stopwords lists (to be outdated)
            Config.PARAM_NLP_POSTFIX_STOPWORDS:
            Config.DEFVAL_NLP_POSTFIX_STOPWORDS,
            Config.PARAM_NLP_DIR_APP_STOPWORDS:
            Config.DEFVAL_NLP_DIR_APP_STOPWORDS,
            Config.PARAM_NLP_POSTFIX_APP_STOPWORDS:
            Config.DEFVAL_NLP_POSTFIX_APP_STOPWORDS,
            # NLTK or whatever download dir
            Config.PARAM_NLP_DIR_NLP_DOWNLOAD:
            Config.DEFVAL_NLP_DIR_NLP_DOWNLOAD,
            #######################################################################
            # NLP Conversation Model
            #######################################################################
            Config.PARAM_NLP_DAEHUA_DIR:
            Config.DEFVAL_DAEHUA_DIR,
            Config.PARAM_NLP_DAEHUA_PATTERN_JSON_FILE:
            Config.DEFVAL_DAEHUA_PATTERN_JSON_FILE,
            #######################################################################
            # Model Backtesting
            #######################################################################
            Config.PARAM_MODEL_BACKTEST_DETAILED_STATS:
            Config.DEFVAL_MODEL_BACKTEST_DETAILED_STATS
        }

        for param in param_values_to_set.keys():
            default_value = param_values_to_set[param]
            self.set_default_value_if_not_exist(param=param,
                                                default_value=default_value)
        return
Example #38
def display_memory_info():
    import inspect, os
    f = inspect.currentframe()
    fileline = inspect.getframeinfo(f.f_back)
    fileline = f"{os.path.basename(fileline.filename)}:{fileline.lineno}"
    core.display_memory_info(fileline)
Example #39
    def compile_for_single_resource_type(
        self, query, resource_type, mapping=None, root_replacer=None
    ):
        """
        :param: query

        :param: mapping: Elasticsearch mapping for FHIR resources.

        :root_replacer: Path's root replacer:
            Could be mapping name or index name in zope's ZCatalog context
        """
        body_structure = ElasticSearchDialect.create_structure()
        conditional_terms = [
            w
            for w in query.get_where()
            if (
                not INonFhirTerm.providedBy(w)
                and w.path.context.resource_type == resource_type
            )
            or INonFhirTerm.providedBy(w)
        ]
        for term in conditional_terms:
            q, unary_operator = self.resolve_term(term, mapping, root_replacer)

            if unary_operator == OPERATOR.neg:
                container = body_structure["query"]["bool"]["must_not"]
            elif unary_operator == OPERATOR.pos:
                container = body_structure["query"]["bool"]["filter"]
            else:
                # xxx: if None may be should?
                from inspect import currentframe, getframeinfo

                frameinfo = getframeinfo(currentframe())
                raise NotImplementedError(
                    f"File: {frameinfo.filename} Line: {frameinfo.lineno + 1}"
                )

            container.append(q)

        # if not searching on all resources, add a predicate to filter on resourceType
        if resource_type != "Resource":
            ElasticSearchDialect.apply_from_constraint(
                query, body_structure, resource_type, root_replacer=root_replacer
            )

        # Sorting
        ElasticSearchDialect.apply_sort(
            query.get_sort(), body_structure, root_replacer=root_replacer
        )
        # Limit
        ElasticSearchDialect.apply_limit(query.get_limit(), body_structure)
        # ES source_
        ElasticSearchDialect.apply_source_filter(
            query, body_structure, root_replacer=root_replacer
        )

        ElasticSearchDialect.clean_up(body_structure)

        if "should" in body_structure["query"]["bool"]:
            if "minimum_should_match" not in body_structure["query"]["bool"]:
                body_structure["query"]["bool"]["minimum_should_match"] = 1

        return body_structure
Example #40
def varname(p):
    for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
        m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
        if m:
            return m.group(1)
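Usage sketch: the regex recovers the argument's name from the caller's source line (getframeinfo's code_context field), so it only works when the call is written literally as varname(identifier) and source is available.

spam = 42
print(varname(spam))   # -> 'spam'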
Example #41
 def log_frame(self):
     if self.chooser.dirspec_log:
         stack = inspect.stack()
         frame, _, _, _, _, _ = stack[1]
         args, _, _, values = inspect.getargvalues(frame)
         print(">>> %s: args=%s" % (inspect.getframeinfo(frame)[2], [(i, values[i]) for i in args[1:]]))
Example #42
    'develop-eggs',
    'downloads',   
    'eggs',
    'etc',
    'parts',
    'python',
    'sql_dumps',
]

TAG = '%s(PROJECT_NAME)%s' % ('$', '$')  # make sure the tag definition does not replace itself
UTAG = '$(USERNAME)$'
#SKELETON_PATH = 'odoo/skeleton'
SKELETON_NAME = 'skeleton'
DEBUG = True

filename = inspect.getframeinfo(inspect.currentframe()).filename
# PROJECT_HOME is the folder in which the project is created
PROJECT_HOME  = os.path.split(os.path.dirname(os.path.abspath(filename)))[0]
# PROJECT_LIST_DIR the folder that houses all projects
PROJECT_LIST_DIR = os.path.split(os.path.split(PROJECT_HOME)[0])[0]

class bcolors:
    """
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
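A side note on the pattern above: resolving the script's own location through getframeinfo(currentframe()).filename is essentially a frame-based stand-in for __file__, which is why it works for computing project directories (a minimal sketch):

import inspect
import os

filename = inspect.getframeinfo(inspect.currentframe()).filename
here = os.path.dirname(os.path.abspath(filename))   # directory of this script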
Example #43
 def __init__(self):
     self.frame = getframeinfo(currentframe())
     self.establish_connection()
Example #44
 def loc(self, c: Any) -> Tuple[str, str, int]:
     """ Returns location information of the caller """
     (f, line, name, lines, index) = inspect.getframeinfo(c)
     return (f, line, name, lines, index)
Example #45
    def _trace(self, text, tabs=0, level=0, severity='info'):
        cf = currentframe()
        calframe = getouterframes(cf, 2)

        logger = self._setLoggingFile()

        try:
            if self.traceOutput == 'un-defined':
                pass
        except:
            print(
                'traceOutput not set defaulting to terminal and loggingLevel of 5'
            )
            self.traceOutput = 'terminal'
            self.loggingLevel = 5

        if self.traceOutput == 'un-defined':
            self._error(self._errors['error_002'],
                        'err-002',
                        getframeinfo(cf).filename,
                        str(cf.f_lineno),
                        sys._getframe().f_code.co_name,
                        'terminate',
                        notify='void')

        if self.traceOutput not in {'both', 'terminal', 'none', 'file'}:
            self._error(self._errors['error_003'],
                        'err-003',
                        getframeinfo(cf).filename,
                        str(cf.f_lineno),
                        sys._getframe().f_code.co_name,
                        'terminate',
                        notify='void')

        if severity not in {'info', 'debug', 'critical', 'error', 'warning'}:
            self._error(self._errors['error_004'],
                        'err-004',
                        getframeinfo(cf).filename,
                        str(cf.f_lineno),
                        sys._getframe().f_code.co_name,
                        'terminate',
                        notify='void')

        if self.traceOutput == 'terminal' or self.traceOutput == 'both':

            if level < self.loggingLevel:

                for i in range(1, tabs + 1):
                    print("\t", end='')
                print('% ' + str(os.path.basename(calframe[1][1])) + '[' +
                      str(calframe[1][2]) + '] : ' + str(text))

        if self.traceOutput == 'file' or self.traceOutput == 'both':

            if level < self.loggingLevel:
                optStr = 'logger.setLevel(logging.' + severity.upper() + ')'
                eval(optStr)
                cmdStr = 'logger.' + severity + '(str(text))'
                eval(cmdStr)
                # set back to default of INFO
                logger.setLevel(logging.INFO)
Example #46
def main():
	program_name = os.path.basename(sys.argv[0])
	usage = program_name + """  input_image_path  output_directory  --selection_list=selection_list  --wn=CTF_WINDOW_SIZE --apix=PIXEL_SIZE  --Cs=CS  --voltage=VOLTAGE  --ac=AMP_CONTRAST  --f_start=FREA_START  --f_stop=FREQ_STOP  --vpp  --kboot=KBOOT  --overlap_x=OVERLAP_X  --overlap_y=OVERLAP_Y  --edge_x=EDGE_X  --edge_y=EDGE_Y  --check_consistency  --stack_mode  --debug_mode

Automated estimation of CTF parameters with error assessment.

All Micrographs Mode - Process all micrographs in a directory: 
	Specify a list of input micrographs using a wild card (*), called here input micrographs path pattern. 
	Use the wild card to indicate the place of the variable part of the file names (e.g. serial number, time stamp, etc.). 
	Running from the command line requires enclosing the string by single quotes (') or double quotes ("). 
	sxgui.py will automatically add single quotes to the string. 
	BDB files can not be selected as input micrographs. 
	Then, specify output directory where all outputs should be saved. 
	In this mode, all micrographs matching the path pattern will be processed.

	mpirun -np 16 sxcter.py './mic*.hdf' outdir_cter --wn=512 --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0

Selected Micrographs Mode - Process all micrographs in a selection list file:
	In addition to the input micrographs path pattern and output directory arguments, 
	specify a name of micrograph selection list text file using --selection_list option 
	(e.g. output of sxgui_unblur.py or sxgui_cter.py). The file extension must be ".txt". 
	In this mode, only micrographs in the selection list which match the file name part of the pattern (ignoring the directory paths) will be processed. 
	If a micrograph name in the selection list does not exist in the directory specified by the micrograph path pattern, processing of the micrograph will be skipped.

	mpirun -np 16 sxcter.py './mic*.hdf' outdir_cter --selection_list=mic_list.txt --wn=512 --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0

Single Micrograph Mode - Process a single micrograph: 
	In addition to the input micrographs path pattern and output directory arguments, 
	specify a single micrograph name using --selection_list option. 
	In this mode, only the specified single micrograph will be processed. 
	If this micrograph name does not match the file name part of the pattern (ignoring the directory paths), the process will exit without processing it. 
	If this micrograph name matches the file name part of the pattern but does not exist in the directory specified by the micrograph path pattern, again the process will exit without processing it. 
	Use single processor for this mode.

	sxcter.py './mic*.hdf' outdir_cter --selection_list=mic0.hdf --wn=512 --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0

Stack Mode - Process a particle stack (not supported by SPHIRE GUI): 
	Use --stack_mode option, then specify the path of particle stack file (without wild card "*") and output directory as arguments. 
	This mode ignores --selection_list, --wn --overlap_x, --overlap_y, --edge_x, and --edge_y options. 
	Use single processor for this mode. Not supported by SPHIRE GUI (sxgui.py). 

	sxcter.py bdb:stack outdir_cter --apix=2.29 --Cs=2.0 --voltage=300 --ac=10.0 --stack_mode

"""
	parser = optparse.OptionParser(usage, version=sp_global_def.SPARXVERSION)
	parser.add_option("--selection_list",	type="string",        default=None,   help="Micrograph selecting list: Specify path of a micrograph selection list text file for Selected Micrographs Mode. The file extension must be \'.txt\'. Alternatively, the file name of a single micrograph can be specified for Single Micrograph Mode. (default none)")
	parser.add_option("--wn",				type="int",           default=512,    help="CTF window size [pixels]: The size should be slightly larger than particle box size. This will be ignored in Stack Mode. (default 512)")
	parser.add_option("--apix",				type="float",         default=-1.0,   help="Pixel size [A/Pixels]: The pixel size of input micrograph(s) or images in input particle stack. (default -1.0)")
	parser.add_option("--Cs",				type="float",         default=2.0,    help="Microscope spherical aberration (Cs) [mm]: The spherical aberration (Cs) of microscope used for imaging. (default 2.0)")
	parser.add_option("--voltage",			type="float",         default=300.0,  help="Microscope voltage [kV]: The acceleration voltage of microscope used for imaging. (default 300.0)")
	parser.add_option("--ac",				type="float",         default=10.0,   help="Amplitude contrast [%]: The typical amplitude contrast is in the range of 7% - 14%. The value mainly depends on the thickness of the ice embedding the particles. (default 10.0)")
	parser.add_option("--f_start",			type="float",         default=-1.0,   help="Lowest resolution [A]: Lowest resolution to be considered in the CTF estimation. Determined automatically by default. (default -1.0)")
	parser.add_option("--f_stop",			type="float",         default=-1.0,   help="Highest resolution [A]: Highest resolution to be considered in the CTF estimation. Determined automatically by default. (default -1.0)")
	parser.add_option("--kboot",			type="int",           default=16,     help="Number of CTF estimates per micrograph: Used for error assessment. (default 16)")
	parser.add_option("--overlap_x",		type="int",           default=50,     help="X overlap [%]: Overlap between the windows in the x direction. This will be ignored in Stack Mode. (default 50)")
	parser.add_option("--overlap_y",		type="int",           default=50,     help="Y overlap [%]: Overlap between the windows in the y direction. This will be ignored in Stack Mode. (default 50)")
	parser.add_option("--edge_x",			type="int",           default=0,      help="Edge x [pixels]: Defines the edge of the tiling area in the x direction. Normally it does not need to be modified. This will be ignored in Stack Mode. (default 0)")
	parser.add_option("--edge_y",			type="int",           default=0,      help="Edge y [pixels]: Defines the edge of the tiling area in the y direction. Normally it does not need to be modified. This will be ignored in Stack Mode. (default 0)")
	parser.add_option("--check_consistency",action="store_true",  default=False,  help="Check consistency of inputs: Create a text file containing the list of inconsistent Micrograph ID entries (i.e. inconsist_mic_list_file.txt). (default False)")
	parser.add_option("--stack_mode",		action="store_true",  default=False,  help="Use stack mode: Use a stack as the input. Please set the file path of a stack as the first argument and output directory for the second argument. This is advanced option. Not supported by sxgui. (default False)")
	parser.add_option("--debug_mode",		action="store_true",  default=False,  help="Enable debug mode: Print out debug information. (default False)")
	parser.add_option("--vpp",				action="store_true",  default=False,  help="Volta Phase Plate - fit smplitude contrast. (default False)")
	parser.add_option("--defocus_min",		type="float",         default=0.3,    help="Minimum defocus search [um] (default 0.3)")
	parser.add_option("--defocus_max",		type="float",         default=9.0,    help="Maximum defocus search [um] (default 9.0)")
	parser.add_option("--defocus_step",		type="float",         default=0.1,    help="Step defocus search [um] (default 0.1)")
	parser.add_option("--phase_min",		type="float",         default=5.0,    help="Minimum phase search [degrees] (default 5.0)")
	parser.add_option("--phase_max",		type="float",         default=175.0,  help="Maximum phase search [degrees] (default 175.0)")
	parser.add_option("--phase_step",		type="float",         default=5.0,    help="Step phase search [degrees] (default 5.0)")
	parser.add_option("--pap",				action="store_true",  default=False,  help="Use power spectrum for fitting. (default False)")

	(options, args) = parser.parse_args(sys.argv[1:])

	# ====================================================================================
	# Prepare processing
	# ====================================================================================
	# ------------------------------------------------------------------------------------
	# Set up MPI related variables
	# ------------------------------------------------------------------------------------
	# Detect if program is running under MPI
	RUNNING_UNDER_MPI = "OMPI_COMM_WORLD_SIZE" in os.environ

	main_mpi_proc = 0
	if RUNNING_UNDER_MPI:
		####mpi.mpi_init( 0, [] )
		my_mpi_proc_id = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)
		n_mpi_procs    = mpi.mpi_comm_size(mpi.MPI_COMM_WORLD)
		sp_global_def.MPI = True

	else:
		my_mpi_proc_id = 0
		n_mpi_procs = 1
	
	# ------------------------------------------------------------------------------------
	# Set up SPHIRE global definitions
	# ------------------------------------------------------------------------------------
	if sp_global_def.CACHE_DISABLE:
		sp_utilities.disable_bdb_cache()

	# Change the log file name used for error messages
	original_logfilename = sp_global_def.LOGFILE
	sp_global_def.LOGFILE = os.path.splitext(program_name)[0] + '_' + original_logfilename + '.txt'

	# ------------------------------------------------------------------------------------
	# Check error conditions of arguments and options, then prepare variables for arguments
	# ------------------------------------------------------------------------------------
	input_image_path = None
	output_directory = None
	# NOTE: the "while True" below is not a real loop; it is an if-block that uses "break" to report errors and bail out early
	error_status = None
	# Convert resolution limits [A] into spatial frequencies [1/A]
	freq_start = -1.0
	freq_stop  = -1.0
	
	if options.f_start > 0.0: 
		if options.f_start <= 0.5: 
			sp_global_def.ERROR( "f_start should be in Angstroms" ) # values <= 0.5 were likely given as absolute or spatial frequencies, not Angstroms
		else: 
			freq_start = 1./options.f_start
		
	if options.f_stop > 0.0:
		if options.f_stop <= 0.5: 
			sp_global_def.ERROR( "f_stop should be in Angstroms" ) # values <= 0.5 were likely given as absolute or spatial frequencies, not Angstroms
		else: 
			freq_stop = 1./options.f_stop

	while True:
		# --------------------------------------------------------------------------------
		# Check the number of arguments. If OK, then prepare variables for them
		# --------------------------------------------------------------------------------
		if len(args) != 2:
			error_status = ("Please check usage for number of arguments.\n Usage: " + usage + "\n" + "Please run %s -h for help." % (program_name), inspect.getframeinfo(inspect.currentframe()))
			break

		# NOTE: 2015/11/27 Toshio Moriya
	# Require single quotes (') or double quotes (") when the input micrograph pattern is given for input_image_path,
	# so that the shell does not expand the wild card and pass a list of file names via sys.argv
		#
		input_image_path = args[0]
		output_directory = args[1]

		# --------------------------------------------------------------------------------
		# NOTE: 2016/03/17 Toshio Moriya
		# cter_mrk() will take care of all the error conditions 
		# --------------------------------------------------------------------------------

		break
	sp_utilities.if_error_then_all_processes_exit_program(error_status)
	# Sanity checks: missing arguments should already have been reported by if_error_then_all_processes_exit_program() above
	assert input_image_path is not None, "Missing argument: input_image_path"
	assert output_directory is not None, "Missing argument: output_directory"

	if not options.vpp:
		wrong_params = False
		vpp_options = ["--defocus_min", "--defocus_max", "--defocus_step", "--phase_min", "--phase_max", "--phase_step"]
		for command_token in sys.argv:
			for vppo in vpp_options:
				if vppo in command_token: wrong_params = True
				if wrong_params: break
			if wrong_params: break
		if wrong_params:
			sp_global_def.ERROR( "Option %s is valid only for the Volta Phase Plate mode (--vpp)" % command_token, myid=my_mpi_proc_id )

	if my_mpi_proc_id == main_mpi_proc:
		command_line = ""
		for command_token in sys.argv:
			command_line += command_token + "  "
		sp_global_def.sxprint(" ")
		sp_global_def.sxprint("Shell line command:")
		sp_global_def.sxprint(command_line)

	if options.vpp:
		vpp_options = [options.defocus_min,  options.defocus_max,  options.defocus_step,  options.phase_min,  options.phase_max,  options.phase_step]
		result = sp_morphology.cter_vpp(input_image_path, output_directory, options.selection_list, options.wn, \
				options.apix, options.Cs, options.voltage, options.ac, freq_start, freq_stop, \
				options.kboot, options.overlap_x, options.overlap_y, options.edge_x, options.edge_y, \
				options.check_consistency, options.stack_mode, options.debug_mode, program_name, vpp_options, \
				RUNNING_UNDER_MPI, main_mpi_proc, my_mpi_proc_id, n_mpi_procs)
	elif options.pap:
		result = sp_morphology.cter_pap(input_image_path, output_directory, options.selection_list, options.wn, \
				options.apix, options.Cs, options.voltage, options.ac, freq_start, freq_stop, \
				options.kboot, options.overlap_x, options.overlap_y, options.edge_x, options.edge_y, \
				options.check_consistency, options.stack_mode, options.debug_mode, program_name, \
				RUNNING_UNDER_MPI, main_mpi_proc, my_mpi_proc_id, n_mpi_procs)
	else:
		result = sp_morphology.cter_mrk(input_image_path, output_directory, options.selection_list, options.wn, \
				options.apix, options.Cs, options.voltage, options.ac, freq_start, freq_stop, \
				options.kboot, options.overlap_x, options.overlap_y, options.edge_x, options.edge_y, \
				options.check_consistency, options.stack_mode, options.debug_mode, program_name, \
				RUNNING_UNDER_MPI, main_mpi_proc, my_mpi_proc_id, n_mpi_procs)

	if RUNNING_UNDER_MPI:
		mpi.mpi_barrier(mpi.MPI_COMM_WORLD)

	if main_mpi_proc == my_mpi_proc_id:
		if options.debug_mode:
			sp_global_def.sxprint("Returned value from cter_mrk() := ", result)
		sp_global_def.sxprint(" ")
		sp_global_def.sxprint("DONE!!!")
		sp_global_def.sxprint(" ")

	# ====================================================================================
	# Clean up
	# ====================================================================================
	# ------------------------------------------------------------------------------------
	# Reset SPHIRE global definitions
	# ------------------------------------------------------------------------------------
	sp_global_def.LOGFILE = original_logfilename
	
	# ------------------------------------------------------------------------------------
	# Clean up MPI related variables
	# ------------------------------------------------------------------------------------
	if RUNNING_UNDER_MPI:
		mpi.mpi_barrier( mpi.MPI_COMM_WORLD )

	sys.stdout.flush()
	return
Example no. 47
0
def get_frame_info(level):
    # Return "filename:lineno: " for the stack frame `level` steps above this call
    caller_frame = inspect.stack()[level]
    info = inspect.getframeinfo(caller_frame[0])
    return info.filename + ':' + str(info.lineno) + ': '
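
Called with level=1, the helper reports the file and line of its call site, which makes it useful as a logging prefix. A minimal usage sketch (the caller below is hypothetical, not part of the original snippet; assumes `import inspect` is already in scope):

    print(get_frame_info(1) + 'checkpoint reached')
    # prints something like: /path/to/script.py:7: checkpoint reached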
Example no. 48
0
    def check_coherence_radiation(self,
                                  threshold_pct,
                                  dni,
                                  ghi,
                                  dhi,
                                  radiation_threshold=None):
        # Check radiation coherence between GHI and DNI&DHI
        # THRESHOLD is in percentage

        name_check_function = inspect.getframeinfo(
            inspect.currentframe()).function

        if radiation_threshold is None:
            radiation_threshold = GHI_RADIATION_THRESHOLD

        df_filt = self.df[self.df[ghi] > radiation_threshold]

        if len(df_filt) == 0:  # Avoids future errors
            return None

        _, Zz = mc_solar.solpos(df_filt.index)

        ghi_model = (df_filt[dhi] + df_filt[dni] * np.cos(Zz))

        condition_list = (((df_filt[ghi] - ghi_model).abs()) / df_filt[ghi] *
                          100 < threshold_pct)

        buffer = None
        if not condition_list.all():
            plt.figure()
            df_filt[ghi].plot(style='.')
            df_filt[ghi][~condition_list].plot(style='rP')
            #            plt.legend()
            plt.title(name_check_function)
            plt.suptitle(self.type_data_station, fontsize=18)

            buffer = io.BytesIO()
            plt.savefig(buffer)
            buffer.seek(0)

        num_radiation_transitions_value = mc_solar.num_radiation_transitions(
            self.df[ghi])

        if num_radiation_transitions_value < NUM_RADIATION_TRANSITIONS_THRESHOLD:
            self.assertion_base(
                condition=condition_list.all(),
                error_message=
                'No coherence between radiations considering a percentage threshold of GHI {}% in {}'
                .format(threshold_pct, df_filt[~condition_list].index),
                check_type=name_check_function,
                figure=buffer)
        else:
            self.assertion_base(
                condition=False,
                error_message=
                'Radiation coherence based on GHI not checked because the number of cloudy moments={} [with a DRADIATION_DT={}] is higher than threshold={}'
                .format(num_radiation_transitions_value, DRADIATION_DT,
                        NUM_RADIATION_TRANSITIONS_THRESHOLD),
                error_level='INFO',
                check_type=name_check_function,
                figure=buffer)
Example no. 49
0
def run_and_report(func: Any) -> Any:
    try:
        return func()
    except Exception as ex:
        if _is_env_set("HYDRA_FULL_ERROR"):
            raise ex
        else:
            if isinstance(ex, CompactHydraException):
                sys.stderr.write(str(ex) + os.linesep)
                if isinstance(ex.__cause__, OmegaConfBaseException):
                    sys.stderr.write(str(ex.__cause__) + os.linesep)
            else:
                # Custom printing that strips the Hydra related stack frames from the top
                # And any omegaconf frames from the bottom.
                # It is possible to add additional libraries to sanitize from the bottom later,
                # maybe even make it configurable.
                tb: Any = ex.__traceback__
                search_max = 10
                # strip Hydra frames from start of stack
                # will strip until it hits run_job()
                while search_max > 0:
                    if tb is None:
                        break
                    frame = tb.tb_frame
                    tb = tb.tb_next
                    search_max = search_max - 1
                    if inspect.getframeinfo(frame).function == "run_job":
                        break

                if search_max == 0 or tb is None:
                    # could not detect run_job, probably a runtime exception before we got there.
                    # do not sanitize the stack trace.
                    print_exc()
                    sys.exit(1)

                # strip OmegaConf frames from bottom of stack
                end = tb
                num_frames = 0
                while end is not None:
                    frame = end.tb_frame
                    mdl = inspect.getmodule(frame)
                    assert mdl is not None
                    name = mdl.__name__
                    if name.startswith("omegaconf."):
                        break
                    end = end.tb_next
                    num_frames = num_frames + 1

                @dataclass
                class FakeTracebackType:
                    tb_next: Any = None  # Optional[FakeTracebackType]
                    tb_frame: Optional[FrameType] = None
                    tb_lasti: Optional[int] = None
                    tb_lineno: Optional[int] = None

                iter_tb = tb
                final_tb = FakeTracebackType()
                cur = final_tb
                added = 0
                while True:
                    cur.tb_lasti = iter_tb.tb_lasti
                    cur.tb_lineno = iter_tb.tb_lineno
                    cur.tb_frame = iter_tb.tb_frame

                    if added == num_frames - 1:
                        break
                    added = added + 1
                    cur.tb_next = FakeTracebackType()
                    cur = cur.tb_next
                    iter_tb = iter_tb.tb_next

                print_exception(etype=None, value=ex,
                                tb=final_tb)  # type: ignore
            sys.stderr.write(
                "\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\n"
            )
        sys.exit(1)
Example no. 50
0
def api_remove_ml_backend():
    project = project_get_or_create()
    ml_backend_name = request.json['name']
    project.remove_ml_backend(ml_backend_name)
    project.analytics.send(getframeinfo(currentframe()).function)
    return make_response(jsonify('Deleted!'), 204)
Example no. 51
0
def _insert_alias_assertions(tn, parsed, aliases_dict, aliases_case_sensitive):

    time_string = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # convert aliases_dict into dictionary of dataframes
    aliases = {
        k: _read_file_from_path_or_read_str_as_buffer(v,
                                                      pd.read_csv,
                                                      encoding='utf8',
                                                      skipinitialspace=True)
        for k, v in aliases_dict.items()
        if k in tn.node_types['node_type'].array
    }

    # Get relevant aliases out of each set of aliases
    aliases = {
        k: _select_aliases(v, parsed, k, aliases_case_sensitive)
        for k, v in aliases.items()
    }
    if not aliases_case_sensitive:
        low_keys = {
            k: v.apply(lambda x: x.str.casefold() if x.name == 'key' else x)
            for k, v in aliases.items()
        }
        low_values = {
            k: v.apply(lambda x: x.str.casefold() if x.name == 'value' else x)
            for k, v in aliases.items()
        }
        aliases = {
            k: pd.concat([v, low_keys[k], low_values[k]]).drop_duplicates()
            for k, v in aliases.items()
        }

    # get integer IDs for the relevant node types
    node_type_ids = {
        k: tn.id_lookup('node_types', k, column_label='node_type')
        for k in aliases.keys()
    }

    # add any new strings we found to the textnet strings frame
    new_strings = [
        pd.DataFrame({
            'string': v.stack().array,
            'node_type_id': node_type_ids[k],
            'date_inserted': time_string,
            'date_modified': pd.NA
        }) for k, v in aliases.items()
    ]
    new_strings = pd.concat(new_strings).drop_duplicates()
    # print(new_strings.string, '\n\n\n')
    # Add a node type for alias reference strings and add the alias
    # reference strings to the new strings
    tn.insert_node_type('alias_reference')
    alias_ref_node_id = tn.id_lookup('node_types', 'alias_reference')
    alias_ref_strings = pd.DataFrame({
        'string': aliases_dict.values(),
        'node_type_id': alias_ref_node_id,
        'date_inserted': time_string,
        'date_modified': pd.NA
    })
    new_strings = pd.concat([new_strings, alias_ref_strings])

    # drop any strings already present in tn.strings
    new_strings = new_strings.loc[~new_strings['string'].
                                  isin(tn.strings['string'])]

    # Insert the new strings in tn.strings
    new_strings = shnd.util.normalize_types(new_strings, tn.strings)
    tn.strings = pd.concat([tn.strings, new_strings])

    # make maps between string values and integer IDs relevant to each
    # set of aliases
    string_maps = {
        k: bg.util.get_string_values(tn, node_type_subset=v)
        for k, v in node_type_ids.items()
    }

    if not aliases_case_sensitive:
        low_maps = {k: v.str.casefold() for k, v in string_maps.items()}
        case_alias = {k: (v != low_maps[k]) for k, v in string_maps.items()}
        new_aliases = {
            k: pd.concat([v[case_alias[k]], low_maps[k][case_alias[k]]],
                         axis='columns',
                         ignore_index=True)
            for k, v in string_maps.items()
        }
        new_aliases = {
            k: v.rename(columns={
                0: 'key',
                1: 'value'
            })
            for k, v in new_aliases.items()
        }
        # print(new_aliases['actor'], '\n\n\n')
        aliases = {
            k: pd.concat([v, new_aliases[k]])
            for k, v in aliases.items()
        }

    # print(string_maps['actor'], '\n\n\n')
    # print(aliases['actor'], '\n\n\n')

    string_maps = {
        k: pd.Series(v.index, index=v.array)
        for k, v in string_maps.items()
    }
    '''# convert alias string values to integer string IDs
    aliases = {
        k: v.apply(lambda x: x.map(string_maps[k]))
        for k, v in aliases.items()
    }'''

    # convert alias keys to integer string IDs
    def merge_keys_and_values(aliases, string_map):
        k = aliases.merge(string_map.rename('k'),
                          left_on='key',
                          right_index=True)
        v = aliases.merge(string_map.rename('v'),
                          left_on='value',
                          right_index=True)
        output = k[['k', 'key']].merge(v[['v', 'key']])[['k', 'v']]
        output = output.rename(columns={'k': 'key', 'v': 'value'})
        return output.drop_duplicates()

    aliases = {
        k: merge_keys_and_values(v, string_maps[k])
        for k, v in aliases.items()
    }
    # print(
    #     aliases['actor'].apply(lambda x: x.map(tn.strings['string'])),
    #     '\n\n\n'
    # )

    # Start building an assertions frame out of string values
    new_assertions = {
        k: pd.DataFrame({
            'src_string_id': aliases[k]['key'],
            'tgt_string_id': aliases[k]['value'],
            'ref_string_id': tn.id_lookup('strings', v)
        })
        for k, v in aliases_dict.items()
    }

    new_assertions = pd.concat(new_assertions.values(), ignore_index=True)

    # make a string value representing the current python function
    frame = inspect.currentframe()
    current_module = inspect.getframeinfo(frame).filename
    current_module = Path(current_module).stem.split('.')[0]
    current_function = inspect.getframeinfo(frame).function
    inp_string = '.'.join(['bibliograph', current_module, current_function])

    # Insert a strings row for the input string
    new_string = {
        'string': inp_string,
        'node_type_id': tn.id_lookup('node_types', 'python_function'),
        'date_inserted': time_string,
        'date_modified': pd.NA
    }
    new_string = shnd.util.normalize_types(new_string, tn.strings)
    tn.strings = pd.concat([tn.strings, new_string])

    # Add a link type for aliases if it doesn't already exist
    tn.insert_link_type('alias')
    alias_link_type_id = tn.id_lookup('link_types', 'alias')

    # make a dataframe with the remaining data for the assertions
    # frame and then concat it with the partial assertions frame
    assertion_metadata = pd.DataFrame(
        {
            'inp_string_id': tn.strings.index[-1],
            'link_type_id': alias_link_type_id,
            'date_inserted': time_string,
            'date_modified': pd.NA
        },
        index=new_assertions.index)
    new_assertions = pd.concat([new_assertions, assertion_metadata],
                               axis='columns')

    # put the new assertions columns in the right order and then
    # add them to the textnet assertions
    new_assertions = new_assertions[tn.assertions.columns]
    new_assertions = shnd.util.normalize_types(new_assertions, tn.assertions)
    tn.assertions = pd.concat([tn.assertions, new_assertions])
Example no. 52
0
def api_instruction():
    """ Instruction for annotators
    """
    project = project_get_or_create()
    project.analytics.send(getframeinfo(currentframe()).function)
    return make_response(project.config['instruction'], 200)
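Example no. 53
0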
from array import array
from time import time
from inspect import currentframe,getframeinfo
frameinfo=getframeinfo(currentframe())

def brute_force(arr: array)->int:

    numInv=0
    for i in range(0,len(arr)-1):
        for j in range(i+1,len(arr)):
            if arr[i]>arr[j]:
                numInv+=1
    return numInv


def merge_and_count_splitinv(arr1: array, arr2: array)->(array,int):
    # Merge two sorted arrays while counting split inversions,
    # i.e. pairs (x, y) with x in arr1, y in arr2 and x > y
    n = len(arr1)+len(arr2)
    arr_fin = array("i",[])

    i,j,splitinv = 0,0,0

    for k in range(n):
        # checking if one of the iterators is at the end; if so, copy the rest of the other array
        if i == len(arr1):
            arr_fin.extend(arr2[j:])
            break
        if j == len(arr2):
            arr_fin.extend(arr1[i:])
            break
        if arr1[i] <= arr2[j]:
            arr_fin.append(arr1[i])
            i += 1
        else:
            # arr2[j] precedes every remaining element of arr1,
            # so it forms an inversion with each of them
            arr_fin.append(arr2[j])
            splitinv += len(arr1) - i
            j += 1

    return arr_fin, splitinv
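
A minimal recursive driver, not part of the original (truncated) snippet, sketching how merge_and_count_splitinv is typically combined into the classic O(n log n) inversion count; the name sort_and_count is hypothetical:

def sort_and_count(arr: array)->(array,int):
    # total inversions = inversions in the left half + inversions in the right half + split inversions
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    left, left_inv = sort_and_count(arr[:mid])
    right, right_inv = sort_and_count(arr[mid:])
    merged, split_inv = merge_and_count_splitinv(left, right)
    return merged, left_inv + right_inv + split_inv

# sanity check against the quadratic reference implementation
nums = array("i", [6, 5, 4, 3, 2, 1])
assert sort_and_count(nums)[1] == brute_force(nums) == 15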
Example no. 54
0
def api_project_storage_settings():
    project = project_get_or_create()

    # GET: return selected form, populated with current storage parameters
    if request.method == 'GET':
        # render all forms for caching in web
        all_forms = {'source': {}, 'target': {}}
        for storage_for in all_forms:
            for name, description in project.get_available_storage_names(
                    storage_for).items():
                current_type = project.config.get(storage_for,
                                                  {'type': ''})['type']
                current = name == current_type
                form_class = get_storage_form(name)
                form = form_class(data=project.get_storage(
                    storage_for).get_params()) if current else form_class()
                all_forms[storage_for][name] = {
                    'fields': [serialize_class(field) for field in form],
                    'type': name,
                    'current': current,
                    'description': description,
                    'path': getattr(project, storage_for + '_storage').readable_path
                }
                # generate data key automatically
                if project.data_types.keys():
                    for field in all_forms[storage_for][name]['fields']:
                        if field['name'] == 'data_key' and not field['data']:
                            field['data'] = list(project.data_types.keys())[0]
        project.analytics.send(getframeinfo(currentframe()).function,
                               method=request.method)
        return make_response(jsonify(all_forms), 200)

    # POST: update storage given filled form
    if request.method == 'POST':
        selected_type = request.args.get('type', '')
        storage_for = request.args.get('storage_for')
        current_type = project.config.get(storage_for, {'type': ''})['type']
        selected_type = selected_type if selected_type else current_type

        form = get_storage_form(selected_type)(data=request.json)
        project.analytics.send(getframeinfo(currentframe()).function,
                               method=request.method,
                               storage=selected_type,
                               storage_for=storage_for)
        if form.validate_on_submit():
            storage_kwargs = dict(form.data)
            storage_kwargs['type'] = request.json['type']  # storage type
            try:
                project.update_storage(storage_for, storage_kwargs)
            except Exception as e:
                traceback = tb.format_exc()
                logger.error(str(traceback))
                return make_response(
                    jsonify(
                        {'detail': 'Error while storage update: ' + str(e)}),
                    400)
            else:
                return make_response(jsonify({'result': 'ok'}), 201)
        else:
            logger.error('Errors: ' + str(form.errors) + ' for request body ' +
                         str(request.json))
            return make_response(jsonify({'errors': form.errors}), 400)
Example no. 55
0
    def save(self):
        super(Metadata, self).save()
        # Execute only if the caller is not in watch. (See above)
        caller = inspect.getframeinfo(sys._getframe(1), context=0)[2]
        try:
            if not caller == 'f':

                # Save overlapping Keyword / Metadata fields
                k = Keyword.objects.get(pk=self.image.pk)
                k.cright = self.copyright
                k.subject = self.subject
                k.source = self.source
                k.keywords = self.keywords
                k.save()

                # Save overlapping Image / Metadata fields
                i = Image.objects.get(pk=self.image.pk)
                i.group_status = self.headline
                i.save()

                m = metadata.Metadata()
                path = os.path.join(settings.APP_CONTENT_ROOT,
                                    i.image_real_path)

                cmdDict = {
                    'source': self.source,
                    'captionwriter': self.caption_writer,
                    'subject': self.keywords,
                    'xmp:keywords': self.keywords,  # revision 182, changed to xmp keywords
                    'description': self.description,
                    'location': self.location,
                    'city': self.city,
                    'province-state': self.provincestate,
                    'country': self.country,
                    'instructions': self.instructions,
                    'title': self.subject,
                    'creatortool': self.creator_tool,
                    'creator': self.creator,
                    'author': self.author,
                    'credit': self.credit,
                    # the xmp prefix should accommodate Illustrator and other PDF-based file formats
                    'xmp:headline': self.headline,
                    'album': self.album,
                    'documentname': self.subject,
                    'copyright': 'yes' if self.copyright == 1 else 'no' if self.copyright == 0 else 'unknown',
                    'orientation': self.orientation
                }

                if self.document:
                    cmdDict['ManagedFromFilePath'] = self.document.path

                m.exifWriteAll(cmdDict, path)

            else:

                logging.info(
                    "Caller was %s so no models.Metadata super save()" %
                    caller)

        finally:
            del caller
Example no. 56
0
def main(path=None,
         browser=None,
         report=None,
         title="Seldom Test Report",
         description="Test case execution",
         debug=False,
         rerun=0,
         save_last_run=False,
         driver_path=None,
         grid_url=None):
    """
    Run test cases.
    :param path:
    :param browser:
    :param report:
    :param title:
    :param description:
    :param debug:
    :param rerun:
    :param save_last_run:
    :param driver_path:
    :param grid_url:
    :return:
    """

    if path is None:
        stack_t = inspect.stack()
        ins = inspect.getframeinfo(stack_t[1][0])
        file_dir = os.path.dirname(os.path.abspath(ins.filename))
        file_path = ins.filename
        if "\\" in file_path:
            this_file = file_path.split("\\")[-1]
        elif "/" in file_path:
            this_file = file_path.split("/")[-1]
        else:
            this_file = file_path
        suits = unittest.defaultTestLoader.discover(file_dir, this_file)
    else:
        if len(path) > 3:
            if path[-3:] == ".py":
                if "/" in path:
                    path_list = path.split("/")
                    path_dir = path.replace(path_list[-1], "")
                    suits = unittest.defaultTestLoader.discover(path_dir, pattern=path_list[-1])
                else:
                    suits = unittest.defaultTestLoader.discover(os.getcwd(), pattern=path)
            else:
                suits = unittest.defaultTestLoader.discover(path)
        else:
            suits = unittest.defaultTestLoader.discover(path)

    if browser is None:
        BrowserConfig.name = "chrome"
    else:
        BrowserConfig.name = browser
        BrowserConfig.grid_url = grid_url

    if driver_path is not None:
        ret = os.path.exists(driver_path)
        if ret is False:
            raise ValueError("Browser - driven path error,Please check if the file exists. => {}".format(driver_path))
        BrowserConfig.driver_path = driver_path

    """
    Global launch browser
    """
    Seldom.driver = b(BrowserConfig.name, BrowserConfig.driver_path, BrowserConfig.grid_url)

    if debug is False:
        for filename in os.listdir(os.getcwd()):
            if filename == "reports":
                break
        else:
            os.mkdir(os.path.join(os.getcwd(), "reports"))

        if report is None:
            now = time.strftime("%Y_%m_%d_%H_%M_%S")
            report = os.path.join(os.getcwd(), "reports", now + "_result.html")
            BrowserConfig.report_path = report

        with(open(report, 'wb')) as fp:
            runner = HTMLTestRunner(stream=fp, title=title, description=description)
            log.info(seldom_str)
            runner.run(suits, rerun=rerun, save_last_run=save_last_run)
        print("generated html file: file:///{}".format(report))
    else:
        runner = unittest.TextTestRunner(verbosity=2)
        log.info("A run the test in debug mode without generating HTML report!")
        log.info(seldom_str)
        runner.run(suits)

    """
    Close browser globally
    """
    Seldom.driver.quit()
Example no. 57
0
import os.path
import logging
import sys
import inspect
import mitmproxy.websocket

# This file is executed by `mitmdump' with `execfile'. Therefore, in
# order to import submodules under the directory where this file exists,
# we need some tricks. See http://stackoverflow.com/questions/3718657
# for the details of the tricks used in the following lines.
THIS_FILENAME = inspect.getframeinfo(inspect.currentframe()).filename
THIS_DIR_PATH = os.path.dirname(os.path.abspath(THIS_FILENAME))
sys.path.append(THIS_DIR_PATH)
import mahjongsoul_sniffer.logging as logging_
from mahjongsoul_sniffer.redis_mirroring import RedisMirroring

logging_.initialize(module_name='game_detail_crawler', service_name='sniffer')

_REDIS_MIRRORING_CONFIG = {
    'websocket': {
        '.lq.Lobby.loginBeat': {
            'request_direction': 'outbound',
            'action': {
                'command': 'SET',
                'key': 'login-beat'
            }
        },
        '.lq.Lobby.fetchGameRecord': {
            'request_direction': 'outbound',
            'action': {
                'command': 'SET',
Exemplo n.º 58
0
import unittest
import os
import inspect
import sys

proj_dir = os.path.abspath(os.path.join(inspect.getframeinfo(inspect.currentframe()).filename, '../../'))
sys.path.append(os.path.join(proj_dir, 'src'))

data_dir = os.path.join(proj_dir, 'data')
tmp_dir = os.path.join(proj_dir, 'tmp')

if not os.path.exists(tmp_dir):
  os.mkdir(tmp_dir)

import polca

fnames_ascii = '''E.coli
a.txt
aaa.txt
alice29.txt
alphabet.txt
asyoulik.txt
bib
bible.txt
book1
book2
download.sh
fields.c
grammar.lsp
lcet10.txt
news
Example no. 59
0
    def check_coherence_isotypes(self,
                                 dni,
                                 top,
                                 mid,
                                 bot,
                                 threshold_pct,
                                 radiation_threshold=None):
        # Check radiation coherence between DNI and isotypes
        # THRESHOLD is in percentage
        name_check_function = inspect.getframeinfo(
            inspect.currentframe()).function

        if radiation_threshold is None:
            radiation_threshold = DNI_RADIATION_THRESHOLD

        df_filt = self.df[self.df[dni] > radiation_threshold]

        if len(df_filt) == 0:  # Avoids future errors
            return None

        dni_model = (df_filt[top] * 0.51 + df_filt[mid] * 0.10 +
                     df_filt[bot] * 0.39)

        condition_list = (((df_filt[dni] - dni_model).abs()) / df_filt[dni] *
                          100 < threshold_pct)

        buffer = None
        if not condition_list.all():
            plt.figure()
            df_filt[dni].plot(style='k.')
            df_filt[top].plot(style='.')
            df_filt[mid].plot(style='.')
            df_filt[bot].plot(style='.')
            df_filt[dni][~condition_list].plot(marker='P',
                                               markersize=8,
                                               color='darkred',
                                               markeredgecolor='yellow',
                                               markeredgewidth=2)
            plt.legend([top, mid, bot])
            plt.title(name_check_function)
            plt.suptitle(self.type_data_station, fontsize=18)

            buffer = io.BytesIO()
            plt.savefig(buffer)
            buffer.seek(0)

        irrad_filt = self.df[dni][lambda m: m > DNI_RADIATION_THRESHOLD]
        num_radiation_transitions_value = mc_solar.num_radiation_transitions(
            irrad_filt)

        if num_radiation_transitions_value < NUM_RADIATION_TRANSITIONS_THRESHOLD:
            self.assertion_base(
                condition=condition_list.all(),
                error_message=
                'No coherence between DNI radiation and isotypes considering a percentage threshold of {} % in {}'
                .format(threshold_pct, df_filt[dni][~condition_list].index),
                check_type=name_check_function,
                figure=buffer)
        else:
            self.assertion_base(
                condition=False,
                error_message=
                'DNI vs isotypes comparison not checked because the number of cloudy moments={} [with a DRADIATION_DT={}] is higher than threshold={}'
                .format(num_radiation_transitions_value, DRADIATION_DT,
                        NUM_RADIATION_TRANSITIONS_THRESHOLD),
                error_level='INFO',
                check_type=name_check_function,
                figure=buffer)
Example no. 60
0
    def test_model_against_training_data(
            self,
            include_detailed_accuracy_stats = False
    ):
        start_get_td_time = pf.Profiling.start()
        lg.Log.info(
            str(self.__class__) + ' ' + str(getframeinfo(currentframe()).lineno)
            + '.   Start Load Training Data: ' + str(start_get_td_time)
        )

        # Get training data to improve LeBot intent/command detection
        self.model.load_training_data_from_storage()
        td = self.model.training_data
        lg.Log.debug(
            str(self.__class__) + ' ' + str(getframeinfo(currentframe()).lineno)
            + ': TD x_name, shape=' + str(td.get_x_name().shape) + ': ' +  str(td.get_x_name())
            + '\n\rTD shape=' + str(td.get_x().shape)
            + '\n\rTD[0:10] =' + str(td.get_x()[0:10])
        )

        stop_get_td_time = pf.Profiling.stop()
        lg.Log.info(
            str(self.__class__) + ' ' + str(getframeinfo(currentframe()).lineno)
            + '.   Stop Load Training Data: '
            + str(pf.Profiling.get_time_dif_str(start_get_td_time, stop_get_td_time)))

        start_test_time = pf.Profiling.start()
        lg.Log.info(
            str(self.__class__) + ' ' + str(getframeinfo(currentframe()).lineno)
            + '.   Start Testing of Training Data from DB Time : ' + str(start_test_time)
        )
        #
        # Read from chatbot training files to compare with LeBot performance
        #
        self.reset_test_stats()
        self.test_stats[ModelBackTest.KEY_STATS_START_TEST_TIME] = start_test_time

        x_name = td.get_x_name()
        x = td.get_x()
        y = td.get_y()
        for i in range(0, x.shape[0], 1):
            y_expected = y[i]
            v = nputil.NumpyUtil.convert_dimension(arr=x[i], to_dim=2)
            x_features = x_name[v[0]>0]

            df_match_details = self.predict_top_x(
                v = v,
                y_expected = y_expected,
                x_features = x_features
            )

            self.update_test_stats(
                df_match_details = df_match_details,
                y_expected = y_expected,
                x_features = x_features,
                include_detailed_accuracy_stats = include_detailed_accuracy_stats
            )

        stop_test_time = pf.Profiling.stop()
        lg.Log.important('.   Stop Testing of Training Data from DB Time : '
                   + str(pf.Profiling.get_time_dif_str(start_test_time, stop_test_time)))

        lg.Log.important(
            str(self.test_stats[ModelBackTest.KEY_STATS_RESULT_WRONG]) + ' wrong results from '
            + str(x.shape[0]) + ' total tests.'
        )
        for q in (0.0, 0.05, 0.1, 0.25, 0.50, 0.75, 0.9, 0.95):
            lg.Log.important(
                'Score Quantile (' + str(q) + '): '
                + str(self.test_stats[ModelBackTest.KEY_STATS_DF_SCORES]['Score'].quantile(q))
            )

        return