def debug_exception():
    '''Start a debugger on the exception currently being handled.'''
    import sys

    # Only the traceback (third element of exc_info) is needed here.
    traceback = sys.exc_info()[2]
    dbg = _get_debugger()
    dbg.reset()
    dbg.interaction(None, traceback)
def post_mortem(tb=None):
    '''Helper function, like pdb.post_mortem.'''
    if tb is None:
        # Default to the traceback of the exception currently being handled;
        # sys.exc_info() yields (type, value, traceback), or Nones when no
        # exception is active.
        _, _, tb = sys.exc_info()
        if tb is None:
            raise ValueError("A valid traceback must be passed if no "
                             "exception is being handled")
    dbg = Vdb()
    dbg.reset()
    dbg.interaction(None, tb)
def execution_loop(tasks, options):
    '''Run the given tasks until the list is exhausted or nothing can run.

    Parameters
    ----------
    tasks : list of Task
        Pending tasks; this list is consumed (mutated) as tasks are
        loaded or executed.
    options : options object
        Must provide `execute_nr_wait_cycles`, `execute_wait_cycle_time_secs`,
        `aggressive_unload`, `debug`, `pdb`, and `execute_keep_going`.

    Fires the `execute.task-loadable`, `execute.task-pre-execute`, and
    `execute.task-executed1` jug hooks as appropriate.
    '''
    from time import sleep
    logging.info('Execute start (%s tasks)' % len(tasks))

    # For the special (but common) case where most (if not all) of the tasks
    # can be loaded directly, just skip them as fast as possible:
    first_unloadable = 0
    while (first_unloadable < len(tasks)) and tasks[first_unloadable].can_load():
        jug_hook('execute.task-loadable', (tasks[first_unloadable], ))
        first_unloadable += 1
    del tasks[:first_unloadable]

    prevtask = None
    while tasks:
        upnext = []  # tasks that can be run
        nr_wait_cycles = int(options.execute_nr_wait_cycles)
        for i in range(nr_wait_cycles):
            max_cannot_run = min(len(tasks), 128)
            if i == nr_wait_cycles - 1:
                # in the last loop iteration, check all tasks to ensure we
                # don't miss any
                max_cannot_run = len(tasks)
            for i in range(max_cannot_run):
                # The argument for this is the following:
                # if T' is dependent on the result of T, it is better if the
                # processor that ran T, also runs T'. By having everyone else
                # push T' to the end of tasks, this is more likely to happen.
                #
                # Furthermore, this avoids always querying the same tasks.
                if tasks[0].can_run():
                    break
                tasks.append(tasks.pop(0))
            while tasks and tasks[0].can_run():
                upnext.append(tasks.pop(0))
            if upnext:
                break
            # Nothing runnable at the front: scan the whole queue once.
            for ti, t in enumerate(tasks):
                if t.can_run():
                    upnext.append(tasks.pop(ti))
                    break
            if upnext:
                break
            logging.info('waiting %s secs for an open task...' % options.execute_wait_cycle_time_secs)
            sleep(int(options.execute_wait_cycle_time_secs))
        if not upnext:
            logging.info('No tasks can be run!')
            break
        for t in upnext:
            if t.can_load():
                jug_hook('execute.task-loadable', (t, ))
                continue
            locked = False
            try:
                locked = t.lock()
                if t.can_load():
                    # This can be true if the task ran between the check
                    # above and this one
                    jug_hook('execute.task-loadable', (t, ))
                elif locked:
                    logging.info('Executing %s...' % t.name)
                    jug_hook('execute.task-pre-execute', (t, ))
                    if options.aggressive_unload:
                        if prevtask is not None:
                            # Unload everything the previous task held that
                            # the current task does not itself depend on.
                            active = {id(d) for d in t.dependencies()}
                            for d in itertools.chain(prevtask.dependencies(), [prevtask]):
                                if id(d) not in active:
                                    d.unload()
                        prevtask = t
                    t.run(debug_mode=options.debug)
                    jug_hook('execute.task-executed1', (t, ))
                else:
                    logging.info('Already in execution %s...' % t.name)
            except SystemExit:
                raise
            except Exception as e:
                if options.pdb:
                    import sys
                    _, _, tb = sys.exc_info()

                    # The code below is a complex attempt to load IPython
                    # debugger which works with multiple versions of IPython.
                    #
                    # Unfortunately, their API kept changing prior to the 1.0.
                    try:
                        import IPython
                        try:
                            import IPython.core.debugger
                            try:
                                from IPython.terminal.ipapp import load_default_config
                                config = load_default_config()
                                colors = config.TerminalInteractiveShell.colors
                            except:  # deliberate best-effort: fall back to the older config API
                                import IPython.core.ipapi
                                ip = IPython.core.ipapi.get()
                                colors = ip.colors
                            try:
                                debugger = IPython.core.debugger.Pdb(colors.get_value(initial='Linux'))
                            except AttributeError:
                                debugger = IPython.core.debugger.Pdb(colors)
                        except ImportError:
                            # Fallback to older version of IPython API
                            # NOTE(review): `IPython.Shell` is referenced without
                            # an explicit import — presumably exposed by the
                            # imports above on those old IPython versions; verify
                            # whether this legacy path is still reachable.
                            import IPython.ipapi
                            import IPython.Debugger
                            shell = IPython.Shell.IPShell(argv=[''])
                            ip = IPython.ipapi.get()
                            debugger = IPython.Debugger.Pdb(ip.options.colors)
                    except ImportError:
                        # Fallback to standard debugger
                        import pdb
                        debugger = pdb.Pdb()

                    debugger.reset()
                    debugger.interaction(None, tb)
                else:
                    logging.critical('Exception while running %s: %s' % (t.name, e))
                    for other in itertools.chain(upnext, tasks):
                        for dep in other.dependencies():
                            if dep is t:
                                logging.critical('Other tasks are dependent on this one! Parallel processors will be held waiting!')
                    if not options.execute_keep_going:
                        raise
            finally:
                if locked:
                    t.unlock()
    if options.aggressive_unload and prevtask is not None:
        prevtask.unload()
def execution_loop(tasks, options, tasks_executed, tasks_loaded):
    '''Run the given tasks until the list is exhausted or nothing can run.

    Parameters
    ----------
    tasks : list of Task
        Pending tasks; this list is consumed (mutated) as tasks are
        loaded or executed.
    options : options object
        Must provide `execute_nr_wait_cycles`, `execute_wait_cycle_time_secs`,
        `aggressive_unload`, `debug`, `pdb`, and `execute_keep_going`.
    tasks_executed : mapping (task name -> int)
        Incremented for every task this process executes.
    tasks_loaded : mapping (task name -> int)
        Incremented for every task found to be already loadable.
    '''
    from time import sleep
    logging.info('Execute start (%s tasks)' % len(tasks))
    while tasks:
        upnext = []  # tasks that can be run
        for i in range(int(options.execute_nr_wait_cycles)):
            max_cannot_run = min(len(tasks), 128)
            for i in range(max_cannot_run):
                # The argument for this is the following:
                # if T' is dependent on the result of T, it is better if the
                # processor that ran T, also runs T'. By having everyone else
                # push T' to the end of tasks, this is more likely to happen.
                #
                # Furthermore, this avoids always querying the same tasks.
                if tasks[0].can_run():
                    break
                tasks.append(tasks.pop(0))
            while tasks and tasks[0].can_run():
                upnext.append(tasks.pop(0))
            if upnext:
                break
            # Nothing runnable at the front: scan the whole queue once.
            for ti, t in enumerate(tasks):
                if t.can_run():
                    upnext.append(tasks.pop(ti))
                    break
            if upnext:
                break
            logging.info('waiting %s secs for an open task...' % options.execute_wait_cycle_time_secs)
            sleep(int(options.execute_wait_cycle_time_secs))
        if not upnext:
            logging.info('No tasks can be run!')
            break
        for t in upnext:
            if t.can_load():
                logging.info('Loadable %s...' % t.name)
                tasks_loaded[t.name] += 1
                continue
            locked = False
            try:
                locked = t.lock()
                if t.can_load():
                    # This can be true if the task ran between the check
                    # above and this one
                    logging.info('Loadable %s...' % t.name)
                    tasks_loaded[t.name] += 1
                elif locked:
                    logging.info('Executing %s...' % t.name)
                    t.run(debug_mode=options.debug)
                    tasks_executed[t.name] += 1
                    if options.aggressive_unload:
                        t.unload_recursive()
                else:
                    logging.info('Already in execution %s...' % t.name)
            except Exception as e:
                if options.pdb:
                    import sys
                    _, _, tb = sys.exc_info()

                    # The code below is a complex attempt to load IPython
                    # debugger which works with multiple versions of IPython.
                    #
                    # Unfortunately, their API kept changing prior to the 1.0.
                    try:
                        import IPython
                        try:
                            import IPython.core.debugger
                            try:
                                from IPython.terminal.ipapp import load_default_config
                                config = load_default_config()
                                colors = config.TerminalInteractiveShell.colors
                            except:  # deliberate best-effort: fall back to the older config API
                                import IPython.core.ipapi
                                ip = IPython.core.ipapi.get()
                                colors = ip.colors
                            debugger = IPython.core.debugger.Pdb(colors)
                        except ImportError:
                            # Fallback to older version of IPython API
                            import IPython.ipapi
                            import IPython.Debugger
                            shell = IPython.Shell.IPShell(argv=[''])
                            ip = IPython.ipapi.get()
                            # BUG FIX: this read `IPythong.Debugger.Pdb(...)`
                            # (typo), which raised NameError instead of
                            # starting the fallback debugger.
                            debugger = IPython.Debugger.Pdb(ip.options.colors)
                    except ImportError:
                        # Fallback to standard debugger
                        import pdb
                        debugger = pdb.Pdb()

                    debugger.reset()
                    debugger.interaction(None, tb)
                else:
                    import itertools
                    logging.critical('Exception while running %s: %s' % (t.name, e))
                    for other in itertools.chain(upnext, tasks):
                        for dep in other.dependencies():
                            if dep is t:
                                logging.critical('Other tasks are dependent on this one! Parallel processors will be held waiting!')
                    if not options.execute_keep_going:
                        raise
            finally:
                if locked:
                    t.unlock()
def execution_loop(tasks, options):
    '''Run the given tasks until the list is exhausted or nothing can run.

    `tasks` is a list of Task objects and is consumed (mutated) as tasks
    are loaded or executed.  `options` must provide
    `execute_nr_wait_cycles`, `execute_wait_cycle_time_secs`, `debug`,
    `pdb`, and `execute_keep_going`.  Fires the `execute.task-loadable`,
    `execute.task-pre-execute`, and `execute.task-executed1` jug hooks.
    '''
    from time import sleep
    logging.info('Execute start (%s tasks)' % len(tasks))
    while tasks:
        upnext = []  # tasks that can be run
        for i in range(int(options.execute_nr_wait_cycles)):
            # Only probe a bounded prefix of the queue per cycle.
            max_cannot_run = min(len(tasks), 128)
            for i in range(max_cannot_run):
                # The argument for this is the following:
                # if T' is dependent on the result of T, it is better if the
                # processor that ran T, also runs T'. By having everyone else
                # push T' to the end of tasks, this is more likely to happen.
                #
                # Furthermore, this avoids always querying the same tasks.
                if tasks[0].can_run():
                    break
                tasks.append(tasks.pop(0))
            # Collect the runnable run at the front of the queue.
            while tasks and tasks[0].can_run():
                upnext.append(tasks.pop(0))
            if upnext:
                break
            # Nothing at the front: scan the whole queue for one runnable task.
            for ti, t in enumerate(tasks):
                if t.can_run():
                    upnext.append(tasks.pop(ti))
                    break
            if upnext:
                break
            # Nothing runnable anywhere yet — wait and retry.
            logging.info('waiting %s secs for an open task...' % options.execute_wait_cycle_time_secs)
            sleep(int(options.execute_wait_cycle_time_secs))
        if not upnext:
            logging.info('No tasks can be run!')
            break
        for t in upnext:
            if t.can_load():
                logging.info('Loadable %s...' % t.name)
                jug_hook('execute.task-loadable', (t, ))
                continue
            locked = False
            try:
                locked = t.lock()
                if t.can_load():  # This can be true if the task ran between the check above and this one
                    logging.info('Loadable %s...' % t.name)
                    jug_hook('execute.task-loadable', (t, ))
                elif locked:
                    logging.info('Executing %s...' % t.name)
                    jug_hook('execute.task-pre-execute', (t, ))
                    t.run(debug_mode=options.debug)
                    jug_hook('execute.task-executed1', (t, ))
                else:
                    # Another process holds the lock; it is running this task.
                    logging.info('Already in execution %s...' % t.name)
            except SystemExit:
                raise
            except Exception as e:
                if options.pdb:
                    import sys
                    _, _, tb = sys.exc_info()

                    # The code below is a complex attempt to load IPython
                    # debugger which works with multiple versions of IPython.
                    #
                    # Unfortunately, their API kept changing prior to the 1.0.
                    try:
                        import IPython
                        try:
                            import IPython.core.debugger
                            try:
                                from IPython.terminal.ipapp import load_default_config
                                config = load_default_config()
                                colors = config.TerminalInteractiveShell.colors
                            except:
                                # Best-effort fallback to the older config API.
                                import IPython.core.ipapi
                                ip = IPython.core.ipapi.get()
                                colors = ip.colors
                            debugger = IPython.core.debugger.Pdb(colors)
                        except ImportError:
                            #Fallback to older version of IPython API
                            # NOTE(review): `IPython.Shell` is referenced without
                            # an explicit import — presumably exposed by the
                            # imports above on those old IPython versions; verify.
                            import IPython.ipapi
                            import IPython.Debugger
                            shell = IPython.Shell.IPShell(argv=[''])
                            ip = IPython.ipapi.get()
                            debugger = IPython.Debugger.Pdb(ip.options.colors)
                    except ImportError:
                        #Fallback to standard debugger
                        import pdb
                        debugger = pdb.Pdb()
                    debugger.reset()
                    debugger.interaction(None, tb)
                else:
                    import itertools
                    logging.critical('Exception while running %s: %s' % (t.name, e))
                    # Warn loudly if sibling tasks depend on the failed one:
                    # parallel workers will block waiting for its result.
                    for other in itertools.chain(upnext, tasks):
                        for dep in other.dependencies():
                            if dep is t:
                                logging.critical('Other tasks are dependent on this one! Parallel processors will be held waiting!')
                    if not options.execute_keep_going:
                        raise
            finally:
                # Always release the lock we took, even on failure.
                if locked:
                    t.unlock()
def execution_loop(tasks, options):
    '''Run the given tasks until the list is exhausted or nothing can run.

    Parameters
    ----------
    tasks : list of Task
        Pending tasks; this list is consumed (mutated) as tasks are
        loaded or executed.
    options : options object
        Must provide `execute_nr_wait_cycles`, `execute_wait_cycle_time_secs`,
        `debug`, `pdb`, and `execute_keep_going`.

    Fires the `execute.task-loadable`, `execute.task-pre-execute`, and
    `execute.task-executed1` jug hooks as appropriate.
    '''
    from time import sleep
    logging.info('Execute start (%s tasks)' % len(tasks))

    # For the special (but common) case where most (if not all) of the tasks
    # can be loaded directly, just skip them as fast as possible:
    first_unloadable = 0
    while (first_unloadable < len(tasks)) and tasks[first_unloadable].can_load():
        jug_hook('execute.task-loadable', (tasks[first_unloadable],))
        first_unloadable += 1
    del tasks[:first_unloadable]

    while tasks:
        upnext = []  # tasks that can be run
        nr_wait_cycles = int(options.execute_nr_wait_cycles)
        for i in range(nr_wait_cycles):
            max_cannot_run = min(len(tasks), 128)
            if i == nr_wait_cycles - 1:
                # in the last loop iteration, check all tasks to ensure we
                # don't miss any
                max_cannot_run = len(tasks)
            for i in range(max_cannot_run):
                # The argument for this is the following:
                # if T' is dependent on the result of T, it is better if the
                # processor that ran T, also runs T'. By having everyone else
                # push T' to the end of tasks, this is more likely to happen.
                #
                # Furthermore, this avoids always querying the same tasks.
                if tasks[0].can_run():
                    break
                tasks.append(tasks.pop(0))
            while tasks and tasks[0].can_run():
                upnext.append(tasks.pop(0))
            if upnext:
                break
            # Nothing runnable at the front: scan the whole queue once.
            for ti, t in enumerate(tasks):
                if t.can_run():
                    upnext.append(tasks.pop(ti))
                    break
            if upnext:
                break
            logging.info('waiting %s secs for an open task...' % options.execute_wait_cycle_time_secs)
            sleep(int(options.execute_wait_cycle_time_secs))
        if not upnext:
            logging.info('No tasks can be run!')
            break
        for t in upnext:
            if t.can_load():
                jug_hook('execute.task-loadable', (t,))
                continue
            locked = False
            try:
                locked = t.lock()
                if t.can_load():
                    # This can be true if the task ran between the check
                    # above and this one
                    jug_hook('execute.task-loadable', (t,))
                elif locked:
                    logging.info('Executing %s...' % t.name)
                    jug_hook('execute.task-pre-execute', (t,))
                    t.run(debug_mode=options.debug)
                    jug_hook('execute.task-executed1', (t,))
                else:
                    logging.info('Already in execution %s...' % t.name)
            except SystemExit:
                raise
            except Exception as e:
                if options.pdb:
                    import sys
                    _, _, tb = sys.exc_info()

                    # The code below is a complex attempt to load IPython
                    # debugger which works with multiple versions of IPython.
                    #
                    # Unfortunately, their API kept changing prior to the 1.0.
                    try:
                        import IPython
                        try:
                            import IPython.core.debugger
                            try:
                                from IPython.terminal.ipapp import load_default_config
                                config = load_default_config()
                                colors = config.TerminalInteractiveShell.colors
                            except:  # deliberate best-effort: fall back to the older config API
                                import IPython.core.ipapi
                                ip = IPython.core.ipapi.get()
                                colors = ip.colors
                            try:
                                debugger = IPython.core.debugger.Pdb(colors.get_value(initial='Linux'))
                            except AttributeError:
                                debugger = IPython.core.debugger.Pdb(colors)
                        except ImportError:
                            # Fallback to older version of IPython API
                            # NOTE(review): `IPython.Shell` is referenced without
                            # an explicit import — presumably exposed by the
                            # imports above on those old IPython versions; verify.
                            import IPython.ipapi
                            import IPython.Debugger
                            shell = IPython.Shell.IPShell(argv=[''])
                            ip = IPython.ipapi.get()
                            debugger = IPython.Debugger.Pdb(ip.options.colors)
                    except ImportError:
                        # Fallback to standard debugger
                        import pdb
                        debugger = pdb.Pdb()

                    debugger.reset()
                    debugger.interaction(None, tb)
                else:
                    import itertools
                    logging.critical('Exception while running %s: %s' % (t.name, e))
                    for other in itertools.chain(upnext, tasks):
                        for dep in other.dependencies():
                            if dep is t:
                                logging.critical('Other tasks are dependent on this one! Parallel processors will be held waiting!')
                    if not options.execute_keep_going:
                        raise
            finally:
                if locked:
                    t.unlock()