def handle(self, signum, frame):
    """This method is called when a signal is received."""
    if self.print_method:
        self.print_method('\nProgram received signal %s' % self.signame)
    if self.print_stack:
        import traceback
        strings = traceback.format_stack(frame)
        for s in strings:
            if s[-1] == '\n':
                s = s[0:-1]
            self.print_method(s)
    if self.pass_along:
        # pass the signal to the program
        if self.old_handler:
            self.old_handler(signum, frame)
    if self.stop_method is not None:
        ## FIXME not sure if this is really right
        if frame.f_trace is None:
            import pydb
            pydb.debugger()
        else:
            self.stop_method(frame)
    return
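# For context: pydb wires handlers like the one above into the interpreter
# through the standard-library `signal` module. Below is a minimal,
# self-contained sketch of the same pattern -- illustrative only, with a
# hypothetical name (make_stack_printing_handler), not pydb's actual API.
import signal
import sys
import traceback

def make_stack_printing_handler(old_handler):
    """Build a handler that prints the stack, then defers to old_handler."""
    def _handle(signum, frame):
        sys.stderr.write('\nProgram received signal %d\n' % signum)
        for line in traceback.format_stack(frame):
            sys.stderr.write(line)
        if callable(old_handler):
            old_handler(signum, frame)  # pass the signal along
    return _handle

# Remember the previous handler so the signal can be chained to it
# (assumes a Unix platform where SIGUSR1 exists):
previous = signal.getsignal(signal.SIGUSR1)
signal.signal(signal.SIGUSR1, make_stack_printing_handler(previous))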
def getOtherOperand(opNode, oneOperand):
    """Given one operand of a binary operation node, return the other
    operand together with its position (0 or 1)."""
    if fileId == "qx.data.marshal.Json":
        import pydb
        pydb.debugger()
    operands = opNode.getChildren(True)
    #if operands[0] == oneOperand.parent:  # switch between "first" and "second"
    if operands[0] == oneOperand:  # switch between "first" and "second"
        #otherOperand = operands[1].getFirstChild(ignoreComments=True)
        otherOperand = operands[1]
        otherPosition = 1
    else:
        #otherOperand = operands[0].getFirstChild(ignoreComments=True)
        otherOperand = operands[0]
        otherPosition = 0
    return otherOperand, otherPosition
def processVariantGet(callNode, variantMap):
    treeModified = False

    # Simple sanity checks
    params = callNode.getChild("arguments")
    if len(params.children) != 1:
        log("Warning", "Expecting exactly one argument for qx.core.Environment.get. Ignoring this occurrence.", params)
        return treeModified

    firstParam = params.getChildByPosition(0)
    if not isStringLiteral(firstParam):
        # warning is currently covered in parsing code
        #log("Warning", "First argument must be a string literal! Ignoring this occurrence.", firstParam)
        return treeModified

    # skipping "relative" calls like "a.b.qx.core.Environment.get()"
    # (with the new ast and the isEnvironmentCall check, this cannot happen anymore)
    #qxIdentifier = treeutil.selectNode(callNode, "operand/variable/identifier[1]")
    #if not treeutil.checkFirstChainChild(qxIdentifier):
    #    log("Warning", "Skipping relative qx.core.Environment.get call. Ignoring this occurrence ('%s')." % treeutil.findChainRoot(qxIdentifier).toJavascript())
    #    return treeModified

    variantKey = firstParam.get("value")
    if variantKey in variantMap:
        confValue = variantMap[variantKey]
    else:
        return treeModified

    if variantKey == "qx.debug.databinding" and fileId == "qx.data.marshal.Json":
        import pydb
        pydb.debugger()

    # Replace the .get() with its value
    resultNode = reduceCall(callNode, confValue)
    treeModified = True

    # Reduce any potential operations with literals (+3, =='hugo', ?a:b, ...)
    treeMod = True
    while treeMod:
        resultNode, treeMod = reduceOperation(resultNode)

    # Reduce a potential condition
    _ = reduceLoop(resultNode)

    return treeModified
            #else:
            #    out += ','.join([l.text for l in labels])
            #try:
            output.write(out % voxel + "\n")
            #except:
            #    import pydb
            #    pydb.debugger()

        if args.dumpmapFile:
            try:
                ni_dump_data[coord_orig[0], coord_orig[1], coord_orig[2]] = \
                    [voxel['labels'][i]['label'].index
                     for i, ind in enumerate(args.levels)]
            except Exception, e:
                import pydb
                pydb.debugger()

    # if we opened any file -- close it
    if fileIn:
        fileIn.close()

    if args.dumpmapFile:
        ni_dump = nb.Nifti1Image(ni_dump_data, None, ni_dump.get_header())
        ni_dump.to_filename(args.dumpmapFile)

    if args.createSummary:
        if numVoxels == 0:
            verbose(1, "No matching voxels were found.")
        else:
            get_summary(args, summary, output)
"""Test to see if handles an exception when an error is raised.""" import os, sys top_builddir = ".." if top_builddir[-1] != os.path.sep: top_builddir += os.path.sep sys.path.insert(0, os.path.join(top_builddir, 'pydb')) top_srcdir = ".." if top_srcdir[-1] != os.path.sep: top_srcdir += os.path.sep sys.path.insert(0, os.path.join(top_srcdir, 'pydb')) print __doc__ import pydb pydb.debugger() # we go into the debugger print "Returning from the debugger" # Set a variable so we can show the state is # somewhere after the above set_trace() z='After set_trace' try: # The below statement should not enter the debugger x=2/0 except: pass # We should enter the debugger on the next statement. y=1/0 # Bullwinkle: This time, for sure! pass
#!/usr/bin/python
"""Towers of Hanoi - testing calling debugger with continue and cmds"""
import sys

sys.path.insert(0, '..')
import pydb
pydb.debugger(dbg_cmds=['set basename on', 'set linetrace on', 'step 20'])

def hanoi(n, a, b, c):
    if n - 1 > 0:
        hanoi(n - 1, a, c, b)
    print "Move disk %s to %s" % (a, b)
    if n - 1 > 0:
        hanoi(n - 1, c, b, a)

if __name__ == '__main__':
    i_args = len(sys.argv)
    if i_args != 1 and i_args != 2:
        print "*** Need number of disks or no parameter"
        sys.exit(1)
    n = 3
    sys.settrace(None)
    if i_args > 1:
        try:
            n = int(sys.argv[1])
        except ValueError, msg:
            print "** Expecting an integer, got: %s" % repr(sys.argv[1])
            sys.exit(2)
if i_args > 3:
    print "usage %s [disks [cmdfile]]" % sys.argv[0]
    sys.exit(1)

n = 3
if i_args > 1:
    try:
        n = int(sys.argv[1])
    except ValueError, msg:
        print "** Expecting an integer, got: %s" % repr(sys.argv[1])
        sys.exit(2)

if n < 1 or n > 100:
    print "*** number of disks should be between 1 and 100"
    sys.exit(2)

dbg_cmds = ['set basename on', 'where', 'list', 'step', 'step',
            'where', 'info locals', 'set linetrace on', 'continue']
import pydb
pydb.debugger(dbg_cmds)
hanoi(n, "a", "b", "c")
def main():
    # XXX: I cannot figure out why I have to re-import `os`
    # here; it has already been imported at the start of the
    # file ...
    import os

    # disable core dumps
    resource.setrlimit(resource.RLIMIT_CORE, (0, 0))

    # parse command-line options
    import argparse
    parser = argparse.ArgumentParser(
        description="""
Actions:

  graphs G N
    Generate the graphs occurring in M_{g,n}.

  homology G N
    Print homology ranks of M_{g,n}.

  latex G N [-s DIR] [-o FILE]
    Read the listings of M_{g,n} fatgraphs (from directory DIR)
    and output a pretty-print catalogue of the graphs as LaTeX documents.

  valences G N
    Print the vertex valences occurring in M_{g,n} graphs.

  shell
    Start an interactive PyDB shell.

  selftest
    Run internal code tests and report failures.
""",
        formatter_class=argparse.RawTextHelpFormatter)
    # positional arguments
    parser.add_argument('action', metavar='ACTION', default='help',
                        help="Action to perform, see above.")
    parser.add_argument('args', metavar='ARG', nargs='*',
                        help="Arguments depend on the actual action, see above.")
    # option arguments
    if not cython_compiled:
        parser.add_argument("-D", "--debug", nargs='?',
                            dest="debug", default=None, const='debug',
                            help="""Enable debug features:
* pydb -- run Python debugger if an error occurs
* profile -- dump profiler statistics in a .pf file.
Several features may be enabled by separating them
with a comma, as in '-D pydb,profile'.""")
    parser.add_argument("-l", "--logfile", action='store',
                        dest='logfile', default=None,
                        help="""Redirect log messages to the named file
(by default log messages are output to STDERR).""")
    parser.add_argument("-o", "--output", dest="outfile", default=None,
                        help="Save results into named file.")
    parser.add_argument("-s", "--checkpoint", dest="checkpoint_dir", default=None,
                        help="Directory for saving computation state.")
    parser.add_argument("-u", "--afresh", dest="restart",
                        action="store_false", default=True,
                        help="Do NOT restart computation from the saved state in checkpoint directory.")
    parser.add_argument("-v", "--verbose", action="count", dest="verbose", default=0,
                        help="Print informational and status messages as the computation goes on.")
    parser.add_argument('-V', '--version', action='version', version=__version__)
    cmdline = parser.parse_args()

    # make options available to loaded modules
    runtime.options = cmdline

    # print usage message if no args given
    if 'help' == cmdline.action:
        parser.print_help()
        sys.exit(0)

    # configure logging
    if cmdline.logfile is None:
        log_output = sys.stderr
    else:
        log_output = file(cmdline.logfile, 'a')
    if cmdline.verbose == 0:
        log_level = logging.ERROR
    elif cmdline.verbose == 1:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level,
                        stream=log_output,
                        format="%(asctime)s [%(levelname)s] %(message)s",
                        datefmt="%H:%M:%S")

    # ensure the proper optimization level is selected
    if __debug__ and cmdline.debug is None:
        try:
            os.execl(sys.executable, *([sys.executable, '-O'] + sys.argv))
        finally:
            logging.warning("Could not execute '%s', ignoring '-O' option."
                            % str.join(" ", [sys.executable, '-O'] + sys.argv))

    # enable optional features
    if not cython_compiled and cmdline.debug is not None:
        debug = cmdline.debug.split(",")
        if 'pydb' in debug:
            try:
                import pydb
                sys.excepthook = pydb.exception_hook
                logging.debug("PyDB enabled: exceptions will start a debugging session.")
            except ImportError:
                logging.warning("Could not import 'pydb' module - PyDB not enabled.")
        if 'profile' in debug:
            try:
                import hotshot
                pf = hotshot.Profile(__name__ + '.pf')
                pf.start()
                logging.debug("Started call profiling with 'hotshot' module.")
            except ImportError:
                logging.warning("Could not import 'hotshot' - call profiling *not* enabled.")

    # hack to allow 'N1,N2,...' or 'N1 N2 ...' syntaxes
    for (i, arg) in enumerate(cmdline.args):
        if arg.find(","):
            if arg.startswith('M'):
                cmdline.args[i:i+1] = arg[1:].split(",")
            else:
                cmdline.args[i:i+1] = arg.split(",")

    # open output file
    if cmdline.outfile is None:
        outfile = sys.stdout
    else:
        outfile = open(cmdline.outfile, 'w')

    # shell -- start interactive debugging shell
    if 'shell' == cmdline.action:
        if cython_compiled:
            logging.error("The 'shell' command is not available when compiled.")
            sys.exit(1)
        else:
            try:
                import pydb
            except ImportError:
                sys.stderr.write("ERROR: Could not import 'pydb' module - Aborting.\n")
                logging.warning("Could not import 'pydb' module - Aborting.")
                sys.exit(1)

            print("""Starting interactive session (with PyDB %s).
Any Python expression may be evaluated at the prompt.
All symbols from modules `rg`, `homology`, `graph_homology`
have already been imported into the main namespace.
""" % pydb.__version__)
            pydb.debugger([
                "from homology import *",
                "from graph_homology import *",
                "from rg import *",
            ])

    # selftest -- run doctests and acceptance tests on simple cases
    elif 'selftest' == cmdline.action:
        failures = 0

        import doctest
        import imp
        for module in ['rg',
                       'homology',
                       'graph_homology',
                       'combinatorics',
                       'iterators',
                       'cyclicseq',
                       ]:
            try:
                module_file, pathname, description = imp.find_module(module, fatghol.__path__)
                m = imp.load_module(module, module_file, pathname, description)
                logging.debug("Running Python doctest on '%s' module..." % module)
                # temporarily turn off logging to avoid cluttering the output
                logging.getLogger().setLevel(logging.ERROR)
                # run doctests
                (failed, tested) = doctest.testmod(m, name=module,
                                                   optionflags=doctest.NORMALIZE_WHITESPACE)
                # restore normal logging level
                logging.getLogger().setLevel(log_level)
                if failed > 0:
                    failures += 1
                    logging.error(" module '%s' FAILED %d tests out of %d."
                                  % (module, failed, tested))
                    sys.stdout.write("Module '%s' FAILED %d tests out of %d.\n"
                                     % (module, failed, tested))
                else:
                    if tested > 0:
                        logging.debug(" OK - module '%s' passed all doctests." % module)
                        sys.stdout.write("Module '%s' OK, passed all doctests.\n" % module)
                    else:
                        logging.warning(" module '%s' had no doctests." % module)
            except Exception, ex:
                try:
                    module_file.close()
                except:
                    pass
                raise ex

        def run_homology_selftest(output=sys.stdout):
            ok = True
            # second, try known cases and inspect results
            for (g, n, expected) in [
                    (0, 3, [1, 0, 0]),
                    (0, 4, [1, 2, 0, 0, 0, 0]),
                    (0, 5, [1, 5, 6, 0, 0, 0, 0, 0, 0]),
                    (1, 1, [1, 0, 0]),
                    (1, 2, [1, 0, 0, 0, 0, 0]),
                    (2, 1, [1, 0, 1, 0, 0, 0, 0, 0, 0]),
                    ]:
                output.write(" Computation of M_{%d,%d} homology: " % (g, n))
                # compute homology of M_{g,n}
                timing.start("homology M%d,%d" % (g, n))
                hs = compute_homology(g, n)
                timing.stop("homology M%d,%d" % (g, n))
                # check result
                if hs == expected:
                    output.write("OK (elapsed: %0.3fs)\n"
                                 % timing.get("homology M%d,%d" % (g, n)))
                else:
                    logging.error("Computation of M_{%d,%d} homology: FAILED, got %s expected %s"
                                  % (g, n, hs, expected))
                    output.write("FAILED, got %s expected %s\n" % (hs, expected))
                    ok = False
            return ok

        ## run the self-test suite 3 times:
        ##
        ## 1. without any persistence stuff enabled, so we can test the
        ##    real results of the algorithm and the performance
        sys.stdout.write("Checking homology algorithm (no checkpointing)\n")
        try:
            del runtime.options.checkpoint_dir
        except AttributeError:
            pass
        ok = run_homology_selftest(sys.stdout)
        if not ok:
            failures += 1

        ## 2. run with a temporary checkpoint directory, to test saving of state
        sys.stdout.write("Checking homology algorithm (checkpointing)\n")
        runtime.options.checkpoint_dir = tempfile.mkdtemp(prefix="mgn.selftest.")
        ok = run_homology_selftest(sys.stdout)
        if not ok:
            failures += 1

        ## 3. run with the same temporary directory, to test that
        ##    persistence picks up results of previous runs correctly
        sys.stdout.write("Checking homology algorithm (restoring from checkpointed state)\n")
        ok = run_homology_selftest(sys.stdout)
        if not ok:
            failures += 1

        # remove anything in the temporary directory
        if failures == 0:
            for entry in os.listdir(runtime.options.checkpoint_dir):
                os.remove(os.path.join(runtime.options.checkpoint_dir, entry))
            os.rmdir(runtime.options.checkpoint_dir)
        else:
            sys.stdout.write("Persisted files left in directory '%s'"
                             % runtime.options.checkpoint_dir)

        # exit code >0 is number of failures
        sys.exit(failures)
def run(args):
    #atlas.relativeToOrigin = args.coordRelativeToOrigin
    fileIn = None
    coordT = None
    niftiInput = None
    # define data type for coordinates
    if args.input_voxels:
        ctype = int
        query_voxel = True
    else:
        ctype = float
        query_voxel = False

    # Setup coordinates read-in
    volQForm = None

    #
    # compatibility with older talairachlabel
    if args.inputCoordFile:
        fileIn = open(args.inputCoordFile)
        coordsIterator = parsed_coordinates_iterator(
            args.inputLineFormat, fileIn, ctype=ctype)
    if args.inputVolFile:
        infile = args.inputVolFile
        # got a volume/file to process
        if __debug__:
            debug('ATL', "Testing if 0th element in the list is a volume")
        niftiInput = None
        try:
            niftiInput = nb.load(infile)
            if __debug__:
                debug('ATL', "Yes it is")
        except Exception as e:
            if __debug__:
                debug('ATL', "No it is not due to %s. Trying to parse the file" % e)

        if niftiInput:
            # if we got here -- it is a proper volume
            # XXX ask Michael to remove nasty warning message
            coordsIterator = select_from_volume_iterator(
                infile, args.lowerThreshold, args.upperThreshold)
            assert(coordT is None)
            coordT = Linear(niftiInput.header.get_qform())
            # lets store volumeQForm for possible conversion of voxels into coordinates
            volQForm = coordT
            # previous iterator returns space coordinates
            args.coordRelativeToOrigin = True
        else:
            raise ValueError('could not open volumetric input file')
    # input is stdin
    else:
        coordsIterator = parsed_coordinates_iterator(
            args.inputLineFormat, ctype=ctype)

    # Open and initialize atlas lookup
    if args.atlasFile is None:
        if args.atlasPath is None:
            args.atlasPath = KNOWN_ATLASES[args.atlasName]
        args.atlasFile = args.atlasPath % ({'name': args.atlasName})

    akwargs_common = {}
    if args.atlasImageFile:
        akwargs_common['image_file'] = args.atlasImageFile

    if not args.forbidDirectMapping \
           and niftiInput is not None and not args.transformationFile:
        akwargs = {'resolution': niftiInput.header.get_zooms()[0]}
        query_voxel = True   # if we can query directly by voxel, do so
        akwargs.update(akwargs_common)
        verbose(1, "Will attempt direct mapping from input voxels into atlas "
                   "voxels at resolution %.2f" % akwargs['resolution'])

        atlas = Atlas(args.atlasFile, **akwargs)

        # verify that we got the same qforms in atlas and in the data file
        if atlas.space != args.inputSpace:
            verbose(0,
                "Cannot do direct mapping between input image in %s space and"
                " atlas in %s space. Use -I switch to override input space if"
                " it misspecified, or use -T to provide transformation. Trying"
                " to proceed" % (args.inputSpace, atlas.space))
            query_voxel = False
        elif not (niftiInput.header.get_qform() == atlas._image.header.get_qform()).all():
            if args.atlasImageFile is None:
                warning(
                    "Cannot do direct mapping between files with different qforms."
                    " Please provide original transformation (-T)."
                    "\n Input qform:\n%s\n Atlas qform: \n%s"
                    % (niftiInput.header.get_qform(),
                       atlas._image.header.get_qform()), 1)
                # reset ability to query by voxels
                query_voxel = False
            else:
                warning(
                    "QForms are different between input image and "
                    "provided atlas image."
                    "\n Input qform of %s:\n%s\n Atlas qform of %s:\n%s"
                    % (infile, niftiInput.header.get_qform(),
                       args.atlasImageFile,
                       atlas._image.header.get_qform()), 1)
        else:
            coordT = None
    else:
        atlas = Atlas(args.atlasFile, **akwargs_common)

    if isinstance(atlas, ReferencesAtlas):
        args.referenceLevel = args.referenceLevel.replace('/', ' ')
        atlas.set_reference_level(args.referenceLevel)
        atlas.distance = args.maxDistance
    else:
        args.showReferencedCoordinates = False

    if isinstance(atlas, FSLProbabilisticAtlas):
        atlas.strategy = args.probStrategy
        atlas.thr = args.probThr

    ## If not in Talairach -- in MNI with voxel size 2x2x2
    # Original talairachlabel assumed that if respective to origin -- voxels were
    # scaled already.
    #if args.coordInTalairachSpace:
    #    voxelSizeOriginal = np.array([1, 1, 1])
    #else:
    #    voxelSizeOriginal = np.array([2, 2, 2])

    if args.coordInTalairachSpace:
        args.inputSpace = "Talairach"

    if not (args.inputSpace == atlas.space or
            (args.inputSpace in ["MNI", "Talairach"] and
             atlas.space == "Talairach")):
        raise XMLAtlasException("Unknown space '%s' which is not the same as atlas "
                                "space '%s' either" % (args.inputSpace, atlas.space))

    if query_voxel:
        # we do direct mapping
        coordT = None
    else:
        verbose(2, "Chaining needed transformations")
        # by default -- no transformation
        if args.transformationFile:
            #externals.exists('scipy', raise_=True)
            # scipy.io.read_array was deprecated a while back (around 0.8.0)
            from numpy import loadtxt
            transfMatrix = loadtxt(args.transformationFile)
            coordT = Linear(transfMatrix, previous=coordT)
            verbose(2, "coordT got linear transformation from file %s"
                       % args.transformationFile)

        voxelOriginOriginal = None
        voxelSizeOriginal = None

        if not args.coordRelativeToOrigin:
            if args.inputSpace == "Talairach":
                # assume that atlas is in Talairach space already
                voxelOriginOriginal = atlas.origin
                voxelSizeOriginal = np.array([1, 1, 1])
            elif args.inputSpace == "MNI":
                # need to adjust for MNI origin as it was thought to be at
                # in terms of voxels
                #voxelOriginOriginal = np.array([46, 64, 37])
                voxelOriginOriginal = np.array([45, 63, 36])
                voxelSizeOriginal = np.array([2.0, 2.0, 2.0])
                warning("Assuming elderly sizes for MNI volumes with"
                        " origin %s and sizes %s" %
                        (repr(voxelOriginOriginal), repr(voxelSizeOriginal)))

        if not (voxelOriginOriginal is None and voxelSizeOriginal is None):
            verbose(2, "Assigning origin adjusting transformation with" +
                       " origin=%s and voxelSize=%s" %
                       (repr(voxelOriginOriginal), repr(voxelSizeOriginal)))
            coordT = SpaceTransformation(origin=voxelOriginOriginal,
                                         voxelSize=voxelSizeOriginal,
                                         to_real_space=True, previous=coordT)

        # besides adjusting for different origin we need to transform into
        # Talairach space
        if args.inputSpace == "MNI" and atlas.space == "Talairach":
            verbose(2, "Assigning transformation %s" % args.MNI2TalTransformation)
            # What transformation to use
            coordT = {"matthewbrett": MNI2Tal_MatthewBrett,
                      "lancaster07fsl": mni_to_tal_lancaster07_fsl,
                      "lancaster07pooled": mni_to_tal_lancaster07pooled,
                      "meyerlindenberg98": mni_to_tal_meyer_lindenberg98,
                      "yohflirt": mni_to_tal_yohflirt
                      }[args.MNI2TalTransformation](previous=coordT)

        if args.inputSpace == "MNI" and args.halfVoxelCorrection:
            originCorrection = np.array([0.5, 0.5, 0.5])
        else:
            # perform transformation any way to convert to voxel space (integers)
            originCorrection = None

        # To be closer to what original talairachlabel did -- add 0.5 to each coord
        coordT = SpaceTransformation(origin=originCorrection, voxelSize=None,
                                     to_real_space=False, previous=coordT)

    if args.createSummary:
        summary = {}
        if args.levels is None:
            args.levels = str(min(4, atlas.nlevels - 1))

    if args.levels is None:
        args.levels = list(range(atlas.nlevels))
    elif isinstance(args.levels, str):
        if args.levels == 'list':
            print("Known levels and their indices:\n" + atlas.levels_listing())
            sys.exit(0)
        slevels = args.levels.split(',')
        args.levels = []
        for level in slevels:
            try:
                int_level = int(level)
            except:
                if level in atlas.levels:
                    int_level = atlas.levels[level].index
                else:
                    raise RuntimeError(
                        "Unknown level '%s'. " % level +
                        "Known levels and their indices:\n" + atlas.levels_listing())
            args.levels += [int_level]
    else:
        raise ValueError("Don't know how to handle list of levels %s. "
                         "Example is '1,2,3'" % (args.levels,))

    verbose(3, "Operating on following levels: %s" % args.levels)
    # assign levels to the atlas
    atlas.default_levels = args.levels

    if args.outputFile:
        output = open(args.outputFile, 'w')
    else:
        output = sys.stdout

    # validity check
    if args.dumpmapFile:
        if niftiInput is None:
            raise RuntimeError("You asked to dump indexes into the volume, "
                               "but input wasn't a volume")
        ni_dump = nb.load(infile)
        ni_dump_data = np.zeros(ni_dump.header.get_data_shape()[:3] +
                                (len(args.levels),))

    # Also check if we have provided voxels but not querying by voxels
    if args.input_voxels:
        if coordT is not None:
            raise NotImplementedError("Cannot perform voxels querying having coordT defined")
        if not query_voxel:
            raise NotImplementedError("query_voxel was reset to False, can't do queries by voxel")

    # Read coordinates
    numVoxels = 0
    for c in coordsIterator:
        value, coord_orig, t = c[0], c[1:4], c[4]
        if __debug__:
            debug('ATL', "Obtained coord_orig=%s with value %s"
                         % (repr(coord_orig), value))

        lt, ut = args.lowerThreshold, args.upperThreshold
        if lt is not None and value < lt:
            verbose(5, "Value %s is less than lower threshold %s, thus voxel "
                       "is skipped" % (value, args.lowerThreshold))
            continue
        if ut is not None and value > ut:
            verbose(5, "Value %s is greater than upper threshold %s, thus voxel "
                       "is skipped" % (value, args.upperThreshold))
            continue

        numVoxels += 1

        # Apply necessary transformations
        coord = coord_orig = np.array(coord_orig)

        if coordT:
            coord = coordT[coord_orig]

        # Query label
        if query_voxel:
            voxel = atlas[coord]
        else:
            voxel = atlas(coord)
        voxel['coord_orig'] = coord_orig
        voxel['value'] = value
        voxel['t'] = t

        if args.createSummary:
            summaryIndex = ""
            voxel_labels = voxel["labels"]
            for i, ind in enumerate(args.levels):
                voxel_label = voxel_labels[i]
                text = present_labels(args, voxel_label)
                #if len(voxel_label):
                #    assert(voxel_label['index'] == ind)
                summaryIndex += text + " / "
            if summaryIndex not in summary:
                summary[summaryIndex] = {'values': [], 'max': value,
                                         'maxcoord': coord_orig}
                if 'voxel_referenced' in voxel:
                    summary[summaryIndex]['distances'] = []
            summary_ = summary[summaryIndex]
            summary_['values'].append(value)
            if summary_['max'] < value:
                summary_['max'] = value
                summary_['maxcoord'] = coord_orig
            if 'voxel_referenced' in voxel:
                if voxel['voxel_referenced'] and voxel['distance'] >= 1e-3:
                    verbose(5, 'Appending distance %e for voxel at %s'
                               % (voxel['distance'], voxel['coord_orig']))
                    summary_['distances'].append(voxel['distance'])
        else:
            # Display while reading/processing
            first, out = True, ""

            if args.showValues:
                out += "%(value)5.2f "
            if args.showOriginalCoordinates:
                out += "%(coord_orig)s ->"
            if args.showReferencedCoordinates:
                out += " %(voxel_referenced)s=>%(distance).2f=>%(voxel_queried)s ->"
            if args.showTargetCoordinates:
                out += " %(coord_queried)s: "
                #out += "(%d,%d,%d): " % tuple(map(lambda x:int(round(x)),coord))
            if args.showTargetVoxel:
                out += " %(voxel_queried)s ->"

            if args.levels is None:
                args.levels = list(range(len(voxel['labels'])))

            labels = [present_labels(args, voxel['labels'][i]) for i in args.levels]
            out += ','.join(labels)
            #if args.abbreviatedLabels:
            #    out += ','.join([l.abbr for l in labels])
            #else:
            #    out += ','.join([l.text for l in labels])
            #try:
            output.write(out % voxel + "\n")
            #except:
            #    import pydb
            #    pydb.debugger()

        if args.dumpmapFile:
            try:
                ni_dump_data[coord_orig[0], coord_orig[1], coord_orig[2]] = \
                    [voxel['labels'][i]['label'].index
                     for i, ind in enumerate(args.levels)]
            except Exception as e:
                import pydb
                pydb.debugger()

    # if we opened any file -- close it
    if fileIn:
        fileIn.close()

    if args.dumpmapFile:
        ni_dump = nb.Nifti1Image(ni_dump_data, None, ni_dump.header)
        ni_dump.to_filename(args.dumpmapFile)

    if args.createSummary:
        if numVoxels == 0:
            verbose(1, "No matching voxels were found.")
        else:
            get_summary(args, summary, output)

    if args.outputFile:
        output.close()