Example #1
def debug(f, *args, **kwargs):
    """
    Allows the user to launch pdb in any function.

    Parameters
    ----------
    f: function
        The function you wish to debug

    args:
        The arguments that need to be passed to f

    kwargs:
        Named arguments that must be passed to f

    Returns
    -------
    None

    Notes
    -----
    Taken from Wes McKinney's book Python for Data Analysis
    """
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwargs)
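
For example (a hypothetical call; divide() is made up for illustration, it is not part of the original snippet):

def divide(a, b):
    # hypothetical target function, only to show how debug() is invoked
    return a / b

debug(divide, 10, b=2)   # opens the IPython debugger at the first line of divide()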
Example #2
File: ipdb.py Project: pkimber/my-memory
def post_mortem(tb):
    ip = ipapi.get()
    def_colors = ip.colors
    p = Pdb(def_colors)
    p.reset()
    while tb.tb_next is not None:
        tb = tb.tb_next
    p.interaction(tb.tb_frame, tb)
Example #3
def post_mortem(tb):
    update_stdout()
    wrap_sys_excepthook()
    p = Pdb(def_colors)
    p.reset()
    if tb is None:
        return
    p.interaction(None, tb)
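
Note: update_stdout() and wrap_sys_excepthook() in the ipdb snippets are package-internal helpers that are not shown here. A plausible sketch of the excepthook wrapping, reconstructed from the inline variants in Examples #7 and #8 (the real ipdb code may differ):

import sys
from IPython.core.debugger import BdbQuit_excepthook

def wrap_sys_excepthook():
    # Wrap sys.excepthook only once, so that quitting the debugger (BdbQuit)
    # is swallowed instead of printing a traceback.
    if sys.excepthook is not BdbQuit_excepthook:
        BdbQuit_excepthook.excepthook_ori = sys.excepthook
        sys.excepthook = BdbQuit_excepthook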
Example #4
File: bug.py Project: jagguli/PyBug
def trace():
    """like pdb.set_trace() except sets a breakpoint for the next line
    
    works with nose testing framework (uses sys.__stdout__ for output)
    """
    frame = sys._getframe().f_back
    pdb = Pdb(color_scheme='Linux', stdout=sys.__stdout__)
    pdb.set_trace(frame)
Example #5
File: __main__.py Project: dmsul/ipdb
def post_mortem(tb):
    update_stdout()
    wrap_sys_excepthook()
    p = Pdb(def_colors)
    p.reset()
    if tb is None:
        return
    while tb.tb_next is not None:
        tb = tb.tb_next
    p.interaction(tb.tb_frame, tb)
Example #6
def debug(f, *args, **kwargs):
    from pdb import Pdb as OldPdb
    try:
        from IPython.core.debugger import Pdb
        kw = dict(color_scheme='Linux')
    except ImportError:
        Pdb = OldPdb
        kw = {}
    pdb = Pdb(**kw)
    return pdb.runcall(f, *args, **kwargs)
Example #7
def post_mortem(tb):
    update_stdout()
    sys.excepthook = lambda et, ev, tb, orig_hook=sys.excepthook: \
      BdbQuit_excepthook(et, ev, tb, orig_hook)
    p = Pdb(def_colors)
    p.reset()
    if tb is None:
        return
    while tb.tb_next is not None:
        tb = tb.tb_next
    p.interaction(tb.tb_frame, tb)
Example #8
File: __main__.py Project: moon2l/ipdb
def post_mortem(tb):
    update_stdout()
    BdbQuit_excepthook.excepthook_ori = sys.excepthook
    sys.excepthook = BdbQuit_excepthook
    p = Pdb(def_colors)
    p.reset()
    if tb is None:
        return
    while tb.tb_next is not None:
        tb = tb.tb_next
    p.interaction(tb.tb_frame, tb)
Example #9
class IPdbKernel(Kernel):
    implementation = 'IPdbKernel'
    implementation_version = '0.1'
    language = 'IPdb'
    language_version = '0.1'
    language_info = {'mimetype': 'text/plain'}
    banner = "IPython debugger kernel"

    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)

        # Instantiate IPython.core.debugger.Pdb here, pass it a phony 
        # stdout that provides a dummy flush() method and a write() method
        # that internally sends data using a function so that it can
        # be initialized to use self.send_response()
        write_func = lambda s: self.send_response(self.iopub_socket,
                                                  'stream',
                                                  {'name': 'stdout',
                                                   'text': s})
        sys.excepthook = functools.partial(BdbQuit_excepthook,
                                           excepthook=sys.excepthook)
        self.debugger = Pdb(stdout=PhonyStdout(write_func))
        self.debugger.set_trace(sys._getframe().f_back)

    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        if not code.strip():
            return {'status': 'ok', 'execution_count': self.execution_count,
                    'payload': [], 'user_expressions': {}}

        # Process command:
        line = self.debugger.precmd(code)
        stop = self.debugger.onecmd(line)
        stop = self.debugger.postcmd(stop, line)
        if stop:
            self.debugger.postloop()

        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}

    def do_complete(self, code, cursor_pos):
        code = code[:cursor_pos]

        default = {'matches': [], 'cursor_start': 0,
                   'cursor_end': cursor_pos, 'metadata': dict(),
                   'status': 'ok'}

        if not code or code[-1] == ' ':
            return default

        # Run Pdb.completenames on code, extend matches with results:
        matches = self.debugger.completenames(code)

        if not matches:
            return default

        return {'matches': sorted(matches), 'cursor_start': cursor_pos-len(code),
                'cursor_end': cursor_pos, 'metadata': dict(),
                'status': 'ok'}
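
The PhonyStdout class passed to Pdb above is not included in the snippet. A minimal sketch of what the comment in __init__ describes (an assumption, not the project's actual code):

class PhonyStdout(object):
    """File-like stand-in: forwards write() to a callback, flush() is a no-op."""
    # Assumed shape of the helper; the real class in the project may differ.

    def __init__(self, write_func):
        self._write_func = write_func

    def write(self, s):
        self._write_func(s)

    def flush(self):
        pass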
Example #10
    def do_list_relative(self, arg):
        try:
            relative_amount = int(arg)
        except:
            try:
                relative_amount = eval(arg, self.curframe.f_globals, self.curframe.f_locals) or 5
                if isinstance(relative_amount, tuple):
                    return Pdb.do_list(self, arg)
            except:
                relative_amount = 5

        top = max(1, self.curframe.f_lineno - relative_amount)
        bottom = min(len(open(self.curframe.f_code.co_filename, 'rb').readlines()),
                     self.curframe.f_lineno + relative_amount)
        return Pdb.do_list(self, '{0},{1}'.format(top, bottom))
Example #11
File: tools.py Project: Doik/micropsi2
def interactive_pdb():
    """ inspect the traceback of the most recent exception. """
    import traceback
    from IPython.core.debugger import Pdb

    if last_tb is None:
        print("Nothing to debug.")
        return

    # print to stdout (sic!) what this pdb session is about
    msg = traceback.format_tb(last_tb)[-5:]
    print('Starting PDB session for:\n\n\033[94m%s\033[0m\n' % ''.join(msg))

    pdb = Pdb()
    pdb.reset()
    pdb.interaction(frame=None, traceback=last_tb)
Example #12
def main():
    if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
        print "usage: ipdb.py scriptfile [arg] ..."
        sys.exit(2)

    mainpyfile = sys.argv[1]     # Get script filename
    if not os.path.exists(mainpyfile):
        print 'Error:', mainpyfile, 'does not exist'
        sys.exit(1)

    del sys.argv[0]         # Hide "pdb.py" from argument list

    # Replace pdb's dir with script's dir in front of module search path.
    sys.path[0] = os.path.dirname(mainpyfile)

    # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
    # modified by the script being debugged. It's a bad idea when it was
    # changed by the user from the command line. There is a "restart" command
    # which allows explicit specification of command line arguments.
    pdb = Pdb(def_colors)
    while 1:
        try:
            pdb._runscript(mainpyfile)
            if pdb._user_requested_quit:
                break
            print "The program finished and will be restarted"
        except Restart:
            print "Restarting", mainpyfile, "with arguments:"
            print "\t" + " ".join(sys.argv[1:])
        except SystemExit:
            # In most cases SystemExit does not warrant a post-mortem session.
            print "The program exited via sys.exit(). Exit status: ",
            print sys.exc_info()[1]
        except:
            traceback.print_exc()
            print "Uncaught exception. Entering post mortem debugging"
            print "Running 'cont' or 'step' will restart the program"
            t = sys.exc_info()[2]
            pdb.interaction(None, t)
            print "Post mortem debugger finished. The " + mainpyfile + \
                  " will be restarted"
Example #13
def debug_shell(user_ns, user_global_ns, traceback=None, execWrapper=None):
    ipshell = None
    if traceback:
        try:
            from IPython.core.debugger import Pdb
            from IPython.terminal.ipapp import TerminalIPythonApp
            ipapp = TerminalIPythonApp.instance()
            ipapp.interact = False  # Avoid output (banner, prints)
            ipapp.initialize(argv=[])
            def_colors = ipapp.shell.colors
            pdb_obj = Pdb(def_colors)
            pdb_obj.botframe = None  # not sure. exception otherwise at quit
            ipshell = lambda: pdb_obj.interaction(None, traceback=traceback)
        except Exception:
            pass
    if not ipshell:
        try:
            import IPython
            import IPython.terminal.embed
            class DummyMod(object): pass
            module = DummyMod()
            module.__dict__ = user_global_ns
            module.__name__ = "DummyMod"
            ipshell = IPython.terminal.embed.InteractiveShellEmbed(
                user_ns=user_ns, user_module=module)
        except Exception:
            pass
        else:
            if execWrapper:
                old = ipshell.run_code
                ipshell.run_code = lambda code: execWrapper(lambda: old(code))
    if ipshell:
        ipshell()
    else:
        if traceback:
            import pdb
            pdb.post_mortem(traceback)
        else:
            simple_debug_shell(user_global_ns, user_ns)
Example #14
 def precmd(self, line):
     line = Pdb.precmd(self, line)
     if line:
         if line.endswith('??'):
             line = 'pinfo2 {0}'.format(line[:-2])
         elif line.endswith('?!'):
             line = 'psource {0}'.format(line[:-2])
         elif line.endswith('?@'):
             line = 'pdef {0}'.format(line[:-2])
         elif line.endswith('?'):
             line = 'pinfo {0}'.format(line[:-1])
         elif line.startswith('!'):
             line = 'forcecommandmagic {0}'.format(line[1:])
     return line
Example #15
def main():
  """ This is the main entry point for training and testing your classifier. """
  classifier = Classifier()
  experiment = Experiment(classifier)
  experiment.train('training')
  
  # Sanity check. Should get 100% on the training images. 
  report = experiment.test('training')
  report.print_summary()
  
  Pdb().set_trace()

  test_datasets = 'translations rotations scales noise occlusion distortion blurry_checkers'
  final_report = ClassificationReport("All Datasets")
  
  # Print the classification results of each test
  for dataset in test_datasets.split():
    report = experiment.test('testing/' + dataset)
    report.print_summary()
    #report.print_errors() # Uncomment this to print the error images for debugging. 
    final_report.extend(report)
  
  final_report.print_summary()
Example #16
    def __init__(self, **kwargs):
        Kernel.__init__(self, **kwargs)

        # Instantiate IPython.core.debugger.Pdb here, pass it a phony 
        # stdout that provides a dummy flush() method and a write() method
        # that internally sends data using a function so that it can
        # be initialized to use self.send_response()
        write_func = lambda s: self.send_response(self.iopub_socket,
                                                  'stream',
                                                  {'name': 'stdout',
                                                   'text': s})
        sys.excepthook = functools.partial(BdbQuit_excepthook,
                                           excepthook=sys.excepthook)
        self.debugger = Pdb(stdout=PhonyStdout(write_func))
        self.debugger.set_trace(sys._getframe().f_back)
Example #17
 def __init__(self, *args, **kwargs):
     Pdb.__init__(self, *args, **kwargs)
     self._ptcomp = None
     self.pt_init()
Example #18
 def __init__(self, *args, **kwargs):
     Pdb.__init__(self, *args, **kwargs)
     self._ptcomp = None
     self.pt_init()
Example #19
def batch_block(config, readers, window, overwrite=False):
    import logging

    import numpy as np

    from yatsm import io
    from yatsm.results import HDF5ResultsStore
    from yatsm.pipeline import Pipe

    logger = logging.getLogger('yatsm')

    def sel_pix(pipe, y, x):
        return Pipe(data=pipe['data'].sel(y=y, x=x),
                    record=pipe.get('record', None))

    logger.info('Working on window: {}'.format(window))
    data = io.read_and_preprocess(config['data']['datasets'],
                                  readers,
                                  window,
                                  out=None)

    store_kwds = {
        'window': window,
        'reader': config.primary_reader,
        'root': config['results']['output'],
        'pattern': config['results']['output_prefix'],
    }

    # TODO: guess for number of records to store
    # from IPython.core.debugger import Pdb; Pdb().set_trace()
    with HDF5ResultsStore.from_window(**store_kwds) as store:
        # TODO: read this from pre-existing results
        pipe = Pipe(data=data)
        pipeline = config.get_pipeline(pipe, overwrite=overwrite)
        from IPython.core.debugger import Pdb
        Pdb().set_trace()

        # TODO: finish checking for resume
        if store.completed(pipeline) and not overwrite:
            logger.info('Already completed: {}'.format(store.filename))
            return

        pipe = pipeline.run_eager(pipe)

        record_results = defaultdict(list)
        n_ = data.y.shape[0] * data.x.shape[0]
        for i, (y, x) in enumerate(product(data.y.values, data.x.values)):
            logger.debug('Processing pixel {pct:>4.2f}%: y/x {y}/{x}'.format(
                pct=i / n_ * 100, y=y, x=x))
            pix_pipe = sel_pix(pipe, y, x)

            result = pipeline.run(pix_pipe, check_eager=False)

            # TODO: figure out what to do with 'data' results
            for k, v in result['record'].items():
                record_results[k].append(v)

        for name, result in record_results.items():
            record_results[name] = np.concatenate(result)

        if record_results:
            store.write_result(pipeline, record_results, overwrite=overwrite)
        # TODO: write out cached data
        return store.filename
Example #20
File: core.py Project: wtak23/pytak
def debug(f, *args, **kwargs):
    """ from wes mckiness book """
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwargs)
Example #21
def debug(f,*args,**kargs): from IPython.core.debugger import Pdb; pdb=Pdb(color_scheme='Linux'); return pdb.runcall(f,*args,**kargs) # dbg_run_fn
def fn1(): a1=1.1; b1='b1_'; print 'in fn1'; fn2(); return
Example #22
File: trace.py Project: drupel/sage
def trace(code, preparse=True):
    r"""
    Evaluate Sage code using the interactive tracer and return the
    result. The string ``code`` must be a valid expression
    enclosed in quotes (no assignments - the result of the expression
    is returned). In the Sage notebook this just raises a
    NotImplementedException.

    INPUT:


    -  ``code`` - str

    -  ``preparse`` - bool (default: True); if True, run
       expression through the Sage preparser.


    REMARKS: This function is extremely powerful! For example, if you
    want to step through each line of execution of, e.g.,
    ``factor(100)``, type

    ::

        sage: trace("factor(100)")             # not tested

    then at the (Pdb) prompt type ``s`` (or ``step``), then press return
    over and over to step through every line of Python that is called
    in the course of the above computation. Type ``?`` at any time for
    help on how to use the debugger (e.g., ``l`` lists 11 lines around
    the current line; ``bt`` gives a back trace, etc.).

    Setting a break point: If you have some code in a file and would
    like to drop into the debugger at a given point, put the following
    code at that point in the file:

    ``import pdb; pdb.set_trace()``

    For an article on how to use the Python debugger, see
    http://www.onlamp.com/pub/a/python/2005/09/01/debugger.html

    TESTS:

    The only real way to test this is via pexpect spawning a
    sage subprocess that uses IPython.

    ::

        sage: import pexpect
        sage: s = pexpect.spawn('sage')
        sage: _ = s.sendline("trace('print(factor(10))'); print(3+97)")
        sage: _ = s.sendline("s"); _ = s.sendline("c");
        sage: _ = s.expect('100', timeout=90)

    Seeing the ipdb prompt and the 2 \* 5 in the output below is a
    strong indication that the trace command worked correctly.

    ::

        sage: print(s.before[s.before.find('--'):])
        --...
        ipdb> c
        2 * 5

    We test what happens in notebook embedded mode::

        sage: sage.plot.plot.EMBEDDED_MODE = True
        sage: trace('print(factor(10))')
        Traceback (most recent call last):
        ...
        NotImplementedError: the trace command is not implemented in the Sage notebook; you must use the command line.
    """
    from sage.plot.plot import EMBEDDED_MODE
    if EMBEDDED_MODE:
        raise NotImplementedError("the trace command is not implemented in the Sage notebook; you must use the command line.")

    from IPython.core.debugger import Pdb
    pdb = Pdb()

    try:
        ipython = get_ipython()
    except NameError:
        raise NotImplementedError("the trace command can only be run from the Sage command-line")

    from sage.repl.preparse import preparse
    code = preparse(code)
    return pdb.run(code, ipython.user_ns)
Example #23
def run(statement, globals=None, locals=None):
    Pdb(def_colors).run(statement, globals, locals)
Example #24
 def debug(f, *args, **kwargs):
     pdb = Pdb(color_scheme='Linux')
     return pdb.runcall(f, *args, **kwargs)
Example #25
def runeval(expression, globals=None, locals=None):
    return Pdb(def_colors).runeval(expression, globals, locals)
Example #26
def set_trace(frame=None):
    update_stdout()
    wrap_sys_excepthook()
    if frame is None:
        frame = sys._getframe().f_back
    Pdb(def_colors).set_trace(frame)
Example #27
    def analysisf(self,
                  fwav,
                  ff0,
                  f0_min,
                  f0_max,
                  fspec,
                  faper,
                  fvuv,
                  preproc_hp=None):
        print('Extracting WORLD features from: ' + fwav)

        wav, fs, _ = sp.wavread(fwav)

        if preproc_hp == 'auto': preproc_hp = f0_min
        self.preprocwav(wav, fs, highpass=preproc_hp)

        import pyworld as pw

        if 0:
            # Check direct copy re-synthesis without compression/encoding
            print(pw.__file__)
            # _f0, ts = pw.dio(wav, fs, f0_floor=f0_min, f0_ceil=f0_max, channels_in_octave=2, frame_period=self.shift*1000.0)
            _f0, ts = pw.dio(wav,
                             fs,
                             f0_floor=f0_min,
                             f0_ceil=f0_max,
                             channels_in_octave=2,
                             frame_period=self.shift * 1000.0)
            # _f0, ts = pw.harvest(wav, fs)
            f0 = pw.stonemask(wav, _f0, ts, fs)
            SPEC = pw.cheaptrick(wav, f0, ts, fs, fft_size=self.dftlen)
            APER = pw.d4c(wav, f0, ts, fs, fft_size=self.dftlen)
            resyn = pw.synthesize(f0.astype('float64'), SPEC.astype('float64'),
                                  APER.astype('float64'), fs,
                                  self.shift * 1000.0)
            sp.wavwrite('resynth.wav',
                        resyn,
                        fs,
                        norm_abs=True,
                        force_norm_abs=True,
                        verbose=1)
            from IPython.core.debugger import Pdb
            Pdb().set_trace()

        _f0, ts = pw.dio(wav,
                         fs,
                         f0_floor=f0_min,
                         f0_ceil=f0_max,
                         channels_in_octave=2,
                         frame_period=self.shift * 1000.0)
        f0 = pw.stonemask(wav, _f0, ts, fs)
        SPEC = pw.cheaptrick(wav, f0, ts, fs, fft_size=self.dftlen)
        # SPEC = 10.0*np.sqrt(SPEC) # TODO Best gain correction I could find. Hard to find the good one between PML and WORLD different syntheses
        APER = pw.d4c(wav, f0, ts, fs, fft_size=self.dftlen)

        unvoiced = np.where(f0 < 20)[0]
        f0 = np.interp(ts, ts[f0 > 0], f0[f0 > 0])
        f0 = np.log(f0)
        makedirs(os.path.dirname(ff0))
        f0.astype('float32').tofile(ff0)

        vuv = np.ones(len(f0))
        vuv[unvoiced] = 0
        makedirs(os.path.dirname(fvuv))
        vuv.astype('float32').tofile(fvuv)

        SPEC = self.compress_spectrum(SPEC, fs, self.spec_size)
        makedirs(os.path.dirname(fspec))
        SPEC.astype('float32').tofile(fspec)

        APER = sp.linbnd2fwbnd(APER, fs, self.dftlen, self.aper_size)
        APER = sp.mag2db(APER)
        makedirs(os.path.dirname(faper))
        APER.astype('float32').tofile(faper)

        # CMP = np.concatenate((f0.reshape((-1,1)), SPEC, APER, vuv.reshape((-1,1))), axis=1) # (This is not a necessity)

        if 0:
            import matplotlib.pyplot as plt
            plt.ion()
            resyn = self.synthesis(fs, CMP)
            sp.wavwrite('resynth.wav',
                        resyn,
                        fs,
                        norm_abs=True,
                        force_norm_abs=True,
                        verbose=1)
            from IPython.core.debugger import Pdb
            Pdb().set_trace()
Example #28
def synthesize(
        fs,
        f0s,
        SPEC,
        NM=None,
        wavlen=None,
        ener_multT0=False,
        nm_cont=False,  # If False, force binary state of the noise mask (by thresholding at 0.5)
        nm_lowpasswinlen=9,
        hp_f0coef=0.5,  # factor of f0 for the cut-off of the high-pass filter (def. 0.5*f0)
        antipreechohwindur=0.001,  # [s] Use to damp the signal at the beginning of the signal AND at the end of it
        # The following options are for post-processing the features, after the generation/transformation and thus before waveform synthesis
        pp_f0_rmsteps=False,  # Removes steps in the f0 curve (see sigproc.resampling.f0s_rmsteps(.))
        pp_f0_smooth=None,  # Smooth the f0 curve using median and FIR filters of given window duration [s]
        pp_atten1stharminsilences=None,  # Typical value is -25
        verbose=1):

    # Copy the inputs to avoid modifying them
    f0s = f0s.copy()
    SPEC = SPEC.copy()
    if not NM is None: NM = NM.copy()
    else: NM = np.zeros(SPEC.shape)

    # Check the size of the inputs
    if f0s.shape[0] != SPEC.shape[0]:
        raise ValueError(
            'F0 size {} and spectrogram size {} do not match'.format(
                f0s.shape[0], SPEC.shape[0]))
    if not NM is None:
        if SPEC.shape != NM.shape:
            raise ValueError(
                'spectrogram size {} and NM size {} do not match.'.format(
                    SPEC.shape, NM.shape))

    if wavlen == None: wavlen = int(np.round(f0s[-1, 0] * fs))
    dftlen = (SPEC.shape[1] - 1) * 2
    shift = np.median(np.diff(f0s[:, 0]))
    if verbose > 0:
        print(
            'PM Synthesis (dur={}s, fs={}Hz, f0 in [{:.0f},{:.0f}]Hz, shift={}s, dftlen={})'
            .format(wavlen / float(fs), fs, np.min(f0s[:, 1]),
                    np.max(f0s[:, 1]), shift, dftlen))

    # Prepare the features

    # Enforce continuous f0
    f0s[:, 1] = np.interp(f0s[:, 0], f0s[f0s[:, 1] > 0, 0], f0s[f0s[:, 1] > 0,
                                                                1])
    # If asked, removes steps in the f0 curve
    if pp_f0_rmsteps:
        f0s = sp.f0s_rmsteps(f0s)
    # If asked, smooth the f0 curve using median and FIR filters
    if not pp_f0_smooth is None:
        print('    Smoothing f0 curve using {}[s] window'.format(pp_f0_smooth))
        import scipy.signal as sig
        lf0 = np.log(f0s[:, 1])
        bcoefslen = int(0.5 * pp_f0_smooth / shift) * 2 + 1
        lf0 = sig.medfilt(lf0, bcoefslen)
        bcoefs = np.hamming(bcoefslen)
        bcoefs = bcoefs / sum(bcoefs)
        lf0 = sig.filtfilt(bcoefs, [1], lf0)
        f0s[:, 1] = np.exp(lf0)

    if not NM is None:
        # Remove noise below f0, as it is supposed to be already the case
        for n in range(NM.shape[0]):
            NM[n, :int((float(dftlen) / fs) * 2 * f0s[n, 1])] = 0.0

    if not nm_cont:
        print('    Forcing binary noise mask')
        NM[NM <= 0.5] = 0.0  # To be sure that voiced segments are not hoarse
        NM[NM > 0.5] = 1.0  # To be sure the noise segments are fully noisy

    # Generate the pulse positions [1](2) (i.e. the synthesis instants, the GCIs in voiced segments)
    ts = [0.0]
    while ts[-1] < float(wavlen) / fs:
        cf0 = np.interp(ts[-1], f0s[:, 0], f0s[:, 1])
        if cf0 < 50.0: cf0 = 50
        ts.append(ts[-1] + (1.0 / cf0))
    ts = np.array(ts)
    f0s = np.vstack((ts, np.interp(ts, f0s[:, 0], f0s[:, 1]))).T

    # Resample the features to the pulse positions

    # Spectral envelope uses the nearest, to avoid over-smoothing
    SPECR = np.zeros((f0s.shape[0], dftlen / 2 + 1))
    for n, t in enumerate(f0s[:, 0]):  # Nearest: Way better for plosives
        idx = int(np.round(t / shift))
        idx = np.clip(idx, 0, SPEC.shape[0] - 1)
        SPECR[n, :] = SPEC[idx, :]

    # Keep trace of the median energy [dB] over the whole signal
    ener = np.mean(SPECR, axis=1)
    idxacs = np.where(sp.mag2db(ener) > sp.mag2db(np.max(ener)) -
                      30)[0]  # Get approx active frames # TODO Param
    enermed = sp.mag2db(np.median(ener[idxacs]))  # Median energy [dB]
    ener = sp.mag2db(ener)

    # Resample the noise feature to the pulse positions
    # Smooth the frequency response of the mask in order to avoid Gibbs
    # (poor Gibbs nobody want to see him)
    nm_lowpasswin = np.hanning(nm_lowpasswinlen)
    nm_lowpasswin /= np.sum(nm_lowpasswin)
    NMR = np.zeros((f0s.shape[0], dftlen / 2 + 1))
    for n, t in enumerate(f0s[:, 0]):
        idx = int(np.round(t / shift))  # Nearest is better for plosives
        idx = np.clip(idx, 0, NM.shape[0] - 1)
        NMR[n, :] = NM[idx, :]
        if nm_lowpasswinlen > 1:
            NMR[n, :] = scipy.signal.filtfilt(nm_lowpasswin, [1.0], NMR[n, :])

    NMR = np.clip(NMR, 0.0, 1.0)

    # The complete waveform that we will fill with the pulses
    wav = np.zeros(wavlen)
    # Half window on the left of the synthesized segment to avoid pre-echo
    dampinhwin = np.hanning(
        1 +
        2 * int(np.round(antipreechohwindur * fs)))  # 1ms forced dampingwindow
    dampinhwin = dampinhwin[:(len(dampinhwin) - 1) / 2 + 1]

    for n, t in enumerate(f0s[:, 0]):
        f0 = f0s[n, 1]

        if verbose > 1:
            print "\rPM Synthesis (python) t={:4.3f}s f0={:3.3f}Hz               ".format(
                t, f0),

        # Window's length
        nbper = 4
        # TODO It should be ensured that the beginning and end of the
        #      noise is within the window. Nothing is doing this currently!
        winlen = int(np.max(
            (0.050 * fs, nbper * fs / f0)) / 2) * 2 + 1  # Has to be odd
        # TODO We also assume that the VTF's decay is shorter
        #      than nbper-1 periods (dangerous with high pitched tense voice).
        if winlen > dftlen:
            raise ValueError('winlen({})>dftlen({})'.format(winlen, dftlen))

        # Set the rough position of the pulse in the window (the closest sample)
        # We keep a third of the window (1 period) on the left because the
        # pulse signal is minimum phase. And 2/3rd (remaining 2 periods)
        # on the right to let the VTF decay.
        pulseposinwin = int((1.0 / nbper) * winlen)

        # The sample indices of the current pulse wrt. the final waveform
        winidx = int(round(fs * t)) + np.arange(winlen) - pulseposinwin

        # Build the pulse spectrum

        # Let start with a Dirac
        S = np.ones(dftlen / 2 + 1, dtype=np.complex64)

        # Add the delay to place the Dirac at the "GCI": exp(-j*2*pi*t_i)
        delay = -pulseposinwin - fs * (t - int(round(fs * t)) / float(fs))
        S *= np.exp((delay * 2j * np.pi / dftlen) * np.arange(dftlen / 2 + 1))

        # Add the spectral envelope
        # Both amplitude and phase
        E = SPECR[n, :]  # Take the amplitude from the given one
        if hp_f0coef != None:
            # High-pass it to avoid any residual DC component.
            fcut = hp_f0coef * f0
            if not pp_atten1stharminsilences is None and ener[
                    n] - enermed < pp_atten1stharminsilences:
                fcut = 1.5 * f0  # Try to cut between first and second harm
            HP = sp.butter2hspec(fcut, 4, fs, dftlen, high=True)
            E *= HP
            # Not necessarily good as it is non-causal, so make it causal...
            # ... together with the VTF response below.
        # Build the phase of the envelope from the amplitude
        E = sp.hspec2minphasehspec(E, replacezero=True)  # We spend 2 FFT here!
        S *= E  # Add it to the current pulse

        # Add energy correction wrt f0.
        # STRAIGHT and AHOCODER vocoders do it.
        # (why ? to equalize the energy when changing the pulse's duration ?)
        if ener_multT0:
            S *= np.sqrt(fs / f0)

        # Generate the segment of Gaussian noise
        # Use mid-points before/after pulse position
        if n > 0: leftbnd = int(np.round(fs * 0.5 * (f0s[n - 1, 0] + t)))
        else: leftbnd = int(np.round(fs * (t - 0.5 / f0s[n, 1])))  # int(0)
        if n < f0s.shape[0] - 1:
            rightbnd = int(np.round(fs * 0.5 * (t + f0s[n + 1, 0]))) - 1
        else:
            rightbnd = int(np.round(
                fs * (t + 0.5 / f0s[n, 1])))  #rightbnd=int(wavlen-1)
        gausswinlen = rightbnd - leftbnd  # The length of the noise segment
        gaussnoise4win = np.random.normal(size=(gausswinlen))  # The noise

        GN = np.fft.rfft(gaussnoise4win,
                         dftlen)  # Move the noise to freq domain
        # Normalize it by its energy (@Yannis, That's your answer at SSW9!)
        GN /= np.sqrt(np.mean(np.abs(GN)**2))
        # Place the noise within the pulse's window
        delay = (pulseposinwin - (leftbnd - winidx[0]))
        GN *= np.exp((delay * 2j * np.pi / dftlen) * np.arange(dftlen / 2 + 1))

        # Add it to the pulse spectrum, under the condition of the mask
        S *= GN**NMR[n, :]

        # That's it! the pulse spectrum is ready!

        # Move it to time domain
        deter = np.fft.irfft(S)[0:winlen]

        # Add half window on the left of the synthesized segment
        # to avoid any possible pre-echo
        deter[:leftbnd - winidx[0] - len(dampinhwin)] = 0.0
        deter[leftbnd - winidx[0] - len(dampinhwin):leftbnd -
              winidx[0]] *= dampinhwin

        # Add half window on the right
        # to avoid cutting the VTF response abruptly
        deter[-len(dampinhwin):] *= dampinhwin[::-1]

        # Write the synthesized segment in the final waveform
        if winidx[0] < 0 or winidx[-1] >= wavlen:
            # The window is partly outside of the waveform ...
            wav4win = np.zeros(winlen)
            # ... thus copy only the existing part
            itouse = np.logical_and(winidx >= 0, winidx < wavlen)
            wav[winidx[itouse]] += deter[itouse]
        else:
            wav[winidx] += deter

    if verbose > 1:
        print '\r                                                               \r',

    if verbose > 2:
        import matplotlib.pyplot as plt
        plt.ion()
        f, axs = plt.subplots(3, 1, sharex=True, sharey=False)
        times = np.arange(len(wav)) / float(fs)
        axs[0].plot(times, wav, 'k')
        axs[0].set_ylabel('Waveform\nAmplitude')
        axs[0].grid()
        axs[1].plot(f0s[:, 0], f0s[:, 1], 'k')
        axs[1].set_ylabel('F0\nFrequency [Hz]')
        axs[1].grid()
        axs[2].imshow(sp.mag2db(SPEC).T,
                      origin='lower',
                      aspect='auto',
                      interpolation='none',
                      extent=(f0s[0, 0], f0s[-1, 0], 0, 0.5 * fs))
        axs[2].set_ylabel('Amp. Envelope\nFrequency [Hz]')

        from IPython.core.debugger import Pdb
        Pdb().set_trace()

    return wav
Example #29
def set_trace():
    """A Poor mans break point"""
    # without this in iPython debugger can generate strange characters.
    from IPython.core.debugger import Pdb
    Pdb().set_trace(sys._getframe().f_back)
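
Typical usage is to drop the call at the point of interest, e.g. (a hypothetical placement, not from the source):

def compute(x):
    set_trace()        # the debugger stops inside compute(), right after this call
    return x ** 2

compute(3)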
Example #30
File: train.py Project: siida36/fairseq
def main(args):
    if args.max_tokens is None:
        args.max_tokens = 6000
    print(args)

    if not torch.cuda.is_available():
        raise NotImplementedError('Training on CPU is not supported')
    torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load dataset splits
    load_dataset_splits(task, ['train', 'valid'])

    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))

    # Build trainer
    if args.fp16:
        if torch.cuda.get_device_capability(0)[0] < 7:
            print('| WARNING: your device does NOT support faster training with --fp16,'
                  ' please switch to FP32 which is likely to be faster')
        trainer = FP16Trainer(args, task, model, criterion)
    else:
        if torch.cuda.get_device_capability(0)[0] >= 7:
            print('| NOTICE: your device may support faster training with --fp16')
        trainer = Trainer(args, task, model, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Initialize dataloader
    from IPython.core.debugger import Pdb; Pdb().set_trace()
    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        trainer.get_model().max_positions(),
    )
    epoch_itr = task.get_batch_iterator(
        dataset=task.dataset(args.train_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=8,
        seed=args.seed,
        num_shards=args.distributed_world_size,
        shard_id=args.distributed_rank,
    )

    # Load the latest checkpoint if one is available
    if not load_checkpoint(args, trainer, epoch_itr):
        # Send a dummy batch to warm the caching allocator
        dummy_batch = task.dataset('train').get_dummy_batch(args.max_tokens, max_positions)
        trainer.dummy_train_step(dummy_batch) # comment out for debug

    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        # train for one epoch
        train(args, trainer, task, epoch_itr)

        if epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
Example #31
def plot_features(wav=None,
                  fs=None,
                  f0s=None,
                  SPEC=None,
                  PDD=None,
                  NM=None):  # pragma: no cover
    # TODO Could test this by writing in a picture
    tstart = 0.0
    tend = 1.0
    nbview = 0
    if not wav is None: nbview += 1
    if not f0s is None: nbview += 1
    if not SPEC is None: nbview += 1
    if not PDD is None: nbview += 1
    if not NM is None: nbview += 1
    import matplotlib.pyplot as plt
    plt.ion()
    _, axs = plt.subplots(nbview, 1, sharex=True, sharey=False)
    if not isinstance(axs, np.ndarray): axs = np.array([axs])
    view = 0
    if not wav is None:
        times = np.arange(len(wav)) / float(fs)
        axs[view].plot(times, wav, 'k')
        axs[view].set_ylabel('Waveform\nAmplitude')
        axs[view].grid()
        axs[view].set_xlim((0.0, times[-1]))
        view += 1
    if not f0s is None:
        tstart = f0s[0, 0]
        tend = f0s[-1, 0]
        axs[view].plot(f0s[:, 0], f0s[:, 1], 'k')
        axs[view].set_ylabel('F0\nFrequency [Hz]')
        axs[view].grid()
        view += 1
    if not SPEC is None:
        axs[view].imshow(sp.mag2db(SPEC).T,
                         origin='lower',
                         aspect='auto',
                         interpolation='none',
                         extent=(tstart, tend, 0, 0.5 * fs),
                         cmap='jet')
        axs[view].set_ylabel('Amp. Envelope\nFrequency [Hz]')
        view += 1
    if not PDD is None:
        axs[view].imshow(PDD.T,
                         origin='lower',
                         aspect='auto',
                         interpolation='none',
                         extent=(tstart, tend, 0, 0.5 * fs),
                         cmap='jet',
                         vmin=0.0,
                         vmax=2.0)
        axs[view].set_ylabel('PDD\nFrequency [Hz]')
        view += 1
    if not NM is None:
        axs[view].imshow(NM.T,
                         origin='lower',
                         aspect='auto',
                         interpolation='none',
                         extent=(tstart, tend, 0, 0.5 * fs),
                         cmap='Greys',
                         vmin=0.0,
                         vmax=1.0)
        axs[view].set_ylabel('Noise Mask \nFrequency [Hz]')
        view += 1
    axs[-1].set_xlabel('Time [s]')
    from IPython.core.debugger import Pdb
    Pdb().set_trace()
Example #32
# IPython: step-by-step debugging of an imported module
from IPython.core.debugger import Pdb
ipdb = Pdb()
ipdb.runcall(my_imported_function, args...)
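
A self-contained variant of the same idea, with a made-up function standing in for the imported one:

from IPython.core.debugger import Pdb

def my_imported_function(x, y=1):
    # made-up stand-in for the imported function
    return x * y

ipdb = Pdb()
ipdb.runcall(my_imported_function, 3, y=4)   # steps into my_imported_function line by line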
Example #33
etd_plza_df = DataFrame(etd2('PLZA'))
etd_plza_df

# <codecell>

%%html
<iframe src="http://www.bart.gov/schedules/eta?stn=PLZA" width=800 height=600></iframe>

# <markdowncell>

# How to match the real time estimate with the schedule?
# 
# Let's focus just on the next arrival.
# 
# Might need 
# Possible that a train is late
# 
# In order to compare to schedule for a given station, need to know what route we're considering.

# <codecell>

from IPython.core.debugger import Pdb
pdb = Pdb()
pdb.runcall(bart.get_station_schedule, 'PLZA')

# <codecell>

stations_df.abbr

Example #34
def runcall(*args, **kwargs):
    return Pdb(def_colors).runcall(*args, **kwargs)
Example #35
def set_trace(): from IPython.core.debugger import Pdb; Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back) # set bpt
def debug(f,*args,**kargs): from IPython.core.debugger import Pdb; pdb=Pdb(color_scheme='Linux'); return pdb.runcall(f,*args,**kargs) # dbg_run_fn
Example #36
File: config.py Project: joeywen/zarkov
def postmortem_hook(etype, value, tb): # pragma no cover
    import pdb, traceback
    try:
        from IPython.core.debugger import Pdb
        sys.stderr.write('Entering post-mortem IPDB shell\n')
        p = Pdb(color_scheme='Linux')
        p.reset()
        p.setup(None, tb)
        p.print_stack_trace()
        sys.stderr.write('%s: %s\n' % ( etype, value))
        p.cmdloop()
        p.forget()
        # p.interaction(None, tb)
    except ImportError:
        sys.stderr.write('Entering post-mortem PDB shell\n')
        traceback.print_exception(etype, value, tb)
        pdb.post_mortem(tb)
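
For the hook to take effect it has to be installed before an uncaught exception propagates, e.g. (a usage sketch, not part of the zarkov source):

import sys
sys.excepthook = postmortem_hook  # uncaught exceptions now drop into an IPDB/PDB post-mortem shell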
Example #37
 def __init__(self, *args, pt_session_options=None, **kwargs):
     Pdb.__init__(self, *args, **kwargs)
     self._ptcomp = None
     self.pt_init(pt_session_options)
     self.thread_executor = ThreadPoolExecutor(1)
Example #38
 def set_trace():
     Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
Example #39
import argparse
import sys

import numpy as np

from . import straight

if __name__ == "__main__":
    argpar = argparse.ArgumentParser()
    argpar.add_argument("aperfile", help="Input bap file")
    argpar.add_argument("--aperdtype", default='float32', help="The data type of the bap file")
    argpar.add_argument("--aperdftlen", default=4096, type=int, help="Size of a frame for the aperiodicity")
    argpar.add_argument("--fs", default=16000.0, type=float, help="Sampling rate")
    argpar.add_argument("--sigp", default=1.2, type=float, help="Sigmoid parameter")
    argpar.add_argument("--bndapsize", default=None, type=int, help="Size of a frame for the band aperiodicities")
    argpar.add_argument("--bndapdtype", default='float32', help="The data format of the output aperiodicity")
    args = argpar.parse_args()

    APER = np.fromfile(args.aperfile, dtype=args.aperdtype)
    APER = APER.reshape((-1, args.aperdftlen))

    BNDAP = straight.aper2bndap(APER, args.fs, args.bndapsize, args.sigp)

    BNDAP.astype(args.bndapdtype).tofile(sys.stdout)

    if 0:
        import matplotlib.pyplot as plt
        plt.ion()
        #f, axs = plt.subplots(2, 1, sharex=True, sharey=False)
        #axs[0].imshow(BAP.T, origin='lower', aspect='auto', interpolation='none', vmin=-40, vmax=-5)
        #axs[1].imshow(WNZ.T, origin='lower', aspect='auto', interpolation='none', vmin=-40, vmax=-5)
        from IPython.core.debugger import  Pdb; Pdb().set_trace()
Example #40
File: bug.py Project: jagguli/PyBug
 if file is None:
     file = frame.f_code.co_filename
 elif not file.startswith("file:") and os.path.sep not in file:
     try:
         mod = __import__(file, globals(), locals(), ["__file__"])
     except ImportError, err:
         if throw:
             raise
         sys.__stdout__.write("cannot set breakpoint: %s:%s : %s" %
             (file, line, err))
         return
     file = mod.__file__
     sys.__stdout__.write("breaking in: %s" % file)
 if file.endswith(".pyc"):
     file = file[:-1]
 pdb = Pdb(color_scheme='Linux', stdout=sys.__stdout__) # use sys.__stdout__ to work with nose tests
 pdb.reset()
 pdb.curframe = frame
 while frame:
     frame.f_trace = pdb.trace_dispatch
     pdb.botframe = frame
     frame = frame.f_back
 templine = line
 while templine < line + 10:
     error = pdb.set_break(file, templine, cond=cond, temporary=temp)
     if error:
         templine += 1
     else:
         break
 if error:
     error = pdb.set_break(file, line, cond=cond, temporary=temp)
Example #41
 def do_EOF(self, arg):
     """Clean-up and do underlying EOF."""
     try:
         return Pdb.do_EOF(self, arg)
     finally:
         self.shutdown()
Example #42
def debug( f, *args, ** kwargs): 
    from IPython.core.debugger import Pdb 
    pdb = Pdb( color_scheme='Linux') 
    return pdb.runcall(f, *args, **kwargs)
Example #43
File: core.py Project: wtak23/pytak
def set_trace():
    """ from wes mckiness book """
    from IPython.core.debugger import Pdb
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
Example #44
 def set_trace(self, frame=None):
     _stdin = sys.stdin
     sys.stdin = file("/dev/stdin")
     if frame is None:
         frame = sys._getframe().f_back
     Pdb("Linux").set_trace(frame)
Example #45
def trace(code, preparse=True):
    r"""
    Evaluate Sage code using the interactive tracer and return the
    result. The string ``code`` must be a valid expression
    enclosed in quotes (no assignments - the result of the expression
    is returned). In the Sage notebook this just raises a
    NotImplementedException.

    INPUT:


    -  ``code`` - str

    -  ``preparse`` - bool (default: True); if True, run
       expression through the Sage preparser.


    REMARKS: This function is extremely powerful! For example, if you
    want to step through each line of execution of, e.g.,
    ``factor(100)``, type

    ::

        sage: trace("factor(100)")             # not tested

    then at the (Pdb) prompt type ``s`` (or ``step``), then press return
    over and over to step through every line of Python that is called
    in the course of the above computation. Type ``?`` at any time for
    help on how to use the debugger (e.g., ``l`` lists 11 lines around
    the current line; ``bt`` gives a back trace, etc.).

    Setting a break point: If you have some code in a file and would
    like to drop into the debugger at a given point, put the following
    code at that point in the file:

    ``import pdb; pdb.set_trace()``

    For an article on how to use the Python debugger, see
    http://www.onlamp.com/pub/a/python/2005/09/01/debugger.html

    TESTS:

    For tests we disable garbage collection, see :trac:`21258` ::

        sage: import gc
        sage: gc.disable()

    The only real way to test this is via pexpect spawning a
    sage subprocess that uses IPython::

        sage: import pexpect
        sage: s = pexpect.spawn('sage')
        sage: _ = s.sendline("trace('print(factor(10))'); print(3+97)")
        sage: _ = s.expect('ipdb>', timeout=90)
        sage: _ = s.sendline("s"); _ = s.sendline("c");
        sage: _ = s.expect('100', timeout=90)

    Seeing the ipdb prompt and the 2 \* 5 in the output below is a
    strong indication that the trace command worked correctly::

        sage: print(s.before[s.before.find('--'):])
        --...
        ipdb> c
        2 * 5

    We test what happens in notebook embedded mode::

        sage: sage.plot.plot.EMBEDDED_MODE = True
        sage: trace('print(factor(10))')
        Traceback (most recent call last):
        ...
        NotImplementedError: the trace command is not implemented in the Sage notebook; you must use the command line.

    Re-enable garbage collection::

        sage: gc.enable()
    """
    from sage.plot.plot import EMBEDDED_MODE
    if EMBEDDED_MODE:
        raise NotImplementedError("the trace command is not implemented in the Sage notebook; you must use the command line.")

    from IPython.core.debugger import Pdb
    pdb = Pdb()

    try:
        ipython = get_ipython()
    except NameError:
        raise NotImplementedError("the trace command can only be run from the Sage command-line")

    from sage.repl.preparse import preparse
    code = preparse(code)
    return pdb.run(code, ipython.user_ns)
Example #46
 def do_continue(self, arg):
     """Clean-up and do underlying continue."""
     try:
         return Pdb.do_continue(self, arg)
     finally:
         self.shutdown()
Example #47
                  1]  # Cut according to the new size
        rcc[-1] *= 0.5  # This one is supposed to be half the energy of the other bins (not 100% sure of this TODO)
        RSPEC[n, :] = np.real(np.fft.rfft(rcc, args.outdftlen))
    if not args.outlog:
        RSPEC = np.exp(RSPEC)
    RSPEC.astype('float32').tofile(args.outspecfile)

    if 0:
        shift = 0.005
        import matplotlib.pyplot as plt
        plt.ion()
        ts = shift * np.arange(SPEC.shape[0])
        plt.subplot(211)
        plt.imshow(np.log(SPEC).T,
                   origin='lower',
                   aspect='auto',
                   interpolation='none',
                   cmap='jet',
                   extent=[0.0, ts[-1], 0.0, args.fs / 2])
        plt.subplot(212)
        if not args.outlog:
            RSPEC = np.log(RSPEC)
        plt.imshow(RSPEC.T,
                   origin='lower',
                   aspect='auto',
                   interpolation='none',
                   cmap='jet',
                   extent=[0.0, ts[-1], 0.0, args.fs / 2])
        from IPython.core.debugger import Pdb
        Pdb().set_trace()
Example #48
    def __init__(self, addr="127.0.0.1", port=4444):
        """Initialize the socket and initialize pdb."""

        # Backup stdin and stdout before replacing them by the socket handle
        self.old_stdout = sys.stdout
        self.old_stdin = sys.stdin
        self.port = port

        # Open a 'reusable' socket to let the webapp reload on the same port
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        self.skt.bind((addr, port))
        self.skt.listen(1)

        # Writes to stdout are forbidden in mod_wsgi environments
        try:
            sys.stderr.write("pdb is running on %s:%d\n"
                             % self.skt.getsockname())
        except IOError:
            pass

        (clientsocket, address) = self.skt.accept()
        handle = clientsocket.makefile('rw')

        Pdb.__init__(self, color_scheme='Linux', completekey='tab',
                     stdin=FileObjectWrapper(handle, self.old_stdin),
                     stdout=FileObjectWrapper(handle, self.old_stdout))
        handle.write("writing to handle")
        def import_module(possible_modules, needed_module):
            """Make it more resilient to different versions of IPython and try to
            find a module."""
            count = len(possible_modules)
            for module in possible_modules:
                sys.stderr.write(module)

                try:
                    return __import__(module, fromlist=[needed_module])
                except ImportError:
                    count -= 1
                    if count == 0:
                        raise

        possible_modules = ['IPython.terminal.ipapp',           # Newer IPython
                            'IPython.frontend.terminal.ipapp']  # Older IPython

        app = import_module(possible_modules, "TerminalIPythonApp")
        TerminalIPythonApp = app.TerminalIPythonApp

        possible_modules = ['IPython.terminal.embed',           # Newer IPython
                            'IPython.frontend.terminal.embed']  # Older IPython
        embed = import_module(possible_modules, "InteractiveShellEmbed")
        InteractiveShellEmbed = embed.InteractiveShellEmbed

        try:
            get_ipython
        except NameError:
            # Build a terminal app in order to force ipython to load the
            # configuration
            ipapp = TerminalIPythonApp()
            # Avoid output (banner, prints)
            ipapp.interact = False
            ipapp.initialize()
            def_colors = ipapp.shell.colors
        else:
            # If an instance of IPython is already running try to get an instance
            # of the application. If there is no TerminalIPythonApp instantiated
            # the instance method will create a new one without loading the config.
            # i.e: if we are in an embed instance we do not want to load the config.
            ipapp = TerminalIPythonApp.instance()
            shell = get_ipython()
            def_colors = shell.colors

            # Detect if embed shell or not and display a message
            if isinstance(shell, InteractiveShellEmbed):
                shell.write_err(
                    "\nYou are currently into an embedded ipython shell,\n"
                    "the configuration will not be loaded.\n\n"
                )

        self.rcLines += [line + '\n' for line in ipapp.exec_lines]
        sys.stdout = sys.stdin = handle
        OCCUPIED.claim(port, sys.stdout)
        sys.stderr.write(str(self.rcLines))
Example #49
    def train(self,
              params,
              indir,
              outdir,
              wdir,
              fid_lst_tra,
              fid_lst_val,
              X_vals,
              Y_vals,
              cfg,
              params_savefile,
              trialstr='',
              cont=None):

        print('Model initial status before training')
        worst_val = data.cost_0pred_rmse(Y_vals)  # RMSE
        print("    0-pred validation RMSE = {} (100%)".format(worst_val))
        init_pred_rms = data.prediction_rms(self._model, [X_vals])
        print('    initial RMS of prediction = {}'.format(init_pred_rms))
        init_val = data.cost_model_prediction_rmse(self._model, [X_vals],
                                                   Y_vals)
        best_val = None
        print("    initial validation RMSE = {} ({:.4f}%)".format(
            init_val, 100.0 * init_val / worst_val))

        nbbatches = int(len(fid_lst_tra) / cfg.train_batch_size)
        print('    using {} batches of {} sentences each'.format(
            nbbatches, cfg.train_batch_size))
        print('    model #parameters={}'.format(self._model.nbParams()))

        nbtrainframes = 0
        for fid in fid_lst_tra:
            X = data.loadfile(outdir, fid)
            nbtrainframes += X.shape[0]
        frameshift = 0.005  # TODO
        print('    Training set: {} sentences, #frames={} ({})'.format(
            len(fid_lst_tra), nbtrainframes,
            time.strftime('%H:%M:%S', time.gmtime(
                (nbtrainframes * frameshift)))))
        print('    #parameters/#frames={:.2f}'.format(
            float(self._model.nbParams()) / nbtrainframes))
        if cfg.train_nbepochs_scalewdata and not cfg.train_batch_lengthmax is None:
            # During an epoch, the whole data is _not_ seen by the training since cfg.train_batch_lengthmax is limited and smaller than the sentence size.
            # To compensate for it and make the config below less dependent on the data, the min and max nbepochs are scaled according to the missing number of frames seen.
            # TODO Should consider only non-silent frames, many recordings have a lot of pre and post silences
            epochcoef = nbtrainframes / float(
                (cfg.train_batch_lengthmax * len(fid_lst_tra)))
            print('    scale number of epochs wrt number of frames')
            cfg.train_min_nbepochs = int(cfg.train_min_nbepochs * epochcoef)
            cfg.train_max_nbepochs = int(cfg.train_max_nbepochs * epochcoef)
            print('        train_min_nbepochs={}'.format(
                cfg.train_min_nbepochs))
            print('        train_max_nbepochs={}'.format(
                cfg.train_max_nbepochs))

        if self._errtype == 'WGAN':
            print('Preparing critic for WGAN...')
            critic_input_var = T.tensor3(
                'critic_input'
            )  # Either real data to predict/generate, or, fake data that has been generated

            [critic, layer_critic, layer_cond] = self._model.build_critic(
                critic_input_var,
                self._model._input_values,
                self._model.vocoder,
                self._model.insize,
                use_LSweighting=(cfg.train_LScoef > 0.0),
                LSWGANtransfreqcutoff=self._LSWGANtransfreqcutoff,
                LSWGANtranscoef=self._LSWGANtranscoef,
                use_WGAN_incnoisefeature=self._WGAN_incnoisefeature)

            # Create expression for passing real data through the critic
            real_out = lasagne.layers.get_output(critic)
            # Create expression for passing fake data through the critic
            genout = lasagne.layers.get_output(self._model.net_out)
            indict = {
                layer_critic: lasagne.layers.get_output(self._model.net_out),
                layer_cond: self._model._input_values
            }
            fake_out = lasagne.layers.get_output(critic, indict)

            # Create generator's loss expression
            # Force LSE for low frequencies, otherwise the WGAN noise makes the voice hoarse.
            print('WGAN Weighted LS - Generator part')

            wganls_weights_els = []
            wganls_weights_els.append([0.0])  # For f0
            specvs = np.arange(self._model.vocoder.specsize(),
                               dtype=theano.config.floatX)
            if cfg.train_LScoef == 0.0:
                wganls_weights_els.append(
                    np.ones(self._model.vocoder.specsize())
                )  # No special weighting for spec
            else:
                wganls_weights_els.append(
                    nonlin_sigmoidparm(
                        specvs,
                        sp.freq2fwspecidx(self._LSWGANtransfreqcutoff,
                                          self._model.vocoder.fs,
                                          self._model.vocoder.specsize()),
                        self._LSWGANtranscoef))  # For spec
            if self._model.vocoder.noisesize() > 0:
                if self._WGAN_incnoisefeature:
                    noisevs = np.arange(self._model.vocoder.noisesize(),
                                        dtype=theano.config.floatX)
                    wganls_weights_els.append(
                        nonlin_sigmoidparm(
                            noisevs,
                            sp.freq2fwspecidx(self._LSWGANtransfreqcutoff,
                                              self._model.vocoder.fs,
                                              self._model.vocoder.noisesize()),
                            self._LSWGANtranscoef))  # For noise
                else:
                    wganls_weights_els.append(
                        np.zeros(self._model.vocoder.noisesize()))
            if self._model.vocoder.vuvsize() > 0:
                wganls_weights_els.append([0.0])  # For vuv
            wganls_weights_ = np.hstack(wganls_weights_els)
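            # Layout of wganls_weights_ (WGAN weight per output feature), as
            # built above: 0 for f0 (pure LSE), a sigmoid ramp over the
            # spectral bins (presumably rising with frequency, so LSE dominates
            # below the transition frequency and WGAN above it), zeros or a
            # similar ramp for the noise features, and 0 for vuv.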

            # TODO build wganls_weights_ for LSE instead for WGAN, for consistency with the paper

            # wganls_weights_ = np.hstack((wganls_weights_, wganls_weights_, wganls_weights_)) # That would be for MLPG using deltas
            wganls_weights_ *= (1.0 - cfg.train_LScoef)

            lserr = lasagne.objectives.squared_error(genout,
                                                     self._target_values)
            wganls_weights_ls = theano.shared(value=(1.0 - wganls_weights_),
                                              name='wganls_weights_ls')

            wganpart = fake_out * np.mean(
                wganls_weights_
            )  # That's a way to automatically balance the WGAN and LSE costs wrt the LSE spectral weighting
            lsepart = lserr * wganls_weights_ls  # Spectral weighting as complement of the WGAN part spectral weighting

            generator_loss = -wganpart.mean() + lsepart.mean(
            )  # A term in [-oo,oo] and one in [0,oo]; the LSE part has to be small enough for the WGAN part to do something.

            generator_lossratio = abs(wganpart.mean()) / abs(lsepart.mean())
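            # The ratio above tracks the relative magnitude of the WGAN and LSE
            # terms, so their balance can be monitored during training.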

            critic_loss = fake_out.mean() - real_out.mean(
            )  # For clarity: we want to maximize real-fake, i.e. minimize -(real-fake) = fake-real

            # Improved training for Wasserstein GAN
            epsi = T.TensorType(dtype=theano.config.floatX,
                                broadcastable=(False, True, True))()
            mixed_X = (epsi * genout) + (1 - epsi) * critic_input_var
            indict = {
                layer_critic: mixed_X,
                layer_cond: self._model._input_values
            }
            output_D_mixed = lasagne.layers.get_output(critic, inputs=indict)
            grad_mixed = T.grad(T.sum(output_D_mixed), mixed_X)
            norm_grad_mixed = T.sqrt(T.sum(T.square(grad_mixed), axis=[1, 2]))
            grad_penalty = T.mean(T.square(norm_grad_mixed - 1))
            critic_loss = critic_loss + cfg.train_pg_lambda * grad_penalty
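            # Gradient penalty (WGAN-GP, Gulrajani et al. 2017): epsi draws one
            # random scalar per sequence (broadcast over time and features),
            # mixed_X interpolates between generated and real data, and the
            # critic's gradient norm at those points is pushed towards 1, i.e.
            # a soft 1-Lipschitz constraint weighted by cfg.train_pg_lambda.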

            # Create update expressions for training
            critic_params = lasagne.layers.get_all_params(critic,
                                                          trainable=True)
            critic_updates = lasagne.updates.adam(
                critic_loss,
                critic_params,
                learning_rate=cfg.train_D_learningrate,
                beta1=cfg.train_D_adam_beta1,
                beta2=cfg.train_D_adam_beta2)
            print('    Critic architecture')
            print_network(critic, critic_params)

            generator_params = lasagne.layers.get_all_params(
                self._model.net_out, trainable=True)
            generator_updates = lasagne.updates.adam(
                generator_loss,
                generator_params,
                learning_rate=cfg.train_G_learningrate,
                beta1=cfg.train_G_adam_beta1,
                beta2=cfg.train_G_adam_beta2)
            self._optim_updates.extend([generator_updates, critic_updates])
            print('    Generator architecture')
            print_network(self._model.net_out, generator_params)

            # Compile functions performing a training step on a mini-batch (according
            # to the updates dictionary) and returning the corresponding score:
            print('Compiling generator training function...')
            generator_train_fn_ins = [self._model._input_values]
            generator_train_fn_ins.append(self._target_values)
            generator_train_fn_outs = [generator_loss, generator_lossratio]
            train_fn = theano.function(generator_train_fn_ins,
                                       generator_train_fn_outs,
                                       updates=generator_updates)
            train_validation_fn = theano.function(generator_train_fn_ins,
                                                  generator_loss,
                                                  no_default_updates=True)
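            # no_default_updates=True makes this a pure evaluation function:
            # default updates of shared variables (e.g. random-stream states)
            # are not applied when scoring validation data.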
            print('Compiling critic training function...')
            critic_train_fn_ins = [
                self._model._input_values, critic_input_var, epsi
            ]
            critic_train_fn = theano.function(critic_train_fn_ins,
                                              critic_loss,
                                              updates=critic_updates)
            critic_train_validation_fn = theano.function(
                critic_train_fn_ins, critic_loss, no_default_updates=True)

        elif self._errtype == 'LSE':
            print('    LSE Training')
            print_network(self._model.net_out, params)
            predicttrain_values = lasagne.layers.get_output(
                self._model.net_out, deterministic=False)
            costout = (predicttrain_values - self._target_values)**2

            self.cost = T.mean(
                costout)  # self.cost = T.mean(T.sum(costout, axis=-1)) ?

            print("    creating parameters updates ...")
            updates = lasagne.updates.adam(
                self.cost,
                params,
                learning_rate=float(10**cfg.train_learningrate_log10),
                beta1=float(cfg.train_adam_beta1),
                beta2=float(cfg.train_adam_beta2),
                epsilon=float(10**cfg.train_adam_epsilon_log10))

            self._optim_updates.append(updates)
            print("    compiling training function ...")
            train_fn = theano.function(self._model.inputs +
                                       [self._target_values],
                                       self.cost,
                                       updates=updates)
        else:
            raise ValueError('Unknown err type "' + self._errtype +
                             '"')  # pragma: no cover

        costs = defaultdict(list)
        epochs_modelssaved = []
        epochs_durs = []
        nbnodecepochs = 0
        generator_updates = 0
        epochstart = 1
        if cont and os.path.exists(
                os.path.splitext(params_savefile)[0] +
                '-trainingstate-last.pkl'):
            print('    reloading previous training state ...')
            savedcfg, extras, rngstate = self.loadTrainingState(
                os.path.splitext(params_savefile)[0] +
                '-trainingstate-last.pkl', cfg)
            np.random.set_state(rngstate)
            cost_val = extras['cost_val']
            # Restoring some local variables
            costs = extras['costs']
            epochs_modelssaved = extras['epochs_modelssaved']
            epochs_durs = extras['epochs_durs']
            generator_updates = extras['generator_updates']
            epochstart = extras['epoch'] + 1
            # Restore the saving criteria only if none of these 3 cfg values changed:
            if (savedcfg.train_min_nbepochs == cfg.train_min_nbepochs) and (
                    savedcfg.train_max_nbepochs == cfg.train_max_nbepochs
            ) and (savedcfg.train_cancel_nodecepochs
                   == cfg.train_cancel_nodecepochs):
                best_val = extras['best_val']
                nbnodecepochs = extras['nbnodecepochs']

        print_log("    start training ...")
        for epoch in range(epochstart, 1 + cfg.train_max_nbepochs):
            timeepochstart = time.time()
            rndidx = np.arange(
                int(nbbatches * cfg.train_batch_size)
            )  # Need to restart from an ordered state to make the shuffling repeatable after reloading the training state, even though the shuffling itself will differ anyway
            np.random.shuffle(rndidx)
            rndidxb = np.split(rndidx, nbbatches)
            cost_tra = None
            costs_tra_batches = []
            costs_tra_gen_wgan_lse_ratios = []
            costs_tra_critic_batches = []
            load_times = []
            train_times = []
            for k in xrange(nbbatches):

                timeloadstart = time.time()
                print_tty('\r    Training batch {}/{}'.format(
                    1 + k, nbbatches))

                # Load training data online, because data is often too heavy to hold in memory
                fid_lst_trab = [fid_lst_tra[bidx] for bidx in rndidxb[k]]
                X_trab, _, Y_trab, _, W_trab = data.load_inoutset(
                    indir,
                    outdir,
                    wdir,
                    fid_lst_trab,
                    length=cfg.train_batch_length,
                    lengthmax=cfg.train_batch_lengthmax,
                    maskpadtype=cfg.train_batch_padtype,
                    cropmode=cfg.train_batch_cropmode)

                if 0:  # Plot batch
                    import matplotlib.pyplot as plt
                    plt.ion()
                    plt.imshow(Y_trab[0, ].T,
                               origin='lower',
                               aspect='auto',
                               interpolation='none',
                               cmap='jet')
                    from IPython.core.debugger import Pdb
                    Pdb().set_trace()
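                    # Change `if 0:` above to `if 1:` to visualise a training
                    # batch and drop into the IPython debugger at this point.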

                load_times.append(time.time() - timeloadstart)
                print_tty(' (iter load: {:.6f}s); training '.format(
                    load_times[-1]))

                timetrainstart = time.time()
                if self._errtype == 'WGAN':

                    random_epsilon = np.random.uniform(
                        size=(cfg.train_batch_size, 1, 1)).astype('float32')
                    critic_returns = critic_train_fn(
                        X_trab, Y_trab,
                        random_epsilon)  # Train the critic
                    costs_tra_critic_batches.append(float(critic_returns))

                    # TODO The params below are supposed to ensure the critic is "almost" fully converged
                    #      when training the generator. How to evaluate this? Is it the case currently?
                    if (generator_updates <
                            25) or (generator_updates % 500
                                    == 0):  # TODO Params hardcoded
                        critic_runs = 10  # TODO Params hardcoded 10
                    else:
                        critic_runs = 5  # TODO Params hardcoded 5
                    # martinarjovsky: "- Loss of the critic should never be negative, since outputting 0 would yield a better loss, so this is a huge red flag."
                    # if critic_returns>0 and k%critic_runs==0: # Train only if the estimate of the Wasserstein distance makes sense, and, each N critic iteration TODO Doesn't work well though
                    if k % critic_runs == 0:  # Train each N critic iteration
                        # Train the generator
                        trainargs = [X_trab]
                        trainargs.append(Y_trab)
                        [cost_tra, gen_ratio] = train_fn(*trainargs)
                        cost_tra = float(cost_tra)
                        generator_updates += 1

                        if 0:
                            log_plot_samples(
                                Y_vals,
                                Y_preds,
                                nbsamples=nbsamples,
                                fname=os.path.splitext(params_savefile)[0] +
                                '-fig_samples_' + trialstr +
                                '{:07}.png'.format(generator_updates),
                                vocoder=self._model.vocoder,
                                title='E{} I{}'.format(epoch,
                                                       generator_updates))

                elif self._errtype == 'LSE':
                    train_returns = train_fn(X_trab, Y_trab)
                    cost_tra = np.sqrt(float(train_returns))

                train_times.append(time.time() - timetrainstart)

                if cost_tra is not None:
                    print_tty(
                        'err={:.4f} (iter train: {:.4f}s)                  '.
                        format(cost_tra, train_times[-1]))
                    if np.isnan(cost_tra):  # pragma: no cover
                        print_log(
                            '    previous costs: {}'.format(costs_tra_batches))
                        print_log('    E{} Batch {}/{} train cost = {}'.format(
                            epoch, 1 + k, nbbatches, cost_tra))
                        raise ValueError('ERROR: Training cost is nan!')
                    costs_tra_batches.append(cost_tra)
                    if self._errtype == 'WGAN':
                        costs_tra_gen_wgan_lse_ratios.append(gen_ratio)
            print_tty(
                '\r                                                           \r'
            )
            if self._errtype == 'WGAN':
                costs['model_training'].append(0.1 *
                                               np.mean(costs_tra_batches))
                if cfg.train_LScoef > 0:
                    costs['model_training_wgan_lse_ratio'].append(
                        0.1 * np.mean(costs_tra_gen_wgan_lse_ratios))
            else:
                costs['model_training'].append(np.mean(costs_tra_batches))

            # Eval validation cost
            cost_validation_rmse = data.cost_model_prediction_rmse(
                self._model, [X_vals], Y_vals)
            costs['model_rmse_validation'].append(cost_validation_rmse)

            if self._errtype == 'WGAN':
                train_validation_fn_args = [X_vals]
                train_validation_fn_args.append(Y_vals)
                costs['model_validation'].append(0.1 * data.cost_model_mfn(
                    train_validation_fn, train_validation_fn_args))
                costs['critic_training'].append(
                    np.mean(costs_tra_critic_batches))
                random_epsilon = [
                    np.random.uniform(size=(1, 1)).astype('float32')
                ] * len(X_vals)
                critic_train_validation_fn_args = [
                    X_vals, Y_vals, random_epsilon
                ]
                costs['critic_validation'].append(
                    data.cost_model_mfn(critic_train_validation_fn,
                                        critic_train_validation_fn_args))
                costs['critic_validation_ltm'].append(
                    np.mean(costs['critic_validation']
                            [-cfg.train_validation_ltm_winlen:]))

                cost_val = costs['critic_validation_ltm'][-1]
            elif self._errtype == 'LSE':
                cost_val = costs['model_rmse_validation'][-1]

            print_log(
                "    E{}/{} {}  cost_tra={:.6f} (load:{}s train:{}s)  cost_val={:.6f} ({:.4f}% RMSE)  {} MiB GPU {} MiB RAM"
                .format(epoch, cfg.train_max_nbepochs, trialstr,
                        costs['model_training'][-1],
                        time2str(np.sum(load_times)),
                        time2str(np.sum(train_times)), cost_val,
                        100 * cost_validation_rmse / worst_val,
                        nvidia_smi_gpu_memused(), proc_memresident()))
            sys.stdout.flush()

            if np.isnan(cost_val):
                raise ValueError('ERROR: Validation cost is nan!')
            if (self._errtype == 'LSE') and (
                    cost_val >= cfg.train_cancel_validthresh * worst_val):
                raise ValueError(
                    'ERROR: Validation cost blew up! It is higher than {} times the worst possible value'
                    .format(cfg.train_cancel_validthresh))

            self._model.saveAllParams(os.path.splitext(params_savefile)[0] +
                                      '-last.pkl',
                                      cfg=cfg,
                                      printfn=print_log,
                                      extras={'cost_val': cost_val})

            # Save model parameters
            if epoch >= cfg.train_min_nbepochs:  # Assume no model is good enough before cfg.train_min_nbepochs
                if ((best_val is None) or (cost_val < best_val)
                    ):  # Among all trials of hyper-parameter optimisation
                    best_val = cost_val
                    self._model.saveAllParams(params_savefile,
                                              cfg=cfg,
                                              printfn=print_log,
                                              extras={'cost_val': cost_val},
                                              infostr='(E{} C{:.4f})'.format(
                                                  epoch, best_val))
                    epochs_modelssaved.append(epoch)
                    nbnodecepochs = 0
                else:
                    nbnodecepochs += 1

            if cfg.train_log_plot:
                print_log('    saving plots')
                log_plot_costs(costs,
                               worst_val,
                               fname=os.path.splitext(params_savefile)[0] +
                               '-fig_costs_' + trialstr + '.svg',
                               epochs_modelssaved=epochs_modelssaved)

                nbsamples = 2
                nbsamples = min(nbsamples, len(X_vals))
                Y_preds = []
                for sampli in xrange(nbsamples):
                    Y_preds.append(
                        self._model.predict(
                            np.reshape(
                                X_vals[sampli],
                                [1] + [s for s in X_vals[sampli].shape]))[0, ])

                plotsuffix = ''
                if len(epochs_modelssaved
                       ) > 0 and epochs_modelssaved[-1] == epoch:
                    plotsuffix = '_best'
                else:
                    plotsuffix = '_last'
                log_plot_samples(Y_vals,
                                 Y_preds,
                                 nbsamples=nbsamples,
                                 fname=os.path.splitext(params_savefile)[0] +
                                 '-fig_samples_' + trialstr + plotsuffix +
                                 '.png',
                                 vocoder=self._model.vocoder,
                                 title='E{}'.format(epoch))

            epochs_durs.append(time.time() - timeepochstart)
            print_log('    ET: {}   max TT: {}s   train ~time left: {}'.format(
                time2str(epochs_durs[-1]),
                time2str(
                    np.median(epochs_durs[-10:]) * cfg.train_max_nbepochs),
                time2str(
                    np.median(epochs_durs[-10:]) *
                    (cfg.train_max_nbepochs - epoch))))

            self.saveTrainingState(os.path.splitext(params_savefile)[0] +
                                   '-trainingstate-last.pkl',
                                   cfg=cfg,
                                   printfn=print_log,
                                   extras={
                                       'cost_val': cost_val,
                                       'best_val': best_val,
                                       'costs': costs,
                                       'epochs_modelssaved':
                                       epochs_modelssaved,
                                       'epochs_durs': epochs_durs,
                                       'nbnodecepochs': nbnodecepochs,
                                       'generator_updates': generator_updates,
                                       'epoch': epoch
                                   })

            if nbnodecepochs >= cfg.train_cancel_nodecepochs:  # pragma: no cover
                print_log(
                    'WARNING: validation error did not decrease for {} epochs. Early stop!'
                    .format(cfg.train_cancel_nodecepochs))
                break

        if best_val is None:
            raise ValueError('No model has been saved during training!')
        return {
            'epoch_stopped':
            epoch,
            'worst_val':
            worst_val,
            'best_epoch':
            epochs_modelssaved[-1] if len(epochs_modelssaved) > 0 else -1,
            'best_val':
            best_val
        }
Example #50
0
def debug(f, *args, **kwargs):
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwargs)
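
# Hypothetical usage (not part of the original snippet):
#   debug(my_function, arg1, kw=2)
# runs my_function under the IPython debugger; press 'c' to continue or 's'
# to step through it line by line.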
Example #51
0
def debug(f, *args, **kwargs):
    # allows arbitrarily calling debugger for a function. Press "c" to resume
    # the function; press "s" to step through each line of the function
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwargs)
Example #52
0
Shankar Kulumani		GWU		[email protected]
"""

from pupil import circle_detector
from pupil.detectors import detector_2d
from pupil.detectors import detector_3d
from pupil import methods_python
from pupil.video_capture.file_backend import Frame

import cv2
import numpy as np
import av

import itertools
from IPython.core.debugger import Pdb
ipdb = Pdb()
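# Module-level debugger instance: calling ipdb.set_trace() anywhere below drops
# into the IPython debugger at that point.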
import argparse

def nothing(x):
    pass

def load_distortion_map(filename):
    """Load the distortion map from the file and return as map_x, map_y
    """
    fs = cv2.FileStorage(filename, cv2.FileStorage_READ)
    map_x = fs.getNode('map_x').mat()
    map_y = fs.getNode('map_y').mat()
    fs.release()
    return (map_x, map_y)
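
# Illustrative helper (not part of the original snippet): the loaded maps are
# meant to be applied per frame with cv2.remap to undistort the image.
def undistort_frame(frame, map_x, map_y):
    return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR)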

def threshold_example():
Example #53
0
# d comes from a JSON payload we don't control
d = {'first': 'v1', 'second': 'v2', 'fourth': 'v4'}
# keys also comes from a JSON payload we don't control
keys = ('first', 'second', 'third', 'fourth')


def do_something_with_value(value):
    print(value)


from IPython.core.debugger import Pdb
ipdb = Pdb()
ipdb.set_trace()  # we place a breakpoint here

for key in keys:
    do_something_with_value(d[key])
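
# Note: 'third' is missing from d, so the loop above raises a KeyError on that
# key; the breakpoint placed before the loop lets you step through it ('n'/'s')
# and inspect d and keys before the failing lookup.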

print('Validation done.')
Example #54
0
 def __init__(self, *args, **kwargs):
     self.skip = kwargs.get('skip', None)
     Pdb.__init__(self, *args, **kwargs)
     self.prompt = Pdbi.PROMPT
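
 # Presumably a method of a Pdb subclass (e.g. `class Pdbi(Pdb):`), since it
 # forwards to Pdb.__init__ and reads the class-level prompt Pdbi.PROMPT.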
Example #55
0
def iset_trace():
    import sys
    from IPython.core.debugger import Pdb
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
Example #56
0
def idebug(f, *args, **kwds):
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwds)
Example #57
0
def init_neurons_corr_pnr(Yc, dims, gSig, gSiz, thresh_init, min_corr, min_pnr, bd, min_pixel, center_psf, filter_data_centering, **kwargs):
    """
    using greedy method to initialize neurons by selecting pixels with large
    local correlation and large peak-to-noise ratio
    
    """    
    from time import time
    start = time()
    deconvolve_options = {'bl': None,
                          'c1': None,
                          'g': None,
                          'sn': None,
                          'p': 1,
                          'approach': 'constrained foopsi',
                          'method': 'oasis',
                          'bas_nonneg': True,
                          'noise_range': [.25, .5],
                          'noise_method': 'logmexp',
                          'lags': 5,
                          'fudge_factor': 1.0,
                          'verbosity': None,
                          'solvers': None,
                          'optimize_g': 1,
                          'penalty': 1}

    # Pdb().set_trace()

    duration = Yc.shape[0]    
    data_filtered = np.zeros((duration, dims[0], dims[1]))
    data_raw = np.zeros((duration, np.prod(dims)))

    # spatially filter data
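    # Kernel size: 3*gSig rounded up to an odd integer per dimension (OpenCV
    # filters require odd kernel sizes)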
    ksize = tuple((3*gSig) // 2 * 2 + 1)    
    chunk_size  = Yc.chunks[0]
    
    for i in range(0, duration+chunk_size, chunk_size):
        data = Yc[i:i+chunk_size,:]
        data_raw[i:i+chunk_size,:] = data.copy()
        for j, frame in enumerate(data):        
            if center_psf:
                tmp = cv2.GaussianBlur(frame.reshape(dims), ksize = ksize, sigmaX = gSig[0], sigmaY = gSig[1], borderType=1)
                tmp2 = cv2.boxFilter(frame.reshape(dims), ddepth=-1, ksize = ksize, borderType = 1)
                data_filtered[i+j] = tmp - tmp2
            else:
                tmp = cv2.GaussianBlur(frame.reshape(dims), ksize = ksize, sigmaX = gSig[0], sigmaY = gSig[1], borderType=1)
                data_filtered[i+j] = tmp
    
    # Pdb().set_trace()

    # compute peak-to-noise ratio    
    if filter_data_centering:
        data_filtered -= data_filtered.mean(axis=0)    
    data_max = np.max(data_filtered, axis=0)    
    noise_pixel = get_noise_fft(data_filtered)
    pnr = np.divide(data_max, noise_pixel)

    # remove small values and only keep pixels with large fluorescence signals
    tmp_data = np.copy(data_filtered)
    tmp_data[tmp_data < thresh_init * noise_pixel] = 0

    # compute correlation image
    cn = local_correlations_fft(tmp_data)
    del(tmp_data)
    if np.isnan(cn).sum(): print("nan pixels in cn")
    
    # screen seed pixels as neuron centers
    v_search = cn * pnr
    v_search[(cn < min_corr) | (pnr < min_pnr)] = 0
    ind_search = (v_search <= 0)  # indicate whether the pixel has
    # been searched before. pixels with low correlations or low PNRs are
    # ignored directly. ind_search[i]=0 means the i-th pixel is still under
    # consideration of being a seed pixel

    # pixels near the boundaries are ignored because of artifacts
    ind_bd = np.zeros(dims).astype(bool)  # indicate boundary pixels
    if bd > 0:
        ind_bd[:bd, :] = True
        ind_bd[-bd:, :] = True
        ind_bd[:, :bd] = True
        ind_bd[:, -bd:] = True
    ind_search[ind_bd] = True

    # creating variables for storing the results    
    max_number = np.int32(np.ceil(((ind_search.size - ind_search.sum())/3))) # changed from original cnmfe
    if max_number == 0: max_number = 1 # if there is just one pixel, it's worth looking at it
    Ain = np.zeros(shape=(max_number, dims[0], dims[1]),dtype=np.float32)  # neuron shapes / spatial footprint
    Cin = np.zeros(shape=(max_number, duration),dtype=np.float32)  # de-noised traces
    Sin = np.zeros(shape=(max_number, duration),dtype=np.float32)  # spiking # activity
    Cin_raw = np.zeros(shape=(max_number, duration),dtype=np.float32)  # raw traces
    center = np.zeros(shape=(max_number, 2), dtype=int)  # neuron centers

    num_neurons = 0  # number of initialized neurons
    continue_searching = True
    min_v_search = min_corr * min_pnr
    count = 0
    
    while continue_searching:
        # local maximum, for identifying seed pixels in following steps
        v_search[(cn < min_corr) | (pnr < min_pnr)] = 0
        v_search[ind_search] = 0
        tmp_kernel = np.ones((gSiz // 3) * 2)
        v_max = cv2.dilate(v_search, tmp_kernel)

        # automatically select seed pixels as the local maximums
        v_max[(v_search != v_max) | (v_search < min_v_search)] = 0
        v_max[ind_search] = 0
        [rsub_max, csub_max] = v_max.nonzero()  # subscript of seed pixels
        local_max = v_max[rsub_max, csub_max]
        n_seeds = len(local_max)  # number of candidates
        if n_seeds == 0:
            # no more candidates for seed pixels            
            break
        else:
            # order seed pixels according to their corr * pnr values
            ind_local_max = local_max.argsort()[::-1]
        img_vmax = np.median(local_max)

        # try to initialize neurons given all seed pixels
        for ith_seed, idx in enumerate(ind_local_max):
            r = rsub_max[idx]
            c = csub_max[idx]
            ind_search[r, c] = True  # this pixel won't be searched
            if v_search[r, c] < min_v_search:
                # skip this pixel if it's not sufficient for being a seed pixel
                continue

            # roughly check whether this is a good seed pixel            
            y0 = data_filtered[:, r,c]
            if np.max(y0) < thresh_init * noise_pixel[r, c]:
                v_search[r, c] = 0
                continue

            # check whether it correlates with an already initialized neuron
            if Ain[:, r, c].sum() > 0:
                testp = Cin_raw[Ain[:, r, c] > 0]
                rr = [scipy.stats.pearsonr(y0, cc)[0] for cc in testp]
                if np.max(rr) > .7:
                    v_search[r, c] = 0
                    continue

            # new heuristic from Caiman
            y0_diff = np.diff(y0)
            if y0_diff.max() < 3*y0_diff.std():
                v_search[r,c] = 0
                continue


            # crop a small box for estimation of ai and ci
            r_min = max(0, r - gSiz[0])
            r_max = min(dims[0], r + gSiz[0] + 1)
            c_min = max(0, c - gSiz[1])
            c_max = min(dims[1], c + gSiz[1] + 1)
            nr = r_max - r_min
            nc = c_max - c_min
            patch_dims = (nr, nc)  # patch dimension

            index_box = np.zeros(dims)
            index_box[r_min:r_max,c_min:c_max] = True
            index_box_1d = np.where(index_box.flatten())[0]
            data_raw_box = data_raw[:,index_box_1d]
            data_filtered_box = data_filtered[:, r_min:r_max, c_min:c_max].reshape(-1, nr * nc)
            # index of the seed pixel in the cropped box
            ind_ctr = np.ravel_multi_index((r - r_min, c - c_min),dims=(nr, nc))

            # neighbouring pixels to update after initializing one neuron
            r2_min = max(0, r - 2 * gSiz[0])
            r2_max = min(dims[0], r + 2 * gSiz[0] + 1)
            c2_min = max(0, c - 2 * gSiz[1])
            c2_max = min(dims[1], c + 2 * gSiz[1] + 1)
            try:                        
                ai, ci_raw, ind_success = extract_ac(data_filtered_box,data_raw_box, ind_ctr, patch_dims, filter_data_centering)
            except ValueError:
                Pdb().set_trace()

            if (np.sum(ai > 0) < min_pixel) or (not ind_success):
                # bad initialization. discard and continue
                continue
            else:
                # cheers! good initialization.
                center[num_neurons] = [r, c]
                Ain[num_neurons, r_min:r_max, c_min:c_max] = ai
                Cin_raw[num_neurons] = ci_raw.squeeze()
                if deconvolve_options:
                    # deconvolution                          
                    # if count == 1:
                    #     Pdb().set_trace()
                    # count += 1                     
                    ci, si, tmp_options, baseline, c1 = deconvolve_ca(ci_raw, deconvolve_options.copy())                    
                    
                    Cin[num_neurons] = ci
                    Sin[num_neurons] = si
                else:
                    # no deconvolution
                    baseline = np.median(ci_raw)
                    ci_raw -= baseline
                    ci = ci_raw.copy()
                    ci[ci < 0] = 0
                    Cin[num_neurons] = ci.squeeze()

                # remove the spatio-temporal activity of the initialized neuron
                # and update the correlation image & PNR image
                # update the raw data
                ac = ci[:,np.newaxis].dot(ai.flatten()[np.newaxis,:])
                data_raw[:,index_box_1d] -= ac
                
                # spatially filtered the neuron shape
                tmp_img = Ain[num_neurons, r2_min:r2_max, c2_min:c2_max]
                if center_psf:
                    ai_filtered = cv2.GaussianBlur(tmp_img, ksize=ksize,sigmaX=gSig[0], sigmaY=gSig[1],borderType=cv2.BORDER_REFLECT) \
                        - cv2.boxFilter(tmp_img, ddepth=-1,ksize=ksize, borderType=cv2.BORDER_REFLECT)
                else:
                    ai_filtered = cv2.GaussianBlur(tmp_img, ksize=ksize,sigmaX=gSig[0], sigmaY=gSig[1],borderType=cv2.BORDER_REFLECT)
                
                # update the filtered data
                data_filtered[:, r2_min:r2_max, c2_min:c2_max] -= ai_filtered[np.newaxis, ...] * ci[..., np.newaxis, np.newaxis]
                data_filtered_box = data_filtered[:,r2_min:r2_max, c2_min:c2_max].copy()

                # update PNR image
                if filter_data_centering:
                    data_filtered_box -= data_filtered_box.mean(axis=0)
                max_box = np.max(data_filtered_box, axis=0)
                noise_box = noise_pixel[r2_min:r2_max, c2_min:c2_max]
                pnr_box = np.divide(max_box, noise_box)
                pnr[r2_min:r2_max, c2_min:c2_max] = pnr_box
                pnr_box[pnr_box < min_pnr] = 0

                # update correlation image
                data_filtered_box[data_filtered_box < thresh_init * noise_box] = 0
                cn_box = local_correlations_fft(data_filtered_box)
                cn_box[np.isnan(cn_box) | (cn_box < 0)] = 0
                cn[r_min:r_max, c_min:c_max] = cn_box[(r_min - r2_min):(r_max - r2_min), (c_min - c2_min):(c_max - c2_min)]
                cn_box = cn[r2_min:r2_max, c2_min:c2_max]
                cn_box[cn_box < min_corr] = 0

                # update v_search
                v_search[r2_min:r2_max, c2_min:c2_max] = cn_box * pnr_box
                # avoid searching nearby pixels
                v_search[r_min:r_max, c_min:c_max] *= (ai < np.max(ai) / 2.)

                # increase the number of detected neurons
                num_neurons += 1  #
                if num_neurons == max_number:
                    continue_searching = False
                    break


    # print('In total, ', num_neurons, 'neurons were initialized.')
    
    A = Ain[:num_neurons]
    C = Cin[:num_neurons]
    C_raw = Cin_raw[:num_neurons]
    S = Sin[:num_neurons]
    center = center[:num_neurons]

    # print("Time in init_neurons_corr_pnr ", time() -start)
    return A, C, C_raw, S, center