def set_trace():
    # Put this call right before the line where the exception occurs to debug.
    # Press "c" to resume the program.
    import sys
    from IPython.core.debugger import Pdb
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
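# Usage sketch for the helper above (hedged: `buggy_division` and its inputs
# are illustrative only, not from the original source). Drop set_trace() on
# the line just before the statement you suspect; the debugger stops there
# with the caller's frame active, and "c" resumes execution.
def buggy_division(values):
    total = sum(values)
    set_trace()  # debugger stops here; inspect `values`, then press "c"
    return total / len(values)  # raises ZeroDivisionError for values=[]

buggy_division([1, 2, 3])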
def calc_BGW(image_fn, output, fformat='GTiff', bands=np.arange(6) + 1, ndv=-9999):
    assert min(bands) > 0, 'Bands specified must be above 0 (1 indexed)'

    # TM reflectance tasseled cap coefficients
    refW = np.array([0.0315, 0.2021, 0.3102, 0.1954, -0.6806, -0.6109])
    refG = np.array([-0.1603, -0.2189, -0.4934, 0.7940, -0.0002, -0.1446])
    refB = np.array([0.2043, 0.4158, 0.5524, 0.5741, 0.3124, 0.2330])
    bgw = ['TC brightness', 'TC greenness', 'TC wetness']

    # Open input image
    image_ds = gdal.Open(image_fn, gdal.GA_ReadOnly)

    # Allocate the input bands plus three extra channels for B/G/W
    n_band = bands.size + 3
    image = np.zeros((image_ds.RasterYSize, image_ds.RasterXSize, n_band),
                     dtype=gdal_array.GDALTypeCodeToNumericTypeCode(
                         image_ds.GetRasterBand(1).DataType))
    for i, b in enumerate(bands):
        image[:, :, i] = image_ds.GetRasterBand(b).ReadAsArray()

    test = image[2500:3000, 2500:3000, :]  # subset for inspection in the debugger
    from IPython.core.debugger import Pdb
    Pdb().set_trace()

    # Contract the input bands against the coefficients (the keyword is
    # `axes`, not `axis`), writing into the extra channels allocated above
    image[:, :, bands.size] = np.tensordot(image[:, :, :bands.size], refB, axes=(2, 0))
    image[:, :, bands.size + 1] = np.tensordot(image[:, :, :bands.size], refG, axes=(2, 0))
    image[:, :, bands.size + 2] = np.tensordot(image[:, :, :bands.size], refW, axes=(2, 0))

    BGW = image[:, :, bands.size:]
    # Valid only where every input band is not the NoData value
    mask = np.all(image[:, :, :bands.size] != ndv, axis=2).astype(np.uint8)

    from IPython.core.debugger import Pdb
    Pdb().set_trace()

    # Previous per-band loop implementation:
    # # Init BGW
    # BGW = np.zeros((image_ds.RasterYSize, image_ds.RasterXSize, 3),
    #                dtype=gdal_array.GDALTypeCodeToNumericTypeCode(
    #                    image_ds.GetRasterBand(1).DataType))
    #
    # # Init mask
    # mask = np.ones((image_ds.RasterYSize, image_ds.RasterXSize), dtype=np.uint8)
    #
    # # Loop through bands calculating BGW
    # for i, b in enumerate(bands):
    #     # Open band
    #     band = image_ds.GetRasterBand(b).ReadAsArray()
    #
    #     # Calculate BGW
    #     BGW[:, :, 0] = BGW[:, :, 0] + band * refB[i]
    #     BGW[:, :, 1] = BGW[:, :, 1] + band * refG[i]
    #     BGW[:, :, 2] = BGW[:, :, 2] + band * refW[i]
    #
    #     # Update mask
    #     mask = np.logical_and(mask == 1, band != ndv).astype(np.uint8)

    # Apply mask
    masked = (mask == 0)
    for b in range(BGW.shape[2]):
        BGW[masked, b] = ndv

    # Setup for output
    driver = gdal.GetDriverByName(fformat)
    out_ds = driver.Create(output, image_ds.RasterXSize, image_ds.RasterYSize,
                           3, image_ds.GetRasterBand(1).DataType)
    for b in range(BGW.shape[2]):
        out_ds.GetRasterBand(b + 1).WriteArray(BGW[:, :, b])
        out_ds.GetRasterBand(b + 1).SetNoDataValue(ndv)
        out_ds.GetRasterBand(b + 1).SetDescription(bgw[b])
    out_ds.SetProjection(image_ds.GetProjection())
    out_ds.SetGeoTransform(image_ds.GetGeoTransform())
    out_ds = None
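# The tensordot form above replaces the commented per-band loop; this
# self-contained sketch (synthetic data, illustrative names) checks that the
# two formulations agree, which is the key assumption behind the refactor.
import numpy as np

rng = np.random.default_rng(0)
demo = rng.random((4, 5, 6))  # nrow x ncol x 6 input bands
refB_demo = np.array([0.2043, 0.4158, 0.5524, 0.5741, 0.3124, 0.2330])

loop_B = np.zeros((4, 5))
for i in range(6):
    loop_B += demo[:, :, i] * refB_demo[i]  # per-band accumulation

tensor_B = np.tensordot(demo, refB_demo, axes=(2, 0))  # contraction over band axis
assert np.allclose(loop_B, tensor_B)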
from hiveio import par_init  # noqa

import fblearner.flow.projects.dper.flow_types as T
import fblearner.flow.projects.dper.utils.assemble as assemble_utils
import fblearner.flow.projects.dper.utils.data as data_utils
import fblearner.flow.projects.dper.utils.visualize as vis_utils
import fblearner.flow.projects.dper.workflows.ads_config as default_config
import fblearner.flow.projects.dper.ifbpy.compute_meta as compute_meta
from fblearner.flow.projects.dper.ifbpy.execution import test_model_locally
import fblearner.flow.projects.dper.utils.perf_estimator_execution as perf_estimator_execution
import json

# `core` is not imported in the original snippet; it is assumed to be
# caffe2's core module.
from caffe2.python import core

core.GlobalInit(['ifbpy'])

from IPython.core.debugger import Pdb
ipdb = Pdb()


# In[ ]:


# When testing a particular flow, load model options from a JSON file and
# pass them to model_options:
# local_prod_json_file = "/home/dongli/fbsource/fbcode/caffe2/caffe2/net_config/33252482/prod_model.json"
# with open(local_prod_json_file, 'r') as f:
#     prod_model_options = sparse_nn.MODEL_OPTIONS.decode(json.loads(f.read()))
# print(prod_model_options)


# In[ ]:


preproc_options = default_config.DEFAULT_PREPROC_OPTIONS

# When testing a particular flow, load the model preproc options from a JSON
# file instead.
def synthesize(fs, f0s, SPEC, NM=None, wavlen=None,
               ener_multT0=False,
               nm_cont=False,        # If False, force binary state of the noise mask (by thresholding at 0.5)
               nm_lowpasswinlen=9,
               hp_f0coef=0.5,        # factor of f0 for the cut-off of the high-pass filter (def. 0.5*f0)
               antipreechohwindur=0.001,  # [s] Used to damp the signal at the beginning AND end of the signal
               # The following options post-process the features, after the
               # generation/transformation and thus before waveform synthesis
               pp_f0_rmsteps=False,  # Remove steps in the f0 curve (see sigproc.resampling.f0s_rmsteps(.))
               pp_f0_smooth=None,    # Smooth the f0 curve using median and FIR filters of given window duration [s]
               pp_atten1stharminsilences=None,  # Typical value is -25
               verbose=1):

    winnbper = 4  # Number of periods in a synthesis window. It still contains
                  # only one single pulse, but leaves space for the VTF to
                  # decay without being cut abruptly.

    # Copy the inputs to avoid modifying them
    f0s = f0s.copy()
    SPEC = SPEC.copy()
    if NM is not None:
        NM = NM.copy()
    else:
        NM = np.zeros(SPEC.shape)

    # Check the size of the inputs
    if f0s.shape[0] != SPEC.shape[0]:
        raise ValueError('F0 size {} and spectrogram size {} do not match'.format(
            f0s.shape[0], SPEC.shape[0]))  # pragma: no cover
    if NM is not None:
        if SPEC.shape != NM.shape:
            raise ValueError('spectrogram size {} and NM size {} do not match.'.format(
                SPEC.shape, NM.shape))  # pragma: no cover

    if wavlen is None:
        wavlen = int(np.round(f0s[-1, 0] * fs))
    dftlen = (SPEC.shape[1] - 1) * 2
    shift = np.median(np.diff(f0s[:, 0]))
    if verbose > 0:
        print('PML Synthesis (dur={}s, fs={}Hz, f0 in [{:.0f},{:.0f}]Hz, shift={}s, dftlen={})'.format(
            wavlen / float(fs), fs, np.min(f0s[:, 1]), np.max(f0s[:, 1]), shift, dftlen))

    # Prepare the features

    # Enforce continuous f0
    f0s[:, 1] = np.interp(f0s[:, 0], f0s[f0s[:, 1] > 0, 0], f0s[f0s[:, 1] > 0, 1])

    # If asked, remove steps in the f0 curve
    if pp_f0_rmsteps:
        f0s = sp.f0s_rmsteps(f0s)

    # If asked, smooth the f0 curve using median and FIR filters
    if pp_f0_smooth is not None:
        print('    Smoothing f0 curve using {}[s] window'.format(pp_f0_smooth))
        import scipy.signal as sig
        lf0 = np.log(f0s[:, 1])
        bcoefslen = int(0.5 * pp_f0_smooth / shift) * 2 + 1
        lf0 = sig.medfilt(lf0, bcoefslen)
        bcoefs = np.hamming(bcoefslen)
        bcoefs = bcoefs / sum(bcoefs)
        lf0 = sig.filtfilt(bcoefs, [1], lf0)
        f0s[:, 1] = np.exp(lf0)

    winlenmax = getwinlen(np.min(f0s[:, 1]), fs, winnbper)
    if winlenmax > dftlen:
        warnings.warn('\n\nWARNING: The maximum window length ({}) is bigger than the DFT length ({}). Please increase the DFT length of your spectral features (the second dimension) or check if the f0 curve has extremely low values and try to clip them to higher values (at least higher than 50Hz). The f0 curve has been clipped to {}Hz.\n\n'.format(winlenmax, dftlen, winnbper * fs / float(dftlen)))  # pragma: no cover
        f0s[:, 1] = np.clip(f0s[:, 1], winnbper * fs / float(dftlen - 2), 1e6)

    if NM is not None:
        # Remove noise below f0, as it is supposed to be already the case
        for n in range(NM.shape[0]):
            NM[n, :int((float(dftlen) / fs) * 2 * f0s[n, 1])] = 0.0

        if not nm_cont:
            print('    Forcing binary noise mask')
            NM[NM <= 0.5] = 0.0  # To be sure that voiced segments are not hoarse
            NM[NM > 0.5] = 1.0   # To be sure the noise segments are fully noisy

    # Generate the pulse positions (i.e. the synthesis instants, the GCIs in
    # voiced segments)
    ts = [0.0]
    while ts[-1] < float(wavlen) / fs:
        cf0 = np.interp(ts[-1], f0s[:, 0], f0s[:, 1])
        if cf0 < 50.0:
            cf0 = 50
        ts.append(ts[-1] + (1.0 / cf0))
    ts = np.array(ts)
    f0s = np.vstack((ts, np.interp(ts, f0s[:, 0], f0s[:, 1]))).T

    # Resample the features at the pulse positions

    # Spectral envelope uses the nearest frame, to avoid over-smoothing
    SPECR = np.zeros((f0s.shape[0], dftlen // 2 + 1))
    for n, t in enumerate(f0s[:, 0]):
        # Nearest: way better for plosives
        idx = int(np.round(t / shift))
        idx = np.clip(idx, 0, SPEC.shape[0] - 1)
        SPECR[n, :] = SPEC[idx, :]

    # Keep track of the median energy [dB] over the whole signal
    ener = np.mean(SPECR, axis=1)
    idxacs = np.where(sp.mag2db(ener) > sp.mag2db(np.max(ener)) - 30)[0]  # Get approx active frames # TODO Param
    enermed = sp.mag2db(np.median(ener[idxacs]))  # Median energy [dB]
    ener = sp.mag2db(ener)

    # Resample the noise feature at the pulse positions
    # Smooth the frequency response of the mask in order to avoid Gibbs
    # (poor Gibbs, nobody wants to see him)
    nm_lowpasswin = np.hanning(nm_lowpasswinlen)
    nm_lowpasswin /= np.sum(nm_lowpasswin)
    NMR = np.zeros((f0s.shape[0], dftlen // 2 + 1))
    for n, t in enumerate(f0s[:, 0]):
        idx = int(np.round(t / shift))  # Nearest is better for plosives
        idx = np.clip(idx, 0, NM.shape[0] - 1)
        NMR[n, :] = NM[idx, :]
        if nm_lowpasswinlen > 1:
            NMR[n, :] = scipy.signal.filtfilt(nm_lowpasswin, [1.0], NMR[n, :])
    NMR = np.clip(NMR, 0.0, 1.0)

    # The complete waveform that we will fill with the pulses
    wav = np.zeros(wavlen)
    # Half window on the left of the synthesized segment to avoid pre-echo
    dampinhwin = np.hanning(1 + 2 * int(np.round(antipreechohwindur * fs)))  # 1ms forced damping window
    dampinhwin = dampinhwin[:(len(dampinhwin) - 1) // 2 + 1]

    for n, t in enumerate(f0s[:, 0]):
        f0 = f0s[n, 1]
        if verbose > 1:
            print("\rPM Synthesis (python) t={:4.3f}s f0={:3.3f}Hz               ".format(t, f0), end='')

        # Window's length
        # TODO It should be ensured that the beginning and end of the noise
        #      is within the window. Nothing is doing this currently!
        winlen = getwinlen(f0, fs, winnbper)
        # TODO We also assume that the VTF's decay is shorter than
        #      winnbper-1 periods (dangerous with high pitched and tense voice).
        if winlen > dftlen:
            raise ValueError('The window length ({}) is bigger than the DFT length ({}). Please increase the dftlen of your spectral features or check if the f0 curve has extremely low values and try to clip them to higher values (at least higher than 50[Hz])'.format(winlen, dftlen))  # pragma: no cover

        # Set the rough position of the pulse in the window (the closest sample)
        # We keep a third of the window (1 period) on the left because the
        # pulse signal is minimum phase. And 2/3rd (remaining 2 periods)
        # on the right to let the VTF decay.
        pulseposinwin = int((1.0 / winnbper) * winlen)

        # The sample indices of the current pulse wrt. the final waveform
        winidx = int(round(fs * t)) + np.arange(winlen) - pulseposinwin

        # Build the pulse spectrum

        # Start with a Dirac
        S = np.ones(dftlen // 2 + 1, dtype=np.complex64)

        # Add the delay to place the Dirac at the "GCI": exp(-j*2*pi*t_i)
        delay = -pulseposinwin - fs * (t - int(round(fs * t)) / float(fs))
        S *= np.exp((delay * 2j * np.pi / dftlen) * np.arange(dftlen // 2 + 1))

        # Add the spectral envelope
        # Both amplitude and phase
        E = SPECR[n, :]  # Take the amplitude from the given one
        if hp_f0coef is not None:
            # High-pass it to avoid any residual DC component
            fcut = hp_f0coef * f0
            if pp_atten1stharminsilences is not None and ener[n] - enermed < pp_atten1stharminsilences:
                fcut = 1.5 * f0  # Try to cut between the first and second harmonic
            HP = sp.butter2hspec(fcut, 4, fs, dftlen, high=True)
            E *= HP  # Not necessarily good as it is non-causal, so make it causal...
            # ... together with the VTF response below.

        # Build the phase of the envelope from the amplitude
        E = sp.hspec2minphasehspec(E, replacezero=True)  # We spend 2 FFTs here!
        S *= E  # Add it to the current pulse

        # Add energy correction wrt f0.
        # STRAIGHT and AHOCODER vocoders do it.
        # (why? to equalize the energy when changing the pulse's duration?)
        if ener_multT0:
            S *= np.sqrt(fs / f0)

        # Generate the segment of Gaussian noise
        # Use mid-points before/after pulse position
        if n > 0:
            leftbnd = int(np.round(fs * 0.5 * (f0s[n - 1, 0] + t)))
        else:
            leftbnd = int(np.round(fs * (t - 0.5 / f0s[n, 1])))  # int(0)
        if n < f0s.shape[0] - 1:
            rightbnd = int(np.round(fs * 0.5 * (t + f0s[n + 1, 0]))) - 1
        else:
            rightbnd = int(np.round(fs * (t + 0.5 / f0s[n, 1])))  # rightbnd=int(wavlen-1)
        gausswinlen = rightbnd - leftbnd  # The length of the noise segment
        gaussnoise4win = np.random.normal(size=(gausswinlen))  # The noise

        GN = np.fft.rfft(gaussnoise4win, dftlen)  # Move the noise to freq domain
        # Normalize it by its energy (@Yannis, that's your answer at SSW9!)
        GN /= np.sqrt(np.mean(np.abs(GN)**2))
        # Place the noise within the pulse's window
        delay = (pulseposinwin - (leftbnd - winidx[0]))
        GN *= np.exp((delay * 2j * np.pi / dftlen) * np.arange(dftlen // 2 + 1))

        # Add it to the pulse spectrum, under the condition of the mask
        S *= GN**NMR[n, :]

        # That's it! The pulse spectrum is ready!

        # Move it to the time domain
        deter = np.fft.irfft(S)[0:winlen]

        # Add half window on the left of the synthesized segment
        # to avoid any possible pre-echo
        deter[:leftbnd - winidx[0] - len(dampinhwin)] = 0.0
        deter[leftbnd - winidx[0] - len(dampinhwin):leftbnd - winidx[0]] *= dampinhwin

        # Add half window on the right
        # to avoid cutting the VTF response abruptly
        deter[-len(dampinhwin):] *= dampinhwin[::-1]

        # Write the synthesized segment in the final waveform
        if winidx[0] < 0 or winidx[-1] >= wavlen:
            # The window is partly outside of the waveform ...
            # ... thus copy only the existing part
            itouse = np.logical_and(winidx >= 0, winidx < wavlen)
            wav[winidx[itouse]] += deter[itouse]
        else:
            wav[winidx] += deter

    if verbose > 1:
        print('\r                                                               \r', end='')

    if verbose > 2:  # pragma: no cover
        import matplotlib.pyplot as plt
        plt.ion()
        _, axs = plt.subplots(3, 1, sharex=True, sharey=False)
        times = np.arange(len(wav)) / float(fs)
        axs[0].plot(times, wav, 'k')
        axs[0].set_ylabel('Waveform\nAmplitude')
        axs[0].grid()
        axs[1].plot(f0s[:, 0], f0s[:, 1], 'k')
        axs[1].set_ylabel('F0\nFrequency [Hz]')
        axs[1].grid()
        axs[2].imshow(sp.mag2db(SPEC).T, origin='lower', aspect='auto',
                      interpolation='none',
                      extent=(f0s[0, 0], f0s[-1, 0], 0, 0.5 * fs))
        axs[2].set_ylabel('Amp. Envelope\nFrequency [Hz]')

        from IPython.core.debugger import Pdb
        Pdb().set_trace()

    return wav
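# A minimal smoke-test sketch for synthesize() (hedged: it assumes the
# surrounding module provides numpy as np, sigproc as sp, and getwinlen; the
# constant f0 and flat envelope are synthetic stand-ins, so the output is
# only a buzzy pulse train, not speech).
import numpy as np

fs = 16000
shift = 0.005                                      # 5 ms frame shift
nframes = int(2.0 / shift)                         # 2 s of frames
dftlen = 4096
ts = shift * np.arange(nframes)
f0s = np.vstack((ts, 120.0 * np.ones(nframes))).T  # [time, f0] pairs
SPEC = 1e-2 * np.ones((nframes, dftlen // 2 + 1))  # flat amplitude envelope

wav = synthesize(fs, f0s, SPEC)                    # NM=None -> zero noise mask
print(len(wav), 'samples')                         # roughly 2*fs samples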
def set_trace():
    import sys
    from IPython.core.debugger import Pdb
    # Note: the frame accessor is sys._getframe, not sys.getframe
    Pdb(color_scheme="Linux").set_trace(sys._getframe().f_back)
def set_trace():
    import sys
    from IPython.core.debugger import Pdb
    Pdb().set_trace(sys._getframe().f_back)
def _init_pdb():
    p = Pdb(def_colors)
    p.rcLines += def_exec_lines
    return p
def hook_layer(grad):
    # Gradient hook: break into the debugger when the gradient arrives
    from IPython.core.debugger import Pdb
    Pdb().set_trace()
    print("grad z latent", grad)
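# For context, a sketch of how such a hook is typically attached in PyTorch
# (the tensor name `z_latent` is illustrative): Tensor.register_hook calls
# the function with the gradient during the backward pass.
import torch

z_latent = torch.randn(4, 8, requires_grad=True)  # hypothetical latent tensor
z_latent.register_hook(hook_layer)                # hook fires during backward()

loss = (z_latent ** 2).sum()
loss.backward()  # drops into Pdb inside hook_layer, then prints the gradient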
def set_trace(frame=None):
    update_stdout()
    wrap_sys_excepthook()
    if frame is None:
        frame = sys._getframe().f_back
    Pdb(def_colors).set_trace(frame)
def exportfile(srcf,                # Source file to export
               destf,               # Destination path to export to
               resample=None,       # [Hz] Resample the waveform to the given frequency (e.g. 44100Hz)
               highpass_fcut=None,  # [Hz] High-pass the waveform at the given frequency
               normalize=None,      # [dB] Normalise the overall file amplitude to the given level (e.g. -32dB)
               aligndelayref=None,  # [filepath] Align the source waveform temporally to the given waveform file
               usepcm16=False,      # Save the waveform using PCM16 sample format
               channelid=0          # Use only the first channel (left) if multiple channels are found
               ):
    orifs = None
    if resample is None and normalize is None and not usepcm16 and aligndelayref is None and highpass_fcut is None:
        # Copy/paste the original file, without normalization
        shutil.copy2(srcf, destf)
    else:
        wav, orifs, enc = wavread(srcf)
        if len(wav.shape) > 1:
            wav = wav[:, channelid]  # Keep only channelid in case multiple tracks are present
        wavfs = orifs
        # print("{:10.3f}".format(len(wav)/float(wavfs))+'s '+str(wavfs)+'Hz '+enc)
        if usepcm16:
            enc = 'pcm16'

        if resample is not None:
            wav = resampling.resample(wav, wavfs, resample)
            wavfs = resample

        if highpass_fcut is not None:
            (b, a) = scipy.signal.butter(4, highpass_fcut / (0.5 * wavfs), btype='high')
            wav = scipy.signal.filtfilt(b, a, wav)

        if normalize is not None:
            wav_spn = sp.level_normalise(wav, wavfs, level=normalize, warn_onclip=False)
            # wav_sv56, _ = interfaces.sv56demo(wav, wavfs, level=normalize)
            if 0:
                import matplotlib.pyplot as plt
                plt.ion()
                plt.plot(wav, 'k')
                plt.plot(wav_sv56, 'b')
                plt.plot(wav_spn, 'r')
                from IPython.core.debugger import Pdb
                Pdb().set_trace()
            wav = wav_spn

        if aligndelayref is not None:
            # Re-load the reference waveform
            refwav, refwavfs, refenc = wavread(aligndelayref)
            wav = sp.align_delay(wav, wavfs, refwav, refwavfs)

        wavwrite(destf, wav, fs=wavfs, enc=enc)

    return orifs
""" This is an implementation of the backpropagation for feed forward network REFERENCE --------- Tom Mitchell, Machine Learning, March 1, 1997 """ import numpy as np from IPython.core.debugger import Pdb debugger = Pdb() def update_weights(W, V, X, T, lr=0.02): """Backpropagation """ _, D = X.shape # D input features M, _ = V.shape # M hidden units N, K = T.shape # N number of examples, K classes b = np.ones((N, 1), dtype=np.float32) X1 = np.concatenate((b, X), axis=1) # stochastic gradient decent indexes = np.random.choice(N, N, replace=False) for i in indexes: Xi = X1[i, :] Ti = T[i, :] H = forward(W, Xi, sigmoid) Z = forward(V, H, softmax)
def pointwise_accuracy_stats(query, target_set, count, raw_pred, prefix=''):
    pred = raw_pred.argmax(dim=1).int()
    # Pdb().set_trace()
    non_zero_ind = (query > 0)
    copy_accuracy = (query[non_zero_ind] == pred[non_zero_ind]).sum().float() / non_zero_ind.sum()
    copy_point_total = non_zero_ind.sum().item()

    unique_point_ind = (target_set[:, 0, :] == target_set[:, 1, :])
    unique_point_ind = unique_point_ind * (~non_zero_ind)
    unique_point_accuracy = (target_set[:, 0, :][unique_point_ind] ==
                             pred[unique_point_ind]).sum().float() / unique_point_ind.sum()
    unique_point_total = unique_point_ind.sum().item()

    ambiguous_point_ind = ~(target_set[:, 0, :] == target_set[:, 1, :])
    ambiguous_point_accuracy = (target_set[:, 0, :][ambiguous_point_ind] == pred[ambiguous_point_ind]).sum()
    ambiguous_point_accuracy += (target_set[:, 1, :][ambiguous_point_ind] == pred[ambiguous_point_ind]).sum()
    ambiguous_point_accuracy = ambiguous_point_accuracy.float() / ambiguous_point_ind.sum()
    ambiguous_point_total = ambiguous_point_ind.sum().item()

    total_points = query.numel()
    # Pdb().set_trace()
    strict_acc_count = float((target_set[:, 0, :] == pred).all(dim=1).sum() +
                             (((target_set[:, 1, :] == pred).all(dim=1).int()) * (count - 1)).sum())
    strict_accuracy = strict_acc_count / float(pred.size(0))

    lac = ((target_set[:, 0, :] == pred) |
           (target_set[:, 1, :] == pred)).all(dim=1).sum().float().item()
    la = lac / float(pred.size(0))

    lousy_accuracy = (target_set[:, 0, :] == pred).int() + \
        (target_set[:, 1, :] == pred).int() * (count - 1).unsqueeze(1).expand_as(target_set[:, 0, :])
    lousy_accuracy = ((lousy_accuracy > 0).sum(dim=1) == 81)  # 81 cells = a full 9x9 Sudoku board
    lousy_acc_count = lousy_accuracy.sum().float()
    lousy_accuracy = (lousy_acc_count / pred.shape[0]).item()

    corrected_accuracy = []
    for i, x in enumerate(pred):
        corrected_accuracy.append(is_safe_sudoku(x, query[i], 9))
    corrected_accuracy = torch.tensor(corrected_accuracy).float().mean()

    if lac != lousy_acc_count:
        Pdb().set_trace()

    rv = OrderedDict()
    rv[prefix + 'copy_acc'] = copy_accuracy.item()
    rv[prefix + 'unique_pt_acc'] = unique_point_accuracy.item()
    rv[prefix + 'amb_pt_acc'] = ambiguous_point_accuracy.item()
    rv[prefix + 'lousy_acc'] = lousy_accuracy
    rv[prefix + 'strict_acc'] = strict_accuracy
    rv[prefix + 'total_pts'] = total_points
    rv[prefix + 'copy_pts'] = copy_point_total
    rv[prefix + 'unique_pts'] = unique_point_total
    rv[prefix + 'amb_pts'] = ambiguous_point_total
    rv[prefix + 'strict_count'] = strict_acc_count
    rv[prefix + 'lousy_count'] = lac
    rv[prefix + 'corrected_acc'] = corrected_accuracy.item()
    return rv
def analysisf(self, fwav, ff0, f0_min, f0_max, fspec, faper, fvuv, **kwargs):
    print('Extracting WORLD features from: ' + fwav)
    wav, fs, _ = sp.wavread(fwav)

    if 'preproc_hp' in kwargs:
        if kwargs['preproc_hp'] == 'auto':
            kwargs['preproc_hp'] = f0_min
        self.preprocwav(wav, fs, highpass=kwargs['preproc_hp'])
    else:
        self.preprocwav(wav, fs)

    import pyworld as pw

    if 0:
        # Check direct copy re-synthesis without compression/encoding
        print(pw.__file__)
        # _f0, ts = pw.dio(wav, fs, f0_floor=f0_min, f0_ceil=f0_max, channels_in_octave=2, frame_period=self.shift*1000.0)
        _f0, ts = pw.dio(wav, fs, f0_floor=f0_min, f0_ceil=f0_max,
                         channels_in_octave=2, frame_period=self.shift * 1000.0)
        # _f0, ts = pw.harvest(wav, fs)
        f0 = pw.stonemask(wav, _f0, ts, fs)
        SPEC = pw.cheaptrick(wav, f0, ts, fs, fft_size=self.dftlen)
        APER = pw.d4c(wav, f0, ts, fs, fft_size=self.dftlen)
        resyn = pw.synthesize(f0.astype('float64'), SPEC.astype('float64'),
                              APER.astype('float64'), fs, self.shift * 1000.0)
        sp.wavwrite('resynth.wav', resyn, fs, norm_abs=True,
                    force_norm_abs=True, verbose=1)
        from IPython.core.debugger import Pdb
        Pdb().set_trace()

    _f0, ts = pw.dio(wav, fs, f0_floor=f0_min, f0_ceil=f0_max,
                     channels_in_octave=2, frame_period=self.shift * 1000.0)
    f0 = pw.stonemask(wav, _f0, ts, fs)
    SPEC = pw.cheaptrick(wav, f0, ts, fs, fft_size=self.dftlen)
    # SPEC = 10.0*np.sqrt(SPEC)  # TODO Best gain correction I could find. Hard to find a good one between PML's and WORLD's different syntheses
    APER = pw.d4c(wav, f0, ts, fs, fft_size=self.dftlen)

    unvoiced = np.where(f0 < 20)[0]
    f0 = np.interp(ts, ts[f0 > 0], f0[f0 > 0])
    f0 = np.log(f0)
    makedirs(os.path.dirname(ff0))
    f0.astype('float32').tofile(ff0)

    vuv = np.ones(len(f0))
    vuv[unvoiced] = 0
    makedirs(os.path.dirname(fvuv))
    vuv.astype('float32').tofile(fvuv)

    SPEC = self.compress_spectrum(SPEC, fs, self.spec_size)
    makedirs(os.path.dirname(fspec))
    SPEC.astype('float32').tofile(fspec)

    APER = sp.linbnd2fwbnd(APER, fs, self.dftlen, self.aper_size)
    APER = sp.mag2db(APER)
    makedirs(os.path.dirname(faper))
    APER.astype('float32').tofile(faper)

    # CMP = np.concatenate((f0.reshape((-1,1)), SPEC, APER, vuv.reshape((-1,1))), axis=1)  # (This is not a necessity)

    if 0:
        import matplotlib.pyplot as plt
        plt.ion()
        resyn = self.synthesis(CMP)
        sp.wavwrite('resynth.wav', resyn, fs, norm_abs=True,
                    force_norm_abs=True, verbose=1)
        from IPython.core.debugger import Pdb
        Pdb().set_trace()
def trueenv(lA, order, maxit=200, maxtol=1, usewin=True, presmooth_factor=None, debug=0):
    dftlen = (len(lA) - 1) * 2

    if presmooth_factor is not None:
        lPA = trueenv(lA, int(order / presmooth_factor), maxit=maxit,
                      maxtol=maxtol, presmooth_factor=None, debug=0)
        slim = int(0.25 * dftlen / order)
        # Correct only the bins "around" the DC ("around" defined by the order)
        lA[:slim] = np.real(lPA[:slim])

    if usewin:
        order = int(np.round(1.2 * order))  # [1] 1.66
        win = np.hamming(2 * order + 1)
        win = win[(len(win) - 1) // 2:]

    lA = np.real(lA)
    lA0 = lA.copy()

    n = 0  # nb iterations
    maxdiff = np.inf
    lV = lA
    cc = np.zeros(1 + order)
    while n < maxit and maxdiff > maxtol:
        # print('iter: '+str(n))
        ccp = np.fft.irfft(lA)
        ccp = ccp[:dftlen // 2 + 1]
        ccp[1:-1] *= 2

        if usewin:
            ccd = ccp
            ccd[:1 + order] -= cc
            Ei = np.sqrt(np.sum(ccd[:1 + order]**2))
            Eo = np.sqrt(np.sum(ccd[1 + order:]**2))
            # Eo = np.sqrt(np.sum((cca[1+order:])**2))
            lamb = np.sqrt((Ei + Eo) / Ei)
            cc = lamb * ccd[:1 + order] + cc  # Eq. (5) in [1]
            # lamb = (Ei+Eo)/Ei
            # cc = (lamb*win)*ccd[:1+order] + cc  # Eq. (5) in [1]  # TODO Doesn't work !?!?!
            # print('cc ener='+str(np.sqrt(np.sum(cc**2))))
            # cc = lamb*win*ccp
        else:
            cc = ccp

        lV = np.fft.rfft(cc, dftlen)
        lV = np.real(lV)
        lA = np.maximum(lA, lV)  # Max of log amplitudes
        maxdiff = np.max(lA0 - lV)  # Can create over-shoot
        # print('maxdiff='+str(maxdiff))

        if debug > 0:
            import matplotlib.pyplot as plt
            plt.clf()
            plt.plot(lA0, 'k')
            # plt.plot(np.fft.rfft(ccp, dftlen), 'g')
            plt.plot(lV, 'r')
            plt.plot(lA, 'b')
            # plt.ylim((-55, -48))
            plt.ylim((-0.02, 0.001))
            plt.xlim((0.0, dftlen / 2))
            from IPython.core.debugger import Pdb
            Pdb().set_trace()

        n += 1

    # print('nb iter={}, maxdiff={}'.format(n, maxdiff))

    if debug > 0:
        import matplotlib.pyplot as plt
        plt.clf()
        plt.ion()
        plt.plot(lA0, 'k')
        plt.plot(lA, 'b')
        plt.ylim((-10.0, 1))
        from IPython.core.debugger import Pdb
        Pdb().set_trace()

    return lV
from __future__ import print_function

import copy

import torch.utils.data
from IPython.core.debugger import Pdb

from rlkit.data_management.images import normalize_image

debug = Pdb().set_trace

# Adapted from pytorch examples
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision.utils import save_image

from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.misc.ml_util import ConstantSchedule
from rlkit.pythonplusplus import identity
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.core import PyTorchModule
from rlkit.core import logger
import os.path as osp
from rlkit.envs.multitask.point2d import MultitaskImagePoint2DEnv

import numpy as np


class ACAI(PyTorchModule):
    def __init__(
            self,
def run(statement, globals=None, locals=None):
    Pdb(def_colors).run(statement, globals, locals)
def result_map(out, results, table, attr_columns, attr_funcs=None, **query_kwds):
    """ Populate `out` from data queried from saved record model results

    Args:
        out (np.ndarray): 2D or 3D (nband x nrow x ncol) array to fill
            result data into
        results (iterable): A list of :class:`HDF5ResultsStore` files
        table (str): The table to retrieve the data from
        attr_columns (tuple): Attributes from results table to map. The
            number of attributes given should be the same as the number of
            bands in `out`.
        attr_funcs (iterable): Optionally, provide a function to apply to
            each attribute described in `attr_columns`. Please supply `None`,
            or an iterable of either a `callable` object or `None` for each
            attribute in `attr_columns`.
        query_kwds (dict): Additional search terms to pass to
            :meth:`HDF5ResultsStore.query`

    Returns:
        np.ndarray: `out`, but with desired result file attributes mapped
            into the image
    """
    columns = ('px', 'py', ) + attr_columns

    def guard_out(out):
        shape = (1, ) * (3 - out.ndim) + out.shape
        return np.atleast_3d(out).reshape(*shape)

    out = guard_out(out)
    assert out.ndim == 3, '`guard_out` should have worked!'
    if out.shape[0] != len(attr_columns):
        raise ValueError('Provided `out` must have "{0}" bands to store '
                         '"{1!r}" but it has "{2}" number of bands'.format(
                             len(attr_columns), attr_columns, out.shape[0]))

    for _result in results:
        try:
            with _result as result:
                if not table:
                    from IPython.core.debugger import Pdb
                    Pdb().set_trace()  # NOQA
                segs = result.query(table, columns=columns, **query_kwds)
                y, x = rasterio.transform.rowcol(result.transform,
                                                 segs['px'], segs['py'])
                for bidx, attr in enumerate(attr_columns):
                    out[bidx, y, x] = segs[attr]
        except tb.exceptions.HDF5ExtError as err:
            logger.error('Result file {} is corrupt or unreadable'.format(
                _result.filename), err)

    return out
def runcall(*args, **kwargs):
    return Pdb(def_colors).runcall(*args, **kwargs)
def train_oneparamset(self, indir, outdir, wdir, fid_lst_tra, fid_lst_val, params_savefile, trialstr='', cont=None):

    print('Loading all validation data at once ...')
    # X_val, Y_val = data.load_inoutset(indir, outdir, wdir, fid_lst_val, verbose=1)
    X_vals = data.load(indir, fid_lst_val, verbose=1, label='Context labels: ')
    Y_vals = data.load(outdir, fid_lst_val, verbose=1, label='Output features: ')
    X_vals, Y_vals = data.croplen([X_vals, Y_vals])
    print('    {} validation files'.format(len(fid_lst_val)))
    print('    number of validation files / train files: {:.2f}%'.format(100.0 * float(len(fid_lst_val)) / len(fid_lst_tra)))

    print('Model initial status before training')
    worst_val = data.cost_0pred_rmse(Y_vals)
    print("    0-pred validation RMSE = {} (100%)".format(worst_val))
    init_pred_rms = data.prediction_rms(self._model, [X_vals])
    print('    initial RMS of prediction = {}'.format(init_pred_rms))
    init_val = data.cost_model_prediction_rmse(self._model, [X_vals], Y_vals)
    best_val = None
    print("    initial validation RMSE = {} ({:.4f}%)".format(init_val, 100.0 * init_val / worst_val))

    nbbatches = int(len(fid_lst_tra) / self.cfg.train_batch_size)
    print('    using {} batches of {} sentences each'.format(nbbatches, self.cfg.train_batch_size))
    print('    model #parameters={}'.format(self._model.count_params()))

    nbtrainframes = 0
    for fid in fid_lst_tra:
        X = data.loadfile(outdir, fid)
        nbtrainframes += X.shape[0]
    print('    Training set: {} sentences, #frames={} ({})'.format(len(fid_lst_tra), nbtrainframes, time.strftime('%H:%M:%S', time.gmtime(nbtrainframes * self._model.vocoder.shift))))
    print('    #parameters/#frames={:.2f}'.format(float(self._model.count_params()) / nbtrainframes))

    if self.cfg.train_nbepochs_scalewdata and self.cfg.train_batch_lengthmax is not None:
        # During an epoch, the whole data is _not_ seen by the training since
        # cfg.train_batch_lengthmax is limited and smaller than the sentence
        # size. To compensate for this, and make the config below less
        # dependent on the data, the min and max nbepochs are scaled
        # according to the missing number of frames seen.
        # TODO Should consider only non-silent frames, many recordings have a lot of pre and post silences
        epochcoef = nbtrainframes / float(self.cfg.train_batch_lengthmax * len(fid_lst_tra))
        print('    scale number of epochs wrt number of frames')
        self.cfg.train_min_nbepochs = int(self.cfg.train_min_nbepochs * epochcoef)
        self.cfg.train_max_nbepochs = int(self.cfg.train_max_nbepochs * epochcoef)
        print('    train_min_nbepochs={}'.format(self.cfg.train_min_nbepochs))
        print('    train_max_nbepochs={}'.format(self.cfg.train_max_nbepochs))

    self.prepare()  # This has to be overwritten by sub-classes

    costs = defaultdict(list)
    epochs_modelssaved = []
    epochs_durs = []
    nbnodecepochs = 0
    generator_updates = 0
    epochstart = 1
    if cont and len(glob.glob(os.path.splitext(params_savefile)[0] + '-trainingstate-last.h5*')) > 0:
        print('    reloading previous training state ...')
        savedcfg, extras, rngstate = self.loadTrainingState(os.path.splitext(params_savefile)[0] + '-trainingstate-last.h5')
        np.random.set_state(rngstate)
        cost_val = extras['cost_val']
        # Restoring some local variables
        costs = extras['costs']
        epochs_modelssaved = extras['epochs_modelssaved']
        epochs_durs = extras['epochs_durs']
        generator_updates = extras['generator_updates']
        epochstart = extras['epoch'] + 1
        # Restore the saving criteria only if none of these 3 cfg values changed:
        if (savedcfg.train_min_nbepochs == self.cfg.train_min_nbepochs) and (savedcfg.train_max_nbepochs == self.cfg.train_max_nbepochs) and (savedcfg.train_cancel_nodecepochs == self.cfg.train_cancel_nodecepochs):
            best_val = extras['best_val']
            nbnodecepochs = extras['nbnodecepochs']

    print_log("    start training ...")
    epoch = -1
    for epoch in range(epochstart, 1 + self.cfg.train_max_nbepochs):
        timeepochstart = time.time()
        # Need to restart from an ordered state to make the shuffling
        # repeatable after reloading the training state (the shuffling will
        # be different anyway)
        rndidx = np.arange(int(nbbatches * self.cfg.train_batch_size))
        np.random.shuffle(rndidx)
        rndidxb = np.split(rndidx, nbbatches)
        cost_tra = None
        costs_tra_batches = []
        costs_tra_gen_wgan_lse_ratios = []
        load_times = []
        train_times = []
        for batchid in range(nbbatches):

            timeloadstart = time.time()
            print_tty('\r    Training batch {}/{}'.format(1 + batchid, nbbatches))

            # Load training data online, because data is often too heavy to hold in memory
            fid_lst_trab = [fid_lst_tra[bidx] for bidx in rndidxb[batchid]]
            X_trab, Y_trab, W_trab = data.load_inoutset(indir, outdir, wdir, fid_lst_trab, length=self.cfg.train_batch_length, lengthmax=self.cfg.train_batch_lengthmax, maskpadtype=self.cfg.train_batch_padtype, cropmode=self.cfg.train_batch_cropmode)

            if 0:  # Plot batch
                import matplotlib.pyplot as plt
                plt.ion()
                plt.imshow(Y_trab[0, ].T, origin='lower', aspect='auto', interpolation='none', cmap='jet')
                from IPython.core.debugger import Pdb; Pdb().set_trace()

            load_times.append(time.time() - timeloadstart)
            print_tty(' (iter load: {:.6f}s); training '.format(load_times[-1]))

            timetrainstart = time.time()
            cost_tra = self.train_on_batch(batchid, X_trab, Y_trab)  # This has to be overwritten by sub-classes
            train_times.append(time.time() - timetrainstart)

            if cost_tra is not None:
                print_tty('err={:.4f} (iter train: {:.4f}s)          '.format(cost_tra, train_times[-1]))
                if np.isnan(cost_tra):  # pragma: no cover
                    print_log('    previous costs: {}'.format(costs_tra_batches))
                    print_log('    E{} Batch {}/{} train cost = {}'.format(epoch, 1 + batchid, nbbatches, cost_tra))
                    raise ValueError('ERROR: Training cost is nan!')
                costs_tra_batches.append(cost_tra)
        print_tty('\r                                                           \r')

        costs['model_training'].append(np.mean(costs_tra_batches))

        cost_val = self.update_validation_cost(costs, X_vals, Y_vals)  # This has to be overwritten by sub-classes

        print_log("    E{}/{} {}  cost_tra={:.6f} (load:{}s train:{}s)  cost_val={:.6f} ({:.4f}% RMSE)  {} MiB GPU {} MiB RAM".format(epoch, self.cfg.train_max_nbepochs, trialstr, costs['model_training'][-1], time2str(np.sum(load_times)), time2str(np.sum(train_times)), cost_val, 100 * costs['model_rmse_validation'][-1] / worst_val, tf_gpu_memused(), proc_memresident()))
        sys.stdout.flush()

        if np.isnan(cost_val):
            raise ValueError('ERROR: Validation cost is nan!')
        # if (self._errtype == 'LSE') and (cost_val >= self.cfg.train_cancel_validthresh * worst_val):
        #     raise ValueError('ERROR: Validation cost blew up! It is higher than {} times the worst possible values'.format(self.cfg.train_cancel_validthresh))  # TODO

        self._model.save(os.path.splitext(params_savefile)[0] + '-last.h5', printfn=print_log, extras={'cost_val': cost_val})

        # Save model parameters
        if epoch >= self.cfg.train_min_nbepochs:  # Assume no model is good enough before self.cfg.train_min_nbepochs
            if (best_val is None) or (cost_val < best_val):  # Among all trials of hyper-parameter optimisation
                best_val = cost_val
                self._model.save(params_savefile, printfn=print_log, extras={'cost_val': cost_val}, infostr='(E{} C{:.4f})'.format(epoch, best_val))
                epochs_modelssaved.append(epoch)
                nbnodecepochs = 0
            else:
                nbnodecepochs += 1

        if self.cfg.train_log_plot:
            print_log('    saving plots')
            log_plot_costs(costs, worst_val, fname=os.path.splitext(params_savefile)[0] + '-fig_costs_' + trialstr + '.svg', epochs_modelssaved=epochs_modelssaved)

            nbsamples = 2
            nbsamples = min(nbsamples, len(X_vals))
            Y_preds = []
            for sampli in range(nbsamples):
                Y_preds.append(self._model.predict(np.reshape(X_vals[sampli], [1] + [s for s in X_vals[sampli].shape]))[0, ])
            plotsuffix = ''
            if len(epochs_modelssaved) > 0 and epochs_modelssaved[-1] == epoch:
                plotsuffix = '_best'
            else:
                plotsuffix = '_last'
            log_plot_samples(Y_vals, Y_preds, nbsamples=nbsamples, fname=os.path.splitext(params_savefile)[0] + '-fig_samples_' + trialstr + plotsuffix + '.png', vocoder=self._model.vocoder, title='E{}'.format(epoch))

        epochs_durs.append(time.time() - timeepochstart)
        print_log('    ET: {}   max TT: {}s   train ~time left: {}'.format(time2str(epochs_durs[-1]), time2str(np.median(epochs_durs[-10:]) * self.cfg.train_max_nbepochs), time2str(np.median(epochs_durs[-10:]) * (self.cfg.train_max_nbepochs - epoch))))

        self.saveTrainingState(os.path.splitext(params_savefile)[0] + '-trainingstate-last.h5', printfn=print_log, extras={'cost_val': cost_val, 'best_val': best_val, 'costs': costs, 'epochs_modelssaved': epochs_modelssaved, 'epochs_durs': epochs_durs, 'nbnodecepochs': nbnodecepochs, 'generator_updates': generator_updates, 'epoch': epoch})

        if nbnodecepochs >= self.cfg.train_cancel_nodecepochs:  # pragma: no cover
            print_log('WARNING: validation error did not decrease for {} epochs. Early stop!'.format(self.cfg.train_cancel_nodecepochs))
            break

    if best_val is None:
        raise ValueError('No model has been saved during training!')

    return {'epoch_stopped': epoch,
            'worst_val': worst_val,
            'best_epoch': epochs_modelssaved[-1] if len(epochs_modelssaved) > 0 else -1,
            'best_val': best_val}
def runeval(expression, globals=None, locals=None):
    return Pdb(def_colors).runeval(expression, globals, locals)
fs = 16000
syn = np.zeros(4 * fs)
ts = np.arange(len(syn)) / float(fs)

# Add some frequencies
freqs = [0, fs / 16.0, fs / 2 - fs / 16.0, fs / 2]
amps = -32
for freq in freqs:
    amp = db2mag(amps)
    print('Synthesise: {:8.2f}Hz at {}dB'.format(freq, amps))
    if freq == 0.0 or freq == fs / 2:
        amp /= 2
    syn += amp * 2.0 * np.cos((2 * np.pi * freq) * ts)

# Add some clicks
clicks = np.array([0.0, 1.0, 2.0, 3.0, (len(syn) - 1) / float(fs)])
syn[(clicks * fs).astype(int)] = 0.5  # builtin int: np.int is deprecated

# print(pysndfile.get_sndfile_encodings('wav'))
pysndfile.sndio.write('synth_grid_fs' + str(fs) + '.wav', syn, rate=fs,
                      format='wav', enc='float32')

if 0:
    plt.plot(ts, syn, 'k')
    from IPython.core.debugger import Pdb
    Pdb().set_trace()
def aperiodicity(wav, fs, f0s, dftlen, ams=None, outresidual=False):
    '''
    Compute the overall harmonic signal first, then estimate the noise from
    the residual. This should be a more accurate way to compute the
    aperiodicity than the original STRAIGHT implementation.
    '''
    from sigproc import sinusoidal

    # Computing the overall harm signal first, then estimating the noise from the residual
    # sins = sinusoidal.estimate_sinusoidal_params(wav, fs, f0s)
    # wavlen = len(wav)
    # sinwav = sinusoidal.synthesize_harmonics(f0s, sins, fs, wavlen)
    # res = wav-sinwav

    # Replace 0s by interpolations
    f0s = f0s.copy()
    f0s[:, 1] = np.interp(f0s[:, 0], f0s[f0s[:, 1] > 0, 0], f0s[f0s[:, 1] > 0, 1])

    if outresidual:
        ress = np.zeros(wav.shape)
        reswins = np.zeros(wav.shape)

    F = (float(fs) / dftlen) * np.arange(dftlen // 2 + 1)
    APER = np.zeros((len(f0s[:, 0]), dftlen // 2 + 1))
    for n, t in enumerate(f0s[:, 0]):
        f0 = f0s[n, 1]
        print("\rt={:0.3f}s({:.0f}%) f0={:0.2f}Hz".format(t, 100 * t / f0s[f0s.shape[0] - 1, 0], f0), end='')

        # Window's length
        winlen = int(0.5 + (3 * fs / f0) / 2) * 2 + 1  # with rounding

        # Extract the signal segment to analyse
        winidx = np.arange(-int((winlen - 1) / 2), int((winlen - 1) / 2 + 1), dtype=np.int64)
        winidx += int(0.5 + fs * t)
        if winidx[0] < 0 or winidx[-1] >= len(wav):
            # The window is partly outside of the signal ...
            wav4win = np.zeros(winlen)
            # ... copy only the existing part
            itouse = np.logical_and(winidx >= 0, winidx < len(wav))
            wav4win[itouse] = wav[winidx[itouse]]
        else:
            wav4win = wav[winidx]

        # The initial frequencies are
        freqs = f0 * np.arange(int(np.floor((fs / 2.0 - f0 / 2.0) / f0)) + 1)

        if np.linalg.norm(wav4win) < sys.float_info.epsilon:
            # The signal is empty: Add "empty" data # TODO
            continue

        # Window's shape
        win = np.blackman(winlen)
        win = win / sum(win)  # Normalize for sinusoidal content

        S = sinusoidal.compute_dft(wav4win, fs, win, dftlen, winidx, ams)
        sin = sinusoidal.extract_peaks(S, fs, f0, winlen, dftlen)
        # from IPython.core.debugger import Pdb; Pdb().set_trace()

        syn = sinusoidal.synthesize_harmonics(np.array([[((winlen - 1) / 2.0) / float(fs), f0]]), [sin], fs, winlen)
        res = wav4win - syn

        if outresidual:
            # Guarded: `ress`/`reswins` exist only when outresidual is True
            if winidx[0] < 0 or winidx[-1] >= len(wav):
                # The window is partly outside of the signal ...
                # ... copy only the existing part
                itouse = np.logical_and(winidx >= 0, winidx < len(wav))
                ress[winidx[itouse]] += res[itouse] * win[itouse]
                reswins[winidx[itouse]] += win[itouse]
            else:
                ress[winidx] += res * win
                reswins[winidx] += win

        N = sp.mag2db(np.fft.rfft(res * win, dftlen))
        E = np.interp(F, sin[0, 1:], sp.mag2db(sin[1, 1:]))
        APER[n, :] = N - E

        if t > 0.3 and 0:
            SA = sp.mag2db(S)
            SA[np.isinf(SA)] = np.finfo(SA[0]).min
            plt.plot(F, SA, 'k')
            plt.plot(sin[0, :], sp.mag2db(sin[1, :]), 'xk')
            SYN = sinusoidal.compute_dft(syn, fs, win, dftlen, winidx)
            plt.plot(F, sp.mag2db(SYN), 'b')
            plt.plot(F, E, 'b')
            plt.plot(F, sp.mag2db(np.fft.rfft(res * win, dftlen)), 'r')
            plt.plot(F, APER[n, :], 'g')
            from IPython.core.debugger import Pdb; Pdb().set_trace()

    print('\r                                        \r', end='')

    if outresidual:
        idx = reswins > 0.0
        ress[idx] /= reswins[idx]
        return APER, ress
    else:
        return APER
def debug(f, *args, **kwargs):
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwargs)
def set_trace():
    import sys
    from IPython.core.debugger import Pdb
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def debug(f, *args, **kwargs):
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme="Linux")
    return pdb.runcall(f, *args, **kwargs)
def plot_features(wav=None, fs=None, f0s=None, SPEC=None, PDD=None, NM=None):  # pragma: no cover
    # TODO Could test this by writing to a picture file

    tstart = 0.0
    tend = 1.0
    nbview = 0
    if wav is not None:
        nbview += 1
    if f0s is not None:
        nbview += 1
    if SPEC is not None:
        nbview += 1
    if PDD is not None:
        nbview += 1
    if NM is not None:
        nbview += 1

    import matplotlib.pyplot as plt
    plt.ion()
    _, axs = plt.subplots(nbview, 1, sharex=True, sharey=False)
    if not isinstance(axs, np.ndarray):
        axs = np.array([axs])

    view = 0
    if wav is not None:
        times = np.arange(len(wav)) / float(fs)
        axs[view].plot(times, wav, 'k')
        axs[view].set_ylabel('Waveform\nAmplitude')
        axs[view].grid()
        axs[view].set_xlim((0.0, times[-1]))
        view += 1
    if f0s is not None:
        tstart = f0s[0, 0]
        tend = f0s[-1, 0]
        axs[view].plot(f0s[:, 0], f0s[:, 1], 'k')
        axs[view].set_ylabel('F0\nFrequency [Hz]')
        axs[view].grid()
        view += 1
    if SPEC is not None:
        axs[view].imshow(sp.mag2db(SPEC).T, origin='lower', aspect='auto',
                         interpolation='none',
                         extent=(tstart, tend, 0, 0.5 * fs), cmap='jet')
        axs[view].set_ylabel('Amp. Envelope\nFrequency [Hz]')
        view += 1
    if PDD is not None:
        axs[view].imshow(PDD.T, origin='lower', aspect='auto',
                         interpolation='none',
                         extent=(tstart, tend, 0, 0.5 * fs), cmap='jet',
                         vmin=0.0, vmax=2.0)
        axs[view].set_ylabel('PDD\nFrequency [Hz]')
        view += 1
    if NM is not None:
        axs[view].imshow(NM.T, origin='lower', aspect='auto',
                         interpolation='none',
                         extent=(tstart, tend, 0, 0.5 * fs), cmap='Greys',
                         vmin=0.0, vmax=1.0)
        axs[view].set_ylabel('Noise Mask\nFrequency [Hz]')
        view += 1
    axs[-1].set_xlabel('Time [s]')

    from IPython.core.debugger import Pdb
    Pdb().set_trace()
def set_trace():
    """A poor man's breakpoint."""
    # Use IPython's Pdb: without it, the debugger can generate strange
    # characters when run inside IPython.
    import sys
    from IPython.core.debugger import Pdb
    Pdb().set_trace(sys._getframe().f_back)
def trace(code, preparse=True):
    r"""
    Evaluate Sage code using the interactive tracer and return the result.

    The string ``code`` must be a valid expression enclosed in quotes (no
    assignments - the result of the expression is returned). In the Sage
    notebook this just raises a NotImplementedError.

    INPUT:

    - ``code`` - str

    - ``preparse`` - bool (default: True); if True, run the expression
      through the Sage preparser.

    REMARKS: This function is extremely powerful! For example, if you want
    to step through each line of execution of, e.g., ``factor(100)``,
    type ::

        sage: trace("factor(100)")             # not tested

    then at the (Pdb) prompt type ``s`` (or ``step``), then press return
    over and over to step through every line of Python that is called in
    the course of the above computation. Type ``?`` at any time for help
    on how to use the debugger (e.g., ``l`` lists 11 lines around the
    current line; ``bt`` gives a back trace, etc.).

    Setting a break point: If you have some code in a file and would like
    to drop into the debugger at a given point, put the following code at
    that point in the file:

    ``import pdb; pdb.set_trace()``

    For an article on how to use the Python debugger, see
    http://www.onlamp.com/pub/a/python/2005/09/01/debugger.html

    TESTS:

    The only real way to test this is via pexpect spawning a sage
    subprocess that uses IPython. ::

        sage: import pexpect
        sage: s = pexpect.spawn('sage')
        sage: _ = s.sendline("trace('print factor(10)'); print 3+97")
        sage: _ = s.sendline("s"); _ = s.sendline("c");
        sage: _ = s.expect('100', timeout=90)

    Seeing the ipdb prompt and the 2 \* 5 in the output below is a strong
    indication that the trace command worked correctly. ::

        sage: print s.before[s.before.find('--'):]
        --...
        ipdb> c
        2 * 5

    We test what happens in notebook embedded mode::

        sage: sage.plot.plot.EMBEDDED_MODE = True
        sage: trace('print factor(10)')
        Traceback (most recent call last):
        ...
        NotImplementedError: the trace command is not implemented in the Sage notebook; you must use the command line.
    """
    from sage.plot.plot import EMBEDDED_MODE
    if EMBEDDED_MODE:
        raise NotImplementedError(
            "the trace command is not implemented in the Sage notebook; you must use the command line.")

    from IPython.core.debugger import Pdb
    pdb = Pdb()

    try:
        ipython = get_ipython()
    except NameError:
        raise NotImplementedError(
            "the trace command can only be run from the Sage command-line")

    import preparser
    code = preparser.preparse(code)
    return pdb.run(code, ipython.user_ns)
def set_trace():
    from IPython.core.debugger import Pdb
    import sys
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def debug(f, *args, **kwargs):
    # Allows arbitrarily calling the debugger on a function. Press "c" to
    # resume the function; press "s" to step through each of its lines.
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwargs)
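# Usage sketch for the wrapper above (hedged: `scale` and its arguments are
# illustrative only). Execution stops at the first line of the called
# function; "s" steps through its body, "c" runs it to completion.
def scale(values, factor=2):
    return [v * factor for v in values]

result = debug(scale, [1, 2, 3], factor=10)
print(result)  # [10, 20, 30]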