def runit(self, siglen, fmin, fmax, obins, sllen, trlen, real):
    """Round-trip a random test signal through a sliced NSGT and assert
    near-perfect reconstruction.

    Forward-transforms the first ``siglen`` samples of the module-level
    random signal, inverts the coefficients, and requires the
    reconstruction to match within atol=1e-3, dumping the failing
    parameter set and the deviating sample positions otherwise.
    """
    sig = rndsig[:siglen]
    scale = OctScale(fmin, fmax, obins)
    nsgt = NSGT_sliced(scale, fs=44100, sl_len=sllen, tr_area=trlen, real=real)
    coeffs = nsgt.forward((sig,))
    rec = nsgt.backward(coeffs)
    # flatten the slice generator back into one array, trimmed to input length
    sig_rec = np.concatenate([list(seg) for seg in rec])[:len(sig)]
    ok = np.allclose(sig, sig_rec, atol=1.e-3)
    if not ok:
        print("Failing params:", siglen, fmin, fmax, obins, sllen, trlen, real)
        dev = np.abs(sig_rec - sig)
        print("Error", np.where(dev > 1.e-3), np.max(dev))
    self.assertTrue(ok)
def runit(self, siglen, fmin, fmax, obins, sllen, trlen, real):
    """Round-trip a random test signal through a sliced NSGT and assert
    near-perfect reconstruction (atol=1e-3), printing the failing
    parameter set and deviating sample positions on mismatch.

    Fixed for Python 3: the original used Python-2 ``print`` statements
    (a SyntaxError under Python 3) and passed a bare ``map`` iterator to
    ``np.concatenate``, which only accepts a sequence in Python 3.
    """
    sig = rndsig[:siglen]
    scale = OctScale(fmin, fmax, obins)
    nsgt = NSGT_sliced(scale, fs=44100, sl_len=sllen, tr_area=trlen, real=real)
    c = nsgt.forward((sig,))
    rc = nsgt.backward(c)
    # list(...) materializes the map so np.concatenate can consume it
    s_r = np.concatenate(list(map(list, rc)))[:len(sig)]
    close = np.allclose(sig, s_r, atol=1.e-3)
    if not close:
        print("Failing params:", siglen, fmin, fmax, obins, sllen, trlen, real)
        dev = np.abs(s_r - sig)
        print("Error", np.where(dev > 1.e-3), np.max(dev))
    self.assertTrue(close)
# Read audio data sf = SndReader(args.input, sr=fs, chns=2) signal = sf() # duration of signal in s dur = sf.frames/float(fs) # total number of coefficients to represent input signal ncoefs = int(sf.frames*slicq.coef_factor) # read slices from audio file and mix down signal, if necessary at all if not args.downmix_after: signal = ((np.mean(s, axis=0),) for s in signal) # generator for forward transformation c = slicq.forward(signal) # add up overlapping coefficient slices coefs = assemble_coeffs(c, ncoefs) del sf # not needed any more # compute magnitude spectrum mindb = -100. mls = np.abs(coefs) # mix down multichannel mls = np.mean(mls, axis=-1) np.maximum(mls, 10**(mindb/20.), out=mls) if args.mag_scale == 'dB': np.log10(mls, out=mls)
# Read audio data sf = SndReader(args.input, sr=fs, chns=2) signal = sf() # duration of signal in s dur = sf.frames / float(fs) # total number of coefficients to represent input signal ncoefs = int(sf.frames * slicq.coef_factor) # read slices from audio file and mix down signal, if necessary at all if not args.downmix_after: signal = ((np.mean(s, axis=0), ) for s in signal) # generator for forward transformation c = slicq.forward(signal) # add up overlapping coefficient slices coefs = assemble_coeffs(c, ncoefs) del sf # not needed any more # compute magnitude spectrum mindb = -100. mls = np.abs(coefs) # mix down multichannel mls = np.mean(mls, axis=-1) np.maximum(mls, 10**(mindb / 20.), out=mls) if args.mag_scale == 'dB': np.log10(mls, out=mls)
def main():
    """Harmonic/percussive source separation (HPSS) on sliced NSGT
    coefficients, processing the input in fixed-size chunks to simulate
    realtime streaming.

    Writes ``<prefix>_harmonic.wav`` and ``<prefix>_percussive.wav`` into
    ``--outdir`` and returns 0.

    Fixes vs. the original:
    - prefix extraction uses os.path.basename (portable) instead of
      splitting on "/";
    - the soft-mask branch assigned Mh from P**2 and Mp from H**2,
      swapped relative to the hard-mask branch and to the standard
      Wiener HPSS masks — corrected;
    - the reported per-iteration time divided by n_chunks although only
      n_chunks - 1 iterations run.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--mask",
        type=str,
        default="soft",
        choices=("hard", "soft"),
        help="mask strategy",
    )
    parser.add_argument("--outdir", type=str, default="./", help="output directory")
    parser.add_argument(
        "--stream-size",
        type=int,
        default=1024,
        help="stream size for simulated realtime from wav (default=%(default)s)",
    )
    parser.add_argument("input", type=str, help="input file")
    args = parser.parse_args()

    # os.path.basename is portable; splitting on "/" breaks on Windows paths
    prefix = os.path.basename(args.input).split("_")[0]
    harm_out = os.path.join(args.outdir, prefix + "_harmonic.wav")
    perc_out = os.path.join(args.outdir, prefix + "_percussive.wav")
    print("writing files to {0}, {1}".format(harm_out, perc_out))

    # median-filter lengths: lharm along the time axis, lperc along the
    # frequency axis of the coefficient matrix
    lharm = 17
    lperc = 7

    # calculate transform parameters
    nsgt_scale = OctScale(80, 20000, 12)
    trlen = args.stream_size  # transition length
    sllen = 4 * args.stream_size  # slice length

    x, fs = librosa.load(args.input, sr=None)
    xh = numpy.zeros_like(x)
    xp = numpy.zeros_like(x)

    hop = trlen
    # integer // already floors; no numpy.floor/int wrapping needed
    n_chunks = x.shape[0] // hop
    eps = numpy.finfo(numpy.float32).eps

    slicq = NSGT_sliced(
        nsgt_scale,
        sllen,
        trlen,
        fs,
        real=True,
        matrixform=True,
    )

    total_time = 0.0
    for chunk in range(n_chunks - 1):
        t1 = cputime()

        start = chunk * hop
        end = start + sllen
        s = x[start:end]

        signal = (s,)
        c = slicq.forward(signal)
        c = list(c)
        C = numpy.asarray(c)
        Cmag = numpy.abs(C)

        # harmonic component: smooth along time; percussive: along frequency
        H = scipy.ndimage.median_filter(Cmag, size=(1, lharm, 1))
        P = scipy.ndimage.median_filter(Cmag, size=(1, 1, lperc))

        if args.mask == "soft":
            # Wiener-style soft masks: each component's energy over the total.
            # The original had Mh/Mp swapped here, contradicting the hard-mask
            # branch below where Mh selects H-dominant regions.
            tot = numpy.power(H, 2.0) + numpy.power(P, 2.0) + eps
            Mh = numpy.divide(numpy.power(H, 2.0), tot)
            Mp = numpy.divide(numpy.power(P, 2.0), tot)
        else:
            # hard (binary) masks
            Mh = numpy.divide(H, P + eps) > 2.0
            Mp = numpy.divide(P, H + eps) >= 2.0

        Cp = numpy.multiply(Mp, C)
        Ch = numpy.multiply(Mh, C)

        # generator for backward transformation
        outseq_h = slicq.backward(Ch)
        outseq_p = slicq.backward(Cp)

        # make single output array from iterator
        sh_r = next(reblock(outseq_h, len(s), fulllast=False))
        sh_r = sh_r.real
        sp_r = next(reblock(outseq_p, len(s), fulllast=False))
        sp_r = sp_r.real

        xh[start:end] = sh_r
        xp[start:end] = sp_r

        t2 = cputime()
        total_time += t2 - t1

    # average over the iterations actually run (n_chunks - 1), guarded
    # against degenerate zero-iteration inputs
    iters = max(n_chunks - 1, 1)
    print("Calculation time per iter: %fs" % (total_time / iters))

    scipy.io.wavfile.write(harm_out, fs, xh)
    scipy.io.wavfile.write(perc_out, fs, xp)

    return 0
device="cpu") else: slicq = NSGT(scl, fs, signal.shape[-1], real=True, matrixform=True, multichannel=True, device="cpu") # total number of coefficients to represent input signal #ncoefs = int(sf.frames*slicq.coef_factor) # generator for forward transformation if args.nonsliced: c = slicq.forward(signal) else: c = slicq.forward((signal, )) # add a batch c = torch.unsqueeze(c, dim=0) transform_name = 'sliCQT' if not args.nonsliced else 'NSGT' if args.fmin > 0.0: freqs = numpy.r_[[0.], freqs] if args.plot: slicq_params = '{0} scale, {1} bins, {2:.1f}-{3:.1f} Hz'.format( args.scale, args.bins, args.fmin, args.fmax)