# Common imports for the examples below. cleaningSineTracks() is a helper
# assumed to be defined in the surrounding module; a hedged sketch of
# framesToAudio() is included further below.
import numpy
import numpy as np
from numpy import zeros, array

import essentia
import essentia.streaming as es
from essentia.streaming import VectorInput


def analysisSynthesisStreaming(params, signal):

    out = numpy.array(0)
    pool = essentia.Pool()
    fcut = es.FrameCutter(frameSize=params['frameSize'],
                          hopSize=params['hopSize'],
                          startFromZero=False)
    w = es.Windowing(type="hann")
    fft = es.FFT(size=params['frameSize'])
    ifft = es.IFFT(size=params['frameSize'])
    overl = es.OverlapAdd(frameSize=params['frameSize'],
                          hopSize=params['hopSize'])

    # append half a window of zeros to the input signal so the output has the same length
    signal = numpy.append(signal, zeros(params['frameSize'] // 2))
    insignal = VectorInput(signal)
    insignal.data >> fcut.signal
    fcut.frame >> w.frame
    w.frame >> fft.frame
    fft.fft >> ifft.fft
    ifft.frame >> overl.frame
    overl.signal >> (pool, 'audio')

    essentia.run(insignal)

    # remove first half window frames
    outaudio = pool['audio']
    outaudio = outaudio[2 * params['hopSize']:]
    return outaudio
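
# Usage sketch for analysisSynthesisStreaming(). The parameter values and the
# input file name are illustrative assumptions, not taken from the original
# source; the function itself only reads 'frameSize' and 'hopSize'.
import essentia.standard as std

stft_params = {'frameSize': 2048, 'hopSize': 512}                 # hypothetical values
audio = std.MonoLoader(filename='input.wav', sampleRate=44100)()  # hypothetical input file
resynth = analysisSynthesisStreaming(stft_params, audio)
# resynth should closely match `audio`, since the chain is an identity
# STFT analysis/resynthesis.
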
Example #2
def analHarmonicModelStreaming(params, signal):

    #out = numpy.array(0)
    pool = essentia.Pool()

    # windowing and FFT
    fcut = es.FrameCutter(frameSize=params['frameSize'],
                          hopSize=params['hopSize'],
                          startFromZero=False)
    w = es.Windowing(type="blackmanharris92")
    fft = es.FFT(size=params['frameSize'])
    spec = es.Spectrum(size=params['frameSize'])

    # pitch detection
    pitchDetect = es.PitchYinFFT(frameSize=params['frameSize'],
                                 sampleRate=params['sampleRate'])

    smanal = es.HarmonicModelAnal(
        sampleRate=params['sampleRate'],
        maxnSines=params['maxnSines'],
        magnitudeThreshold=params['magnitudeThreshold'],
        freqDevOffset=params['freqDevOffset'],
        freqDevSlope=params['freqDevSlope'])

    # append half a window of zeros to the input signal so the output has the same length
    signal = numpy.append(signal, zeros(params['frameSize'] // 2))
    insignal = VectorInput(signal)
    insignal.data >> fcut.signal

    fcut.frame >> w.frame
    w.frame >> spec.frame
    w.frame >> fft.frame
    spec.spectrum >> pitchDetect.spectrum

    fft.fft >> smanal.fft
    pitchDetect.pitch >> smanal.pitch
    pitchDetect.pitchConfidence >> (pool, 'pitchConfidence')

    smanal.magnitudes >> (pool, 'magnitudes')
    smanal.frequencies >> (pool, 'frequencies')
    smanal.phases >> (pool, 'phases')

    essentia.run(insignal)

    # retrieve the analysis results from the pool
    mags = pool['magnitudes']
    freqs = pool['frequencies']
    phases = pool['phases']

    # remove short tracks
    minFrames = int(params['minSineDur'] * params['sampleRate'] /
                    params['hopSize'])
    freqsClean = cleaningSineTracks(freqs, minFrames)
    pool['frequencies'].data = freqsClean

    return mags, freqsClean, phases
Example #3
def analsynthSineModelStreaming(params, signal):

    out = numpy.array(0)

    pool = essentia.Pool()
    fcut = es.FrameCutter(frameSize=params['frameSize'],
                          hopSize=params['hopSize'],
                          startFromZero=False)
    w = es.Windowing(type="blackmanharris92")
    fft = es.FFT(size=params['frameSize'])
    smanal = es.SineModelAnal(sampleRate=params['sampleRate'],
                              maxnSines=params['maxnSines'],
                              magnitudeThreshold=params['magnitudeThreshold'],
                              freqDevOffset=params['freqDevOffset'],
                              freqDevSlope=params['freqDevSlope'])
    smsyn = es.SineModelSynth(sampleRate=params['sampleRate'],
                              fftSize=params['frameSize'],
                              hopSize=params['hopSize'])
    ifft = es.IFFT(size=params['frameSize'])
    overl = es.OverlapAdd(frameSize=params['frameSize'],
                          hopSize=params['hopSize'],
                          gain=1. / params['frameSize'])

    # append half a window of zeros to the input signal so the output has the same length
    signal = numpy.append(signal, zeros(params['frameSize'] // 2))
    insignal = VectorInput(signal)
    # analysis
    insignal.data >> fcut.signal
    fcut.frame >> w.frame
    w.frame >> fft.frame
    fft.fft >> smanal.fft
    smanal.magnitudes >> (pool, 'magnitudes')
    smanal.frequencies >> (pool, 'frequencies')
    smanal.phases >> (pool, 'phases')
    # synthesis
    smanal.magnitudes >> smsyn.magnitudes
    smanal.frequencies >> smsyn.frequencies
    smanal.phases >> smsyn.phases
    smsyn.fft >> ifft.fft
    ifft.frame >> overl.frame
    overl.signal >> (pool, 'audio')

    essentia.run(insignal)

    # remove short tracks
    freqs = pool['frequencies']
    minFrames = int(params['minSineDur'] * params['sampleRate'] /
                    params['hopSize'])
    freqsClean = cleaningSineTracks(freqs, minFrames)
    pool['frequencies'].data = freqsClean

    # remove first half window frames
    outaudio = pool['audio']
    outaudio = outaudio[2 * params['hopSize']:]

    return outaudio, pool
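
# Usage sketch for analsynthSineModelStreaming(). All parameter values below are
# illustrative assumptions; the dictionary keys are the ones the function reads.
sine_params = {
    'sampleRate': 44100,
    'frameSize': 2048,
    'hopSize': 512,
    'maxnSines': 100,            # maximum number of simultaneous sine tracks
    'magnitudeThreshold': -74,   # spectral peak threshold in dB
    'freqDevOffset': 10,         # allowed frequency deviation for continuing a track
    'freqDevSlope': 0.001,       # increase of that deviation with frequency
    'minSineDur': 0.02,          # minimum track duration in seconds, used for cleaning
}
# e.g.: resynth, pool = analsynthSineModelStreaming(sine_params, audio)
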
Example #4
def analsynthHarmonicMaskStreaming(params, signal):

    out = array([0.])

    pool = essentia.Pool()
    # windowing and FFT
    fcut = es.FrameCutter(frameSize=params['frameSize'],
                          hopSize=params['hopSize'],
                          startFromZero=False)
    w = es.Windowing(type="blackmanharris92")
    fft = es.FFT(size=params['frameSize'])
    spec = es.Spectrum(size=params['frameSize'])

    # pitch detection
    pitchDetect = es.PitchYinFFT(frameSize=params['frameSize'],
                                 sampleRate=params['sampleRate'])

    hmask = es.HarmonicMask(sampleRate=params['sampleRate'],
                            binWidth=params['binWidth'],
                            attenuation=params['attenuation_dB'])

    ifft = es.IFFT(size=params['frameSize'])
    overl = es.OverlapAdd(frameSize=params['frameSize'],
                          hopSize=params['hopSize'])

    # append half a window of zeros to the input signal so the output has the same length
    signal = numpy.append(signal, zeros(params['frameSize'] // 2))
    insignal = VectorInput(signal)

    # analysis
    insignal.data >> fcut.signal
    fcut.frame >> w.frame
    w.frame >> spec.frame
    w.frame >> fft.frame
    spec.spectrum >> pitchDetect.spectrum

    fft.fft >> hmask.fft
    pitchDetect.pitch >> hmask.pitch
    pitchDetect.pitchConfidence >> (pool, 'pitchConfidence')

    hmask.fft >> ifft.fft

    ifft.frame >> overl.frame
    overl.signal >> (pool, 'audio')

    essentia.run(insignal)

    # remove first half window frames
    outaudio = pool['audio']
    outaudio = outaudio[2 * params['hopSize']:]

    return outaudio, pool
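
# Usage sketch for analsynthHarmonicMaskStreaming(). Besides the usual
# sampleRate/frameSize/hopSize keys it also reads 'binWidth' (mask width in
# spectral bins around each harmonic) and 'attenuation_dB'. The values below
# are illustrative assumptions.
mask_params = {
    'sampleRate': 44100,
    'frameSize': 2048,
    'hopSize': 512,
    'binWidth': 4,           # hypothetical mask width in bins
    'attenuation_dB': -60,   # hypothetical attenuation used by HarmonicMask
}
# e.g.: masked_audio, pool = analsynthHarmonicMaskStreaming(mask_params, audio)
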
Example #5
def analsynthSineSubtractionStreaming(params, signal):

    out = numpy.array(0)
    pool = essentia.Pool()
    fcut = es.FrameCutter(frameSize=params['frameSize'],
                          hopSize=params['hopSize'],
                          startFromZero=False)
    w = es.Windowing(type="blackmanharris92")
    fft = es.FFT(size=params['frameSize'])
    smanal = es.SineModelAnal(sampleRate=params['sampleRate'],
                              maxnSines=params['maxnSines'],
                              magnitudeThreshold=params['magnitudeThreshold'],
                              freqDevOffset=params['freqDevOffset'],
                              freqDevSlope=params['freqDevSlope'])

    subtrFFTSize = min(params['frameSize'] // 4, 4 * params['hopSize'])
    smsub = es.SineSubtraction(sampleRate=params['sampleRate'],
                               fftSize=subtrFFTSize,
                               hopSize=params['hopSize'])

    # append half a window of zeros to the input signal so the output has the same length
    signal = numpy.append(signal, zeros(params['frameSize'] // 2))

    insignal = VectorInput(signal)
    # analysis
    insignal.data >> fcut.signal
    fcut.frame >> w.frame
    w.frame >> fft.frame
    fft.fft >> smanal.fft
    smanal.magnitudes >> (pool, 'magnitudes')
    smanal.frequencies >> (pool, 'frequencies')
    smanal.phases >> (pool, 'phases')
    # subtraction
    fcut.frame >> smsub.frame
    smanal.magnitudes >> smsub.magnitudes
    smanal.frequencies >> smsub.frequencies
    smanal.phases >> smsub.phases
    smsub.frame >> (pool, 'frames')

    essentia.run(insignal)

    print(pool['frames'].shape)
    outaudio = framesToAudio(pool['frames'])
    outaudio = outaudio[2 * params['hopSize']:]

    return outaudio, pool
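
# framesToAudio() is not shown in this excerpt. A minimal sketch, assuming each
# frame produced by SineSubtraction is a consecutive, non-overlapping chunk of
# the residual signal:
def framesToAudio(frames):
    return numpy.asarray(frames, dtype=numpy.float32).flatten()
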
Example #6
def analSineModelStreaming(params, signal):

    #out = numpy.array(0)
    pool = essentia.Pool()
    fcut = es.FrameCutter(frameSize=params['frameSize'],
                          hopSize=params['hopSize'],
                          startFromZero=False)
    w = es.Windowing(type="hann")
    fft = es.FFT(size=params['frameSize'])
    smanal = es.SineModelAnal(sampleRate=params['sampleRate'],
                              maxnSines=params['maxnSines'],
                              magnitudeThreshold=params['magnitudeThreshold'],
                              freqDevOffset=params['freqDevOffset'],
                              freqDevSlope=params['freqDevSlope'])

    # append half a window of zeros to the input signal so the output has the same length
    signal = numpy.append(signal, zeros(params['frameSize'] // 2))
    insignal = VectorInput(signal)
    insignal.data >> fcut.signal
    fcut.frame >> w.frame
    w.frame >> fft.frame
    fft.fft >> smanal.fft
    smanal.magnitudes >> (pool, 'magnitudes')
    smanal.frequencies >> (pool, 'frequencies')
    smanal.phases >> (pool, 'phases')

    essentia.run(insignal)

    # retrieve the analysis results from the pool
    mags = pool['magnitudes']
    freqs = pool['frequencies']
    phases = pool['phases']

    # remove short tracks
    minFrames = int(params['minSineDur'] * params['sampleRate'] /
                    params['hopSize'])
    freqsClean = cleaningSineTracks(freqs, minFrames)
    pool['frequencies'].data = freqsClean

    return mags, freqsClean, phases
Example #7
audioout = np.array(0)
counter = 0

# input and output files
import os.path
tutorial_dir = os.path.dirname(os.path.realpath(__file__))
inputFilename = os.path.join(tutorial_dir, 'singing-female.wav')
outputFilename = os.path.join(tutorial_dir, 'singing-female-out-sinesubtraction.wav')


# params is assumed to be defined earlier in the original tutorial script
out = np.array(0)
loader = es.MonoLoader(filename=inputFilename, sampleRate=params['sampleRate'])
pool = essentia.Pool()
fcut = es.FrameCutter(frameSize=params['frameSize'],
                      hopSize=params['hopSize'],
                      startFromZero=False)
w = es.Windowing(type="blackmanharris92")
fft = es.FFT(size=params['frameSize'])
smanal = es.SineModelAnal(sampleRate=params['sampleRate'],
                          maxnSines=params['maxnSines'],
                          magnitudeThreshold=params['magnitudeThreshold'],
                          freqDevOffset=params['freqDevOffset'],
                          freqDevSlope=params['freqDevSlope'])
subtrFFTSize = min(params['frameSize'] // 4, 4 * params['hopSize'])
smsub = es.SineSubtraction(sampleRate=params['sampleRate'],
                           fftSize=subtrFFTSize,
                           hopSize=params['hopSize'])


# analysis
loader.audio >> fcut.signal
fcut.frame >> w.frame
w.frame >> fft.frame
fft.fft >> smanal.fft
smanal.magnitudes >> (pool, 'magnitudes')
smanal.frequencies >> (pool, 'frequencies')
smanal.phases >> (pool, 'phases')
# subtraction
fcut.frame >> smsub.frame
# the remaining connections mirror analsynthSineSubtractionStreaming() above
smanal.magnitudes >> smsub.magnitudes
smanal.frequencies >> smsub.frequencies
smanal.phases >> smsub.phases
smsub.frame >> (pool, 'frames')

essentia.run(loader)

# the residual frames in pool['frames'] can then be converted to a signal
# (e.g. with framesToAudio()) and written to outputFilename

# (tail of a standard-mode frame-by-frame loop whose setup is not included in
#  this excerpt; `out` holds the resynthesized frame produced in each iteration)
    if counter >= (framesize // (2 * hopsize)):
      audioout = np.append(audioout, out)
    counter += 1

  # write audio output
  print(audioout.shape)
  awrite(audioout.astype(np.float32))


if mode == 'streaming':
  out = np.array(0)
  loader = es.MonoLoader(filename=inputFilename, sampleRate=44100)
  pool = essentia.Pool()
  fcut = es.FrameCutter(frameSize=framesize, hopSize=hopsize, startFromZero=False)
  w = es.Windowing(type="hann")
  fft = es.FFT(size=framesize)
  ifft = es.IFFT(size=framesize)
  overl = es.OverlapAdd(frameSize=framesize, hopSize=hopsize)
  awrite = es.MonoWriter(filename=outputFilename, sampleRate=44100)

  loader.audio >> fcut.signal
  fcut.frame >> w.frame
  w.frame >> fft.frame
  fft.fft >> ifft.fft
  ifft.frame >> overl.frame
  overl.signal >> awrite.audio
  overl.signal >> (pool, 'audio')

  essentia.run(loader)