Code Example #1
def algorithm_durations(sound):
    """
    Returns the duration of a file according to its length in number of samples and according to an envelope
    computation (See FFont ismir paper TODO: cite correctly).
    :param sound: sound dictionary from dataset
    :return: dictionary with results per different methods
    """
    results = dict()
    sample_rate = 44100
    n_channels = 1
    # NOTE: load_audio_file resamples to the given sample_rate and downmixes to mono
    audio = load_audio_file(file_path=sound[SOUND_FILE_KEY], sample_rate=sample_rate)
    length_samples = len(audio)
    duration = float(length_samples) / (sample_rate * n_channels)

    # Effective duration: region where the amplitude envelope stays above 5% of its peak
    env = estd.Envelope(attackTime=10, releaseTime=10)
    envelope = env(essentia.array(audio))
    threshold = envelope.max() * 0.05
    envelope_above_threshold = np.where(envelope >= threshold)
    start_effective_duration = envelope_above_threshold[0][0]
    end_effective_duration = envelope_above_threshold[0][-1]
    length_samples_effective_duration = end_effective_duration - start_effective_duration

    results['durations'] = {
        'duration': duration,
        'length_samples': length_samples,
        'length_samples_effective_duration': length_samples_effective_duration,
        'start_effective_duration': start_effective_duration,
        'end_effective_duration': end_effective_duration
    }
    return results
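
Since load_audio_file and SOUND_FILE_KEY are helpers from the surrounding project, here is a self-contained sketch of the same effective-duration idea using only Essentia and NumPy (the file path is a placeholder, and the 5% threshold mirrors the function above):

import numpy as np
import essentia.standard as estd

# load a mono file at 44.1 kHz (placeholder path)
audio = estd.MonoLoader(filename='kick.wav', sampleRate=44100)()
# amplitude envelope with 10 ms attack and release
envelope = estd.Envelope(attackTime=10, releaseTime=10)(audio)
# samples where the envelope exceeds 5% of its peak
above = np.where(envelope >= envelope.max() * 0.05)[0]
print('effective duration: %.3f s' % ((above[-1] - above[0]) / 44100.0))
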
Code Example #2
def computeNoveltyCurve(filename, pool):
    loader = EasyLoader(filename=filename,
                        sampleRate=pool['samplerate'],
                        startTime=STARTTIME,
                        endTime=ENDTIME,
                        downmix=pool['downmix'])
    fc = FrameCutter(frameSize=int(pool['framesize']),
                     silentFrames='noise',
                     hopSize=int(pool['hopsize']),
                     startFromZero=False)
    window = Windowing(type=pool['window'], zeroPhase=False)
    #freqBands = FrequencyBands(frequencyBands=EqBands, sampleRate=pool['samplerate'])
    freqBands = FrequencyBands(sampleRate=pool['samplerate'])
    spec = Spectrum()
    hfc = HFC()

    loader.audio >> fc.signal
    fc.frame >> window.frame >> spec.frame
    spec.spectrum >> freqBands.spectrum
    spec.spectrum >> hfc.spectrum
    freqBands.bands >> (pool, 'frequency_bands')
    hfc.hfc >> (pool, 'hfc')
    essentia.run(loader)

    pool.set('size', loader.audio.totalProduced())
    pool.set('length', pool['size'] / pool['samplerate'])

    # compute a weighting curve according to the frequency bands:
    frequencyBands = pool['frequency_bands']
    nFrames = len(frequencyBands)
    weightCurve = np.sum(frequencyBands, axis=0)
    weightCurve = [val / float(nFrames) for val in weightCurve]

    weightCurve = essentia.normalize(weightCurve)
    #pyplot.plot(weightCurve)
    #pyplot.show()

    noveltyCurve = std.NoveltyCurve(frameRate=pool['framerate'],
                                    weightCurveType=pool['weight'],
                                    weightCurve=weightCurve)(frequencyBands)
    #for x in noveltyCurve: pool.add('novelty_curve', x)
    #return

    # derivative of hfc seems to help in finding more precise beats...
    hfc = essentia.normalize(pool['hfc'])
    dhfc = essentia.derivative(hfc)
    for i, val in enumerate(dhfc):
        if val < 0: continue
        noveltyCurve[i] += val

    # low pass filter novelty curve:
    env = std.Envelope(attackTime=2. / pool['framerate'],
                       releaseTime=2. / pool['framerate'])(noveltyCurve)

    # subtract a running median from the envelope (adaptive thresholding):
    windowSize = 8  # samples
    size = len(env)
    filtered = np.zeros(size)
    for i in range(size):
        start = i - windowSize
        if start < 0: start = 0
        end = start + windowSize
        if end > size:
            end = size
            start = size - windowSize
        filtered[i] = env[i] - np.median(env[start:end])
        if filtered[i] < 0: filtered[i] = 0

    #pyplot.subplot(311)
    #pyplot.plot(noveltyCurve)
    #pyplot.subplot(312)
    #pyplot.plot(env, 'r')
    #pyplot.subplot(313)
    #pyplot.plot(filtered, 'g')
    #pyplot.show()

    #for x in noveltyCurve: pool.add('novelty_curve', x)
    for x in filtered:
        pool.add('novelty_curve', x)
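
A rough driver for this function might look as follows. The parameter values are assumptions (the original script defines them, along with the STARTTIME and ENDTIME globals, in the same module), the function itself additionally relies on from essentia.streaming import *, essentia.standard as std and numpy as np, and 'supplied' is used as the weight curve type so the computed weightCurve is actually applied:

import essentia

STARTTIME = 0
ENDTIME = 600  # seconds

pool = essentia.Pool()
pool.set('samplerate', 44100.)
pool.set('downmix', 'mix')
pool.set('framesize', 1024.)
pool.set('hopsize', 512.)
pool.set('window', 'hann')
pool.set('framerate', 44100. / 512.)
pool.set('weight', 'supplied')

computeNoveltyCurve('loop.wav', pool)   # placeholder file name
print(len(pool['novelty_curve']), 'novelty values')
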
Code Example #3
    def runAnalysis(self):

        # Get all samples referenced in the DB, except those that have
        # been marked as samples to exclude.
        # TODO: Clarify whether sample packs should be excluded when we
        # can't find enough info on them; for now all sample packs are included.
        samples = Sample.objects.all().filter(exclude=False,
                                              #kit__sample_pack__exclude=False,
                                              )

        numSamples = len(samples)
        self.stdout.write("Running low-level extractors on %s samples. " %
                          (numSamples))
        i = 0.0

        for sample in samples:
            # Get audio and run loudness analysis
            try:
                loader = es.MonoLoader(filename=sample.path)
                neqAudio = loader()

                eqLoader = es.EqloudLoader(filename=sample.path)
                eqAudio = eqLoader()

                # Trim the audio clip
                trimmer = es.Trimmer(startTime=sample.start_time,
                                     endTime=sample.stop_time)
                neqAudio = trimmer(neqAudio)
                eqAudio = trimmer(eqAudio)

            except RuntimeError as esExcept:
                self.stderr.write("%s\n" % esExcept)
                self.stderr.write(
                    "%s failed to load. Excluding sample from further analysis"
                    % sample.path)
                sample.exclude = True
                sample.save()
                i = i + 1
                continue

            # Frame size & hop size
            frameSize = 2048
            hopSize = 256

            # Amplitude envelope of sample
            envelope = es.Envelope()
            audioEnv = envelope(eqAudio)

            # Find attack phase and LAT
            latFunc = es.LogAttackTime()
            lat, attackStart, attackEnd = latFunc(audioEnv)

            # Temporal Centroid on entire sample length
            tc = self.temporal_centroid(eqAudio)

            # Time segmentation starting point
            windowFunc = es.LogAttackTime(startAttackThreshold=float(
                self.windowStart if self.windowStart < 90 else 90) / 100)
            _, windowStart, windowEnd = windowFunc(audioEnv)
            windowStart = windowStart if self.windowStart < 90 else windowEnd

            if self.windowLength > 0:
                # Window from onset
                trimmer = es.Trimmer(startTime=windowStart,
                                     endTime=windowStart +
                                     (float(self.windowLength) / 1000))
                eqAudio = trimmer(eqAudio)
                neqAudio = trimmer(neqAudio)

            # Get analysis object for this sample
            try:
                analysisObject = Feature.objects.get(
                    sample=sample,
                    window_length=self.windowLength,
                    window_start=self.windowStart)
            except Feature.DoesNotExist:
                analysisObject = Feature(sample=sample,
                                         window_length=self.windowLength,
                                         window_start=self.windowStart)

            analysisObject.lat = lat
            analysisObject.rms = self.rms(eqAudio)
            analysisObject.temporal_centroid = tc

            # Spectral extractor without equal loudness filter
            neqSpectralExtractor = es.LowLevelSpectralExtractor(
                frameSize=frameSize, hopSize=hopSize)
            neqSpectralResults = neqSpectralExtractor(neqAudio)

            bark_mean = np.mean(neqSpectralResults[0], axis=0)
            analysisObject.bark_1_mean = bark_mean[0]
            analysisObject.bark_2_mean = bark_mean[1]
            analysisObject.bark_3_mean = bark_mean[2]
            analysisObject.bark_4_mean = bark_mean[3]
            analysisObject.bark_5_mean = bark_mean[4]
            analysisObject.bark_6_mean = bark_mean[5]
            analysisObject.bark_7_mean = bark_mean[6]
            analysisObject.bark_8_mean = bark_mean[7]
            analysisObject.bark_9_mean = bark_mean[8]
            analysisObject.bark_10_mean = bark_mean[9]
            analysisObject.bark_11_mean = bark_mean[10]
            analysisObject.bark_12_mean = bark_mean[11]
            analysisObject.bark_13_mean = bark_mean[12]
            analysisObject.bark_14_mean = bark_mean[13]
            analysisObject.bark_15_mean = bark_mean[14]
            analysisObject.bark_16_mean = bark_mean[15]
            analysisObject.bark_17_mean = bark_mean[16]
            analysisObject.bark_18_mean = bark_mean[17]
            analysisObject.bark_19_mean = bark_mean[18]
            analysisObject.bark_20_mean = bark_mean[19]
            analysisObject.bark_21_mean = bark_mean[20]
            analysisObject.bark_22_mean = bark_mean[21]
            analysisObject.bark_23_mean = bark_mean[22]
            analysisObject.bark_24_mean = bark_mean[23]
            analysisObject.bark_25_mean = bark_mean[24]
            analysisObject.bark_26_mean = bark_mean[25]
            analysisObject.bark_27_mean = bark_mean[26]

            bark_dev = np.std(neqSpectralResults[0], axis=0)
            analysisObject.bark_1_dev = bark_dev[0]
            analysisObject.bark_2_dev = bark_dev[1]
            analysisObject.bark_3_dev = bark_dev[2]
            analysisObject.bark_4_dev = bark_dev[3]
            analysisObject.bark_5_dev = bark_dev[4]
            analysisObject.bark_6_dev = bark_dev[5]
            analysisObject.bark_7_dev = bark_dev[6]
            analysisObject.bark_8_dev = bark_dev[7]
            analysisObject.bark_9_dev = bark_dev[8]
            analysisObject.bark_10_dev = bark_dev[9]
            analysisObject.bark_11_dev = bark_dev[10]
            analysisObject.bark_12_dev = bark_dev[11]
            analysisObject.bark_13_dev = bark_dev[12]
            analysisObject.bark_14_dev = bark_dev[13]
            analysisObject.bark_15_dev = bark_dev[14]
            analysisObject.bark_16_dev = bark_dev[15]
            analysisObject.bark_17_dev = bark_dev[16]
            analysisObject.bark_18_dev = bark_dev[17]
            analysisObject.bark_19_dev = bark_dev[18]
            analysisObject.bark_20_dev = bark_dev[19]
            analysisObject.bark_21_dev = bark_dev[20]
            analysisObject.bark_22_dev = bark_dev[21]
            analysisObject.bark_23_dev = bark_dev[22]
            analysisObject.bark_24_dev = bark_dev[23]
            analysisObject.bark_25_dev = bark_dev[24]
            analysisObject.bark_26_dev = bark_dev[25]
            analysisObject.bark_27_dev = bark_dev[26]

            analysisObject.bark_kurtosis = np.mean(neqSpectralResults[1])
            analysisObject.bark_skewness = np.mean(neqSpectralResults[2])
            analysisObject.bark_spread = np.mean(neqSpectralResults[3])

            analysisObject.bark_kurtosis_dev = np.std(neqSpectralResults[1])
            analysisObject.bark_skewness_dev = np.std(neqSpectralResults[2])
            analysisObject.bark_spread_dev = np.std(neqSpectralResults[3])

            analysisObject.hfc = np.mean(neqSpectralResults[4])
            analysisObject.hfc_dev = np.std(neqSpectralResults[4])

            # MFCCs
            mfcc_mean = np.mean(neqSpectralResults[5], axis=0)
            analysisObject.mfcc_1_mean = mfcc_mean[0]
            analysisObject.mfcc_2_mean = mfcc_mean[1]
            analysisObject.mfcc_3_mean = mfcc_mean[2]
            analysisObject.mfcc_4_mean = mfcc_mean[3]
            analysisObject.mfcc_5_mean = mfcc_mean[4]
            analysisObject.mfcc_6_mean = mfcc_mean[5]
            analysisObject.mfcc_7_mean = mfcc_mean[6]
            analysisObject.mfcc_8_mean = mfcc_mean[7]
            analysisObject.mfcc_9_mean = mfcc_mean[8]
            analysisObject.mfcc_10_mean = mfcc_mean[9]
            analysisObject.mfcc_11_mean = mfcc_mean[10]
            analysisObject.mfcc_12_mean = mfcc_mean[11]
            analysisObject.mfcc_13_mean = mfcc_mean[12]

            mfcc_dev = np.std(neqSpectralResults[5], axis=0)
            analysisObject.mfcc_1_dev = mfcc_dev[0]
            analysisObject.mfcc_2_dev = mfcc_dev[1]
            analysisObject.mfcc_3_dev = mfcc_dev[2]
            analysisObject.mfcc_4_dev = mfcc_dev[3]
            analysisObject.mfcc_5_dev = mfcc_dev[4]
            analysisObject.mfcc_6_dev = mfcc_dev[5]
            analysisObject.mfcc_7_dev = mfcc_dev[6]
            analysisObject.mfcc_8_dev = mfcc_dev[7]
            analysisObject.mfcc_9_dev = mfcc_dev[8]
            analysisObject.mfcc_10_dev = mfcc_dev[9]
            analysisObject.mfcc_11_dev = mfcc_dev[10]
            analysisObject.mfcc_12_dev = mfcc_dev[11]
            analysisObject.mfcc_13_dev = mfcc_dev[12]

            analysisObject.pitch_salience = np.mean(neqSpectralResults[8])
            analysisObject.spectral_complexity = np.mean(
                neqSpectralResults[12])
            analysisObject.spectral_crest = np.mean(neqSpectralResults[13])
            analysisObject.spectral_decrease = np.mean(neqSpectralResults[14])
            analysisObject.spectral_energy = np.mean(neqSpectralResults[15])
            analysisObject.spectral_energyband_low = np.mean(
                neqSpectralResults[16])
            analysisObject.spectral_energyband_middle_low = np.mean(
                neqSpectralResults[17])
            analysisObject.spectral_energyband_middle_high = np.mean(
                neqSpectralResults[18])
            analysisObject.spectral_energyband_high = np.mean(
                neqSpectralResults[19])
            analysisObject.spectral_flatness_db = np.mean(
                neqSpectralResults[20])
            analysisObject.spectral_flux = np.mean(neqSpectralResults[21])
            analysisObject.spectral_rms = np.mean(neqSpectralResults[22])
            analysisObject.spectral_rolloff = np.mean(neqSpectralResults[23])
            analysisObject.spectral_strongpeak = np.mean(
                neqSpectralResults[24])
            analysisObject.zero_crossing_rate = np.mean(neqSpectralResults[25])
            analysisObject.inharmonicity = np.mean(neqSpectralResults[26])

            analysisObject.pitch_salience_dev = np.std(neqSpectralResults[8])
            analysisObject.spectral_complexity_dev = np.std(
                neqSpectralResults[12])
            analysisObject.spectral_crest_dev = np.std(neqSpectralResults[13])
            analysisObject.spectral_decrease_dev = np.std(
                neqSpectralResults[14])
            analysisObject.spectral_energy_dev = np.std(neqSpectralResults[15])
            analysisObject.spectral_energyband_low_dev = np.std(
                neqSpectralResults[16])
            analysisObject.spectral_energyband_middle_low_dev = np.std(
                neqSpectralResults[17])
            analysisObject.spectral_energyband_middle_high_dev = np.std(
                neqSpectralResults[18])
            analysisObject.spectral_energyband_high_dev = np.std(
                neqSpectralResults[19])
            analysisObject.spectral_flatness_db_dev = np.std(
                neqSpectralResults[20])
            analysisObject.spectral_flux_dev = np.std(neqSpectralResults[21])
            analysisObject.spectral_rms_dev = np.std(neqSpectralResults[22])
            analysisObject.spectral_rolloff_dev = np.std(
                neqSpectralResults[23])
            analysisObject.spectral_strongpeak_dev = np.std(
                neqSpectralResults[24])
            analysisObject.zero_crossing_rate_dev = np.std(
                neqSpectralResults[25])
            analysisObject.inharmonicity_dev = np.std(neqSpectralResults[26])

            tristimulus = np.mean(neqSpectralResults[27], axis=0)
            analysisObject.tristimulus_1 = tristimulus[0]
            analysisObject.tristimulus_2 = tristimulus[1]
            analysisObject.tristimulus_3 = tristimulus[2]

            tristimulus_dev = np.std(neqSpectralResults[27], axis=0)
            analysisObject.tristimulus_1_dev = tristimulus_dev[0]
            analysisObject.tristimulus_2_dev = tristimulus_dev[1]
            analysisObject.tristimulus_3_dev = tristimulus_dev[2]

            # Spectral extractor with equal loudness filter
            eqSpectralExtractor = es.LowLevelSpectralEqloudExtractor(
                frameSize=frameSize, hopSize=hopSize)
            eqSpectralResults = eqSpectralExtractor(eqAudio)

            analysisObject.spectral_centroid = np.mean(eqSpectralResults[3])
            analysisObject.spectral_kurtosis = np.mean(eqSpectralResults[4])
            analysisObject.spectral_skewness = np.mean(eqSpectralResults[5])
            analysisObject.spectral_spread = np.mean(eqSpectralResults[6])

            analysisObject.spectral_centroid_dev = np.std(eqSpectralResults[3])
            analysisObject.spectral_kurtosis_dev = np.std(eqSpectralResults[4])
            analysisObject.spectral_skewness_dev = np.std(eqSpectralResults[5])
            analysisObject.spectral_spread_dev = np.std(eqSpectralResults[6])

            analysisObject.save()

            i = i + 1
            self.stdout.write("\t\t%2.2f%%" % (100.0 *
                                               (i / float(numSamples))),
                              ending='\r')
            self.stdout.flush()

        self.stdout.write("\r", ending='\r')
        self.stdout.flush()
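
The long runs of bark_N_mean / bark_N_dev and mfcc_N_mean / mfcc_N_dev assignments above could be written more compactly with setattr; this sketch mirrors exactly the same mapping and assumes the Feature model field names shown above:

# barkbands: neqSpectralResults[0], 27 bands per frame
bark_mean = np.mean(neqSpectralResults[0], axis=0)
bark_dev = np.std(neqSpectralResults[0], axis=0)
for n in range(27):
    setattr(analysisObject, 'bark_%d_mean' % (n + 1), bark_mean[n])
    setattr(analysisObject, 'bark_%d_dev' % (n + 1), bark_dev[n])

# MFCCs: neqSpectralResults[5], 13 coefficients per frame
mfcc_mean = np.mean(neqSpectralResults[5], axis=0)
mfcc_dev = np.std(neqSpectralResults[5], axis=0)
for n in range(13):
    setattr(analysisObject, 'mfcc_%d_mean' % (n + 1), mfcc_mean[n])
    setattr(analysisObject, 'mfcc_%d_dev' % (n + 1), mfcc_dev[n])
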
Code Example #4
    def temporal_centroid(self, audio):

        envelope = es.Envelope()
        temporal = es.Centroid(range=(float(len(audio) - 1) / 44100))

        return temporal(envelope(audio))
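
A standalone sketch of the same computation (placeholder file name; the 44100 Hz rate is hard-coded to match the method above):

import essentia.standard as es

audio = es.MonoLoader(filename='snare.wav', sampleRate=44100)()
env = es.Envelope()(audio)
# Centroid over the envelope, scaled to seconds via the range parameter
tc = es.Centroid(range=float(len(audio) - 1) / 44100)(env)
print('temporal centroid: %.3f s' % tc)
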
Code Example #5
class DevWrap(QaWrapper):
    """
    Development Solution.
    """

    # parameters (frame_size and hop_size are taken from the QA script's module level)
    frame_size = frame_size
    hop_size = hop_size
    fs = 44100.

    threshold = -50
    prepower_threshold = -30

    prepower_time = .04  #s

    min_time = .01  #s
    max_time = 3.5  #s

    attackTime = .05
    releaseTime = .05

    # private variables
    _threshold = es.essentia.db2amp(threshold)
    _prepower_threshold = es.essentia.db2amp(prepower_threshold)**2
    _prepower_samples = int(prepower_time * fs)
    l_buffer = np.zeros(_prepower_samples)
    _gaps = []

    envelope = es.Envelope(releaseTime=releaseTime, attackTime=attackTime)
    medianFilter = es.MedianFilter()

    def compute(self, *args):
        y = []
        x = args[1]
        for frame_idx, frame in enumerate(
                es.FrameGenerator(x,
                                  frameSize=self.frame_size,
                                  hopSize=self.hop_size,
                                  startFromZero=True)):
            # frame = es.essentia.normalize(frame)
            # updating buffers
            for gap in self._gaps:
                if not gap['finished'] and not gap['active']:
                    last = np.min([self.frame_size, gap['take']])
                    gap['take'] -= last
                    gap['buffer'] = np.hstack([gap['buffer'], frame[:last]])
                    if gap['take'] <= 0:
                        gap['finished'] = True
            remove_idx = []
            for gap_idx, gap in enumerate(self._gaps):
                if gap['finished']:
                    remove_idx.append(gap_idx)
                    postpower = instantPower(esarr(gap['buffer']))
                    if postpower > self._prepower_threshold:
                        if self.min_time <= gap['end'] - gap[
                                'start'] <= self.max_time:
                            y.append(gap['start'])

            remove_idx.sort(reverse=True)
            for i in remove_idx:
                self._gaps.pop(i)

            x1 = self.envelope(frame)
            x2 = esarr(x1 > self._threshold)

            x3 = self.medianFilter(x2).round().astype(int)

            x3_d = np.zeros(len(x3))

            start_proc = int(self.frame_size / 2 - self.hop_size / 2)
            end_proc = int(self.frame_size / 2 + self.hop_size / 2)
            for i in range(start_proc, end_proc):

                x3_d[i] = x3[i] - x3[i - 1]

            s_dx = np.argwhere(x3_d == -1)
            e_dx = np.argwhere(x3_d == 1)

            # initializing
            if s_dx.size:
                offset = frame_idx * self.hop_size
                for s in s_dx:
                    s = s[0]
                    take_from_buffer = s - self._prepower_samples
                    if take_from_buffer > 0:
                        prepower = instantPower(frame[take_from_buffer:s])
                    else:
                        prepower = instantPower(
                            esarr(
                                np.hstack([
                                    self.l_buffer[-np.abs(take_from_buffer):],
                                    frame[:s]
                                ])))
                    if prepower > self._prepower_threshold:
                        self._gaps.append({
                            'start': (offset + s) / self.fs,
                            'end': 0,
                            'buffer': [],
                            'take': 0,
                            'active': True,
                            'finished': False
                        })

            # finishing
            if e_dx.size and self._gaps:
                offset = frame_idx * self.hop_size
                for e in e_dx:
                    e = e[0]
                    take_from_next_frame = np.max([
                        (self._prepower_samples + e) - self.frame_size, 0
                    ])
                    for gap in self._gaps:
                        if gap['active']:
                            gap['take'] = take_from_next_frame
                            gap['end'] = (offset + e) / self.fs
                            last = np.min(
                                [self.frame_size, e + self._prepower_samples])
                            gap['buffer'] = frame[e:last]
                            gap['active'] = False
                            break

            # update buffers
            update_num = np.min([self._prepower_samples, self.hop_size])
            self.l_buffer = np.roll(self.l_buffer, -update_num)  # np.roll returns a copy, so reassign
            self.l_buffer[-update_num:] = frame[-update_num:]

        self._gaps = []
        return esarr(y)
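
DevWrap is written against Essentia's QA test framework: QaWrapper, frame_size, hop_size, esarr and instantPower are module-level names from that script, and the exact QaWrapper constructor is not shown here. Assuming those are in place, a rough sketch of driving the detector directly:

import essentia.standard as es

wrapper = DevWrap()                        # constructor arguments depend on the QA framework
audio = es.MonoLoader(filename='take.wav', sampleRate=44100)()   # placeholder path
gap_starts = wrapper.compute(None, audio)  # compute() reads the signal from args[1]
print('gap start times (s):', gap_starts)
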
Code Example #6
File: sfx.py  Project: hoinx/sms-tools
def compute(audio, pool, options):
    INFO('Computing SFX descriptors...')

    # analysis parameters
    sampleRate = options['sampleRate']
    frameSize = options['frameSize']
    hopSize = options['hopSize']
    windowType = options['windowType']

    # frame algorithms
    frames = ess.FrameGenerator(audio=audio,
                                frameSize=frameSize,
                                hopSize=hopSize)
    window = ess.Windowing(size=frameSize, zeroPadding=0, type=windowType)
    spectrum = ess.Spectrum(size=frameSize)

    # pitch algorithm
    pitch_detection = ess.PitchYinFFT(frameSize=2048, sampleRate=sampleRate)

    # sfx descriptors
    spectral_peaks = ess.SpectralPeaks(sampleRate=sampleRate,
                                       orderBy='frequency')
    harmonic_peaks = ess.HarmonicPeaks()
    inharmonicity = ess.Inharmonicity()
    odd2evenharmonicenergyratio = ess.OddToEvenHarmonicEnergyRatio()
    tristimulus = ess.Tristimulus()

    # used for a nice progress display
    total_frames = frames.num_frames()
    n_frames = 0
    start_of_frame = -frameSize * 0.5
    progress = Progress(total=total_frames)

    for frame in frames:

        frameScope = [
            start_of_frame / sampleRate,
            (start_of_frame + frameSize) / sampleRate
        ]
        # pool.setCurrentScope(frameScope)

        if options['skipSilence'] and es.isSilent(frame):
            total_frames -= 1
            start_of_frame += hopSize
            continue

        frame_windowed = window(frame)
        frame_spectrum = spectrum(frame_windowed)

        # pitch descriptors
        frame_pitch, frame_pitch_confidence = pitch_detection(frame_spectrum)

        # spectral peaks based descriptors
        frame_frequencies, frame_magnitudes = spectral_peaks(frame_spectrum)

        # ERROR CORRECTION - hoinx 2015-12
        errIdx = np.where(frame_frequencies < 1)
        frame_frequencies = np.delete(frame_frequencies, errIdx)
        frame_magnitudes = np.delete(frame_magnitudes, errIdx)

        (frame_harmonic_frequencies,
         frame_harmonic_magnitudes) = harmonic_peaks(frame_frequencies,
                                                     frame_magnitudes,
                                                     frame_pitch)
        if len(frame_harmonic_frequencies) > 1:
            frame_inharmonicity = inharmonicity(frame_harmonic_frequencies,
                                                frame_harmonic_magnitudes)
            pool.add(namespace + '.' + 'inharmonicity', frame_inharmonicity)
            frame_tristimulus = tristimulus(frame_harmonic_frequencies,
                                            frame_harmonic_magnitudes)
            pool.add(namespace + '.' + 'tristimulus', frame_tristimulus)
            frame_odd2evenharmonicenergyratio = odd2evenharmonicenergyratio(
                frame_harmonic_frequencies, frame_harmonic_magnitudes)
            pool.add(namespace + '.' + 'odd2evenharmonicenergyratio',
                     frame_odd2evenharmonicenergyratio)

        # display of progress report
        progress.update(n_frames)

        n_frames += 1
        start_of_frame += hopSize

    envelope = ess.Envelope()
    file_envelope = envelope(audio)

    # temporal statistics
    decrease = ess.Decrease()
    pool.add(namespace + '.' + 'temporal_decrease',
             decrease(file_envelope))  # , pool.GlobalScope)

    centralmoments = ess.CentralMoments()
    file_centralmoments = centralmoments(file_envelope)

    distributionshape = ess.DistributionShape()
    (file_spread, file_skewness,
     file_kurtosis) = distributionshape(file_centralmoments)
    pool.add(namespace + '.' + 'temporal_spread',
             file_spread)  # , pool.GlobalScope)
    pool.add(namespace + '.' + 'temporal_skewness',
             file_skewness)  # , pool.GlobalScope)
    pool.add(namespace + '.' + 'temporal_kurtosis',
             file_kurtosis)  # , pool.GlobalScope)

    centroid = ess.Centroid()
    pool.add(namespace + '.' + 'temporal_centroid',
             centroid(file_envelope))  # , pool.GlobalScope)

    # effective duration
    effectiveduration = ess.EffectiveDuration()
    pool.add(namespace + '.' + 'effective_duration',
             effectiveduration(file_envelope))  # , pool.GlobalScope)

    # log attack time
    logattacktime = ess.LogAttackTime()
    pool.add(namespace + '.' + 'logattacktime',
             logattacktime(audio))  # , pool.GlobalScope)

    # strong decay
    strongdecay = ess.StrongDecay()
    pool.add(namespace + '.' + 'strongdecay',
             strongdecay(file_envelope))  # , pool.GlobalScope)

    # dynamic profile
    flatness = ess.FlatnessSFX()
    pool.add(namespace + '.' + 'flatness',
             flatness(file_envelope))  # , pool.GlobalScope)
    """
    # onsets number
    onsets_number = len(pool['rhythm.onset_times'][0])
    pool.add(namespace + '.' + 'onsets_number', onsets_number)  # , pool.GlobalScope)
    """

    # morphological descriptors
    max_to_total = ess.MaxToTotal()
    pool.add(namespace + '.' + 'max_to_total',
             max_to_total(file_envelope))  # , pool.GlobalScope)

    tc_to_total = ess.TCToTotal()
    pool.add(namespace + '.' + 'tc_to_total',
             tc_to_total(file_envelope))  # , pool.GlobalScope)

    derivativeSFX = ess.DerivativeSFX()
    (der_av_after_max, max_der_before_max) = derivativeSFX(file_envelope)
    pool.add(namespace + '.' + 'der_av_after_max',
             der_av_after_max)  # , pool.GlobalScope)
    pool.add(namespace + '.' + 'max_der_before_max',
             max_der_before_max)  # , pool.GlobalScope)

    # pitch profile
    """
    pitch = pool['lowlevel.pitch']

    if len(pitch) > 1:
        pool.add(namespace + '.' + 'pitch_max_to_total', max_to_total(pitch))  # , pool.GlobalScope)

        min_to_total = ess.MinToTotal()
        pool.add(namespace + '.' + 'pitch_min_to_total', min_to_total(pitch))  # , pool.GlobalScope)

        pitch_centroid = ess.Centroid(range=len(pitch) - 1)
        pool.add(namespace + '.' + 'pitch_centroid', pitch_centroid(pitch))  # , pool.GlobalScope)

        pitch_after_max_to_before_max_energy_ratio = ess.AfterMaxToBeforeMaxEnergyRatio()
        pool.add(namespace + '.' + 'pitch_after_max_to_before_max_energy_ratio',
                 pitch_after_max_to_before_max_energy_ratio(pitch))  # , pool.GlobalScope)

    else:
        pool.add(namespace + '.' + 'pitch_max_to_total', 0.0)  # , pool.GlobalScope)
        pool.add(namespace + '.' + 'pitch_min_to_total', 0.0)  # , pool.GlobalScope)
        pool.add(namespace + '.' + 'pitch_centroid', 0.0)  # , pool.GlobalScope)
        pool.add(namespace + '.' + 'pitch_after_max_to_before_max_energy_ratio', 0.0)  # , pool.GlobalScope)
    """

    progress.finish()
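
A minimal driver sketch for this function; 'namespace', INFO and Progress are module-level names in sfx.py, and the option values below are assumptions:

import essentia
import essentia.standard as ess

options = {
    'sampleRate': 44100,
    'frameSize': 2048,
    'hopSize': 1024,
    'windowType': 'blackmanharris62',
    'skipSilence': True,
}

audio = ess.MonoLoader(filename='bell.wav', sampleRate=options['sampleRate'])()  # placeholder path
pool = essentia.Pool()
compute(audio, pool, options)
print(pool.descriptorNames())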