Example #1
        diar_res.data_object.label = label
        diar_res.data_object.time = time
        diar_res.data_object.duration = duration
        diar_res.data_object.label_metadata.label = dict()
        for lab in diar_res.data_object.label:
            diar_res.data_object.label_metadata.label[lab] = str(lab)

        self.add_result(diar_res)


# Generate Graphers for LimsiDiarization analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayLimsiDiarization = DisplayAnalyzer.create(
    analyzer=LimsiDiarization,
    analyzer_parameters={'sad_model': 'etape'},
    result_id='limsi_diarization.speakers',
    grapher_id='grapher_limsi_diarization_speakers',
    grapher_name='Speaker diarization (ETAPE)',
    background='waveform',
    staging=False)

DisplayLimsiDiarizationMaya = DisplayAnalyzer.create(
    analyzer=LimsiDiarization,
    analyzer_parameters={'sad_model': 'maya'},
    result_id='limsi_diarization.speakers',
    grapher_id='grapher_limsi_diarization_speakers_maya',
    grapher_name='Speaker diarization (Mayan)',
    background='waveform',
    staging=False)
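
# Usage sketch (not from the original source): DisplayAnalyzer.create()
# registers each grapher as a TimeSide processor under its grapher_id, so it
# can be piped after a decoder and rendered to an image. 'voices.wav' is a
# placeholder file name, and the 'file_decoder' id is assumed to be the stock
# TimeSide file decoder.
import timeside.core
from timeside.core import get_processor

decoder = get_processor('file_decoder')('voices.wav')
grapher = get_processor('grapher_limsi_diarization_speakers')()
(decoder | grapher).run()
grapher.render(output='diarization.png')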
Example #2
    def post_process(self):
        pitch = self.new_result(data_mode='value', time_mode='framewise')

        # parameters: None
        # TODO: check with Piem -- "default" and "freq" in setup

        pitch.id_metadata.id += '.' + "pitch"
        pitch.id_metadata.name += ' ' + "pitch"
        pitch.id_metadata.unit = "Hz"
        pitch.data_object.value = self.pitches
        self.add_result(pitch)

        pitch_confidence = self.new_result(
            data_mode='value', time_mode='framewise')
        pitch_confidence.id_metadata.id += '.' + "pitch_confidence"
        pitch_confidence.id_metadata.name += ' ' + "pitch confidence"
        pitch_confidence.id_metadata.unit = None
        pitch_confidence.data_object.value = self.pitch_confidences
        self.add_result(pitch_confidence)


# Generate Grapher for Aubio Pitch analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayAubioPitch = DisplayAnalyzer.create(
    analyzer=AubioPitch,
    result_id='aubio_pitch.pitch',
    grapher_id='grapher_aubio_pitch',
    grapher_name='Pitch',
    background='spectrogram')
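
# Usage sketch (not from the original source): after a pipe has run, the
# values stored by add_result() are presumably readable from the analyzer's
# results container, keyed by result id. 'melody.wav' is a placeholder and
# the 'aubio_pitch' processor id is inferred from the result_id above.
from timeside.core import get_processor

decoder = get_processor('file_decoder')('melody.wav')
pitch_analyzer = get_processor('aubio_pitch')()
(decoder | pitch_analyzer).run()
pitch = pitch_analyzer.results['aubio_pitch.pitch']
confidence = pitch_analyzer.results['aubio_pitch.pitch_confidence']
print(pitch.id_metadata.unit)  # 'Hz'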
Example #3
        #

        # NOTE: 'time' computed in this loop is never used
        for a in range(1, len(debut)):
            time = float(fin[a] - debut[a]) / 100

        sing_result = self.new_result(data_mode='label', time_mode='segment')
        # sing_result.id_metadata.id += '.' + 'segment'
        sing_result.data_object.label = label
        sing_result.data_object.time = np.asarray(debut) / 100
        sing_result.data_object.duration = (np.asarray(fin) -
                                            np.asarray(debut)) / 100
        sing_result.data_object.label_metadata.label = {
            0: 'No Singing',
            1: 'Singing'
        }
        self.add_result(sing_result)


# Generate Grapher for Labri Singing detection analyzer
from timeside.core.grapher import DisplayAnalyzer

# Labri Singing
DisplayLABRI_SING = DisplayAnalyzer.create(
    analyzer=LabriSing,
    analyzer_parameters={},
    result_id='labri_singing',
    grapher_id='grapher_labri_singing',
    grapher_name='Singing voice detection',
    background='waveform',
    staging=False)
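
# Toy illustration (not from the original source): the repeated '/ 100'
# conversions suggest debut/fin hold frame indices in hundredths of a second.
import numpy as np

debut = [0, 250, 700]    # hypothetical segment starts, in 1/100 s
fin = [250, 700, 1000]   # hypothetical segment ends, in 1/100 s
label = [0, 1, 0]        # 0: 'No Singing', 1: 'Singing'
time = np.asarray(debut) / 100                           # [0.0, 2.5, 7.0] s
duration = (np.asarray(fin) - np.asarray(debut)) / 100   # [2.5, 4.5, 3.0] s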
Example #4
            dmap[c] = (vmin, distances)

        vmin = 10
        selected_c = None

        for c in dmap:
            if dmap[c][0] < vmin:
                selected_c = c
                vmin = dmap[c][0]

        if selected_c is not None and len(dmap[selected_c][1]) > 0:
            for d, c in dmap[selected_c][1]:
                selected_c.fusion(c)
                classes.remove(c)

    segments = []
    for i, group in enumerate(classes):
        for seg in group.segments:
            segments += [(seg[0], seg[1], i)]

    return segments

from timeside.core.grapher import DisplayAnalyzer

DisplayIritSingingTurns = DisplayAnalyzer.create(
    analyzer=IRITDECAP,
    result_id='irit_singing_turns',
    grapher_id='grapher_irit_singingturns',
    grapher_name='Singing turns',
    staging=True)
Example #5
        # S[S<1e-3]=0
        np.maximum(S, 1e-3, out=S)

        # Differentiator filter
        df_filter = signal.fir_filter_design.remez(31, [0, 0.5], [Pi],
                                                   type='differentiator')

        S_diff = signal.lfilter(df_filter, 1, S, axis=0)
        S_diff[S_diff < 1e-10] = 0

        # Summation along the frequency axis
        odf_diff = S_diff.sum(axis=1)
        odf_median = np.median(odf_diff)
        if odf_median:
            odf_diff = odf_diff / odf_median  # Normalize

        odf = self.new_result(data_mode='value', time_mode='framewise')
        # odf.parameters = {'FFT_SIZE': self.FFT_SIZE}
        odf.data_object.value = odf_diff
        self.add_result(odf)


# Generate Grapher for Onset Detection Function analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayOnsetDetectionFunction = DisplayAnalyzer.create(
    analyzer=OnsetDetectionFunction,
    result_id='onset_detection_function',
    grapher_id='grapher_onset_detection_function',
    grapher_name='Onset detection')
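
# Standalone sketch (not from the original source) of the same filter design;
# the module-level 'Pi' above is presumably numpy.pi.
import numpy as np
from scipy import signal

df_filter = signal.remez(31, [0, 0.5], [np.pi], type='differentiator')
t = np.arange(256)
x = np.sin(2 * np.pi * 0.02 * t)
y = signal.lfilter(df_filter, 1, x)  # approximates dx/dt, delayed by 15 samples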
Example #6
        vmin = 10
        selected_c = None

        for c in dmap:
            if dmap[c][0] < vmin:
                selected_c = c
                vmin = dmap[c][0]

        if selected_c is not None and len(dmap[selected_c][1]) > 0:
            for d, c in dmap[selected_c][1]:
                selected_c.fusion(c)
                classes.remove(c)

    segments = []
    for i, group in enumerate(classes):
        for seg in group.segments:
            segments += [(seg[0], seg[1], i)]

    return segments


from timeside.core.grapher import DisplayAnalyzer

DisplayIritSingingTurns = DisplayAnalyzer.create(
    analyzer=IRITDECAP,
    result_id='irit_singing_turns',
    grapher_id='grapher_irit_singingturns',
    grapher_name='Singing turns',
    staging=True)
Example #7
        # segList = segmentFromValues(medfilt(modEnergyValue > self.threshold, 31))

        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'

        segs.data_object.label_metadata.label = label

        segs.data_object.label = [convert[s[2]] for s in segList]
        segs.data_object.time = [tLine[s[0]] for s in segList]
        segs.data_object.duration = [tLine[s[1]] - tLine[s[0]]
                                     for s in segList]

        self.add_result(segs)
        return


def getBoundariesInInterval(start, stop, boundaries):
    return [t for t in boundaries if t >= start and t <= stop]

# Generate Grapher for IRITMusicSNB analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayMusicSNB = DisplayAnalyzer.create(
    analyzer=IRITMusicSNB,
    result_id='irit_music_snb.segments',
    grapher_id='grapher_irit_music_snb_segments',
    grapher_name='Music Detector - Segment Number',
    background='waveform',
    staging=True)
Example #8
    l1 = len(v1)
    l2 = len(v2)
    decal = numpy.abs(m1 - m2)

    if m1 >= m2:
        fin = numpy.min([l1 - decal, l2])
        if fin - decal > min_overlap:

            v1_out = numpy.array(v1[decal:decal + fin])
            v2_out = numpy.array(v2[:fin])
            d = numpy.mean(numpy.abs(v1_out - v2_out))
        else:
            v1_out = [0]
            v2_out = [1]
            d = 1
    else:
        return computeDist(v2, v1, min_overlap)

    return d, v1_out, v2_out


# Generate Grapher for IRITStartSeg analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayIRIT_Start = DisplayAnalyzer.create(
    analyzer=IRITStartSeg,
    result_id='irit_startseg.segments',
    grapher_id='grapher_irit_startseg',
    grapher_name='Analogous start point',
    background='waveform',
    staging=False)
Example #9
    def name():
        return "Constant Q transform from QMUL vamp plugins"

    @staticmethod
    @interfacedoc
    def unit():
        return ""

    def post_process(self):
        super(VampConstantQ, self).post_process()  # get remaining results

        constant_q = self.new_result(data_mode='value', time_mode='framewise')

        midi_pitches = np.arange(self.minpitch, self.maxpitch, 12.0 / self.bpo)
        constant_q.data_object.y_value = [midi2freq(midi_number=p, tuningA4=self.tuning)
                                          for p in midi_pitches]

        constant_q.data_object.value = self.vamp_results['matrix'][1]
        self.add_result(constant_q)


# Generate Grapher for CQT analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayCQT = DisplayAnalyzer.create(
    analyzer=VampConstantQ,
    result_id='vamp_constantq',
    grapher_id='grapher_vamp_cqt',
    grapher_name='Constant Q Transform',
    staging=False)
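
# midi2freq is not shown in this excerpt; under the usual equal-temperament
# convention it would be the following (an assumption, not the plugin source):
def midi2freq(midi_number, tuningA4=440.0):
    # MIDI note 69 is A4; 12 semitones per octave
    return tuningA4 * 2.0 ** ((midi_number - 69) / 12.0)

midi2freq(60)  # middle C, ~261.63 Hz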
Example #10
        #        fin[a]=fin[a+1]
        #        fin=np.delete(fin,a)
        #        label=np.delete(label,a)
        #

        # NOTE: 'time' computed in this loop is never used
        for a in range(1, len(debut)):
            time = float(fin[a] - debut[a]) / 100

        sing_result = self.new_result(data_mode='label', time_mode='segment')
        # sing_result.id_metadata.id += '.' + 'segment'
        sing_result.data_object.label = label
        sing_result.data_object.time = np.asarray(debut) / 100
        sing_result.data_object.duration = (np.asarray(fin) - np.asarray(debut)) / 100
        sing_result.data_object.label_metadata.label = {0: 'No Singing', 1: 'Singing'}
        self.add_result(sing_result)


# Generate Grapher for Labri Singing detection analyzer
from timeside.core.grapher import DisplayAnalyzer

# Labri Singing
DisplayLABRI_SING = DisplayAnalyzer.create(
    analyzer=LabriSing,
    analyzer_parameters={},
    result_id='labri_singing',
    grapher_id='grapher_labri_singing',
    grapher_name='Labri singing voice detection',
    background='waveform',
    staging=False)
Example #11
                                  self.input_samplerate)
                                 for s in segList_filt]
        med_segs.data_object.duration = [(float(s[1] - s[0] + 1) * self.input_stepsize /
                                          self.input_samplerate)
                                         for s in segList_filt]

        self.add_result(med_segs)

        return


# Generate Grapher for IRITSpeech4Hz analyzer
from timeside.core.grapher import DisplayAnalyzer

Display4hzSpeechSegmentation = DisplayAnalyzer.create(
    analyzer=IRITSpeech4Hz,
    result_id='irit_speech_4hz.segments',
    grapher_id='grapher_irit_speech_4hz_segments',
    grapher_name='Speech activity - 4hz',
    background='waveform',
    staging=True)

# IRIT 4Hz with median filter
Display4hzSpeechSegmentationMedian = DisplayAnalyzer.create(
    analyzer=IRITSpeech4Hz,
    result_id='irit_speech_4hz.segments_median',
    grapher_id='grapher_irit_speech_4hz_segments_median',
    grapher_name='Speech detection (syllabic rate)',
    background='waveform',
    staging=False)
Example #12
    def process(self, frames, eod=False):
        self.values.append(np.abs(np.fft.rfft(frames, self.fft_size)))
        return frames, eod

    def post_process(self):
        spectrogram = self.new_result(data_mode='value', time_mode='framewise')
        spectrogram.parameters = {'fft_size': self.fft_size}
        # spectrogram.data_object.value = self.values['spectrogram']
        spectrogram.data_object.value = self.values
        nb_freq = spectrogram.data_object.value.shape[1]
        spectrogram.data_object.y_value = (np.arange(0, nb_freq) *
                                           self.samplerate() / self.fft_size)
        self.add_result(spectrogram)
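
# Equivalent standalone computation (not from the original source); the
# sample rate and fft_size here are arbitrary stand-ins.
import numpy as np

fft_size, samplerate = 2048, 44100
frames = np.random.randn(10, fft_size)  # stand-in for 10 audio windows
values = [np.abs(np.fft.rfft(f, fft_size)) for f in frames]
spec = np.vstack(values)                # shape (10, fft_size // 2 + 1)
freqs = np.arange(spec.shape[1]) * samplerate / fft_size  # bin index -> Hz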


# Generate Grapher for Spectrogram analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayLinearSpectrogram = DisplayAnalyzer.create(
    analyzer=Spectrogram,
    result_id='spectrogram_analyzer',
    grapher_id='spectrogram',
    grapher_name='Linear Spectrogram',
    staging=False)


if __name__ == "__main__":
    import doctest
    import timeside
    doctest.testmod(timeside.plugins.analyzer.spectrogram, verbose=True)
Example #13
        # Output result

        gated_loudness = self.new_result(data_mode='value', time_mode='global')
        gated_loudness.data_object.value = GatedLoudness
        gated_loudness.id_metadata.id += '.gated_loudness'
        gated_loudness.id_metadata.name += ' Gated Loudness'
        self.add_result(gated_loudness)

        relative_threshold = self.new_result(data_mode='value', time_mode='global')
        relative_threshold.data_object.value = Gamma_r
        relative_threshold.id_metadata.id += '.relative_threshold'
        relative_threshold.id_metadata.name += ' Relative Threshold'
        self.add_result(relative_threshold)

        block_loudness = self.new_result(data_mode='value', time_mode='framewise')
        block_loudness.data_object.value = self.l
        block_loudness.id_metadata.id += '.block_loudness'
        block_loudness.id_metadata.name += ' Block Loudness'
        self.add_result(block_loudness)


# Generate Grapher for Loudness ITU
from timeside.core.grapher import DisplayAnalyzer

DisplayLoudnessITU = DisplayAnalyzer.create(
    analyzer=LoudnessITU,
    result_id='loudness_itu.block_loudness',
    grapher_id='grapher_loudness_itu',
    grapher_name='Loudness ITU',
    staging=False)
Example #14
        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'

        segs.data_object.label_metadata.label = label

        segs.data_object.label = [convert[s[2]] for s in segList]
        segs.data_object.time = [tLine[s[0]] for s in segList]
        segs.data_object.duration = [
            tLine[s[1]] - tLine[s[0]] for s in segList
        ]

        self.add_result(segs)
        return


def getBoundariesInInterval(start, stop, boundaries):
    return [t for t in boundaries if t >= start and t <= stop]


# Generate Grapher for IRITMusicSNB analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayMusicSNB = DisplayAnalyzer.create(
    analyzer=IRITMusicSNB,
    result_id='irit_music_snb.segments',
    grapher_id='grapher_irit_music_snb_segments',
    grapher_name='Music Detector - Segment Number',
    background='waveform',
    staging=True)
Example #15
    l1 = len(v1)
    l2 = len(v2)
    decal = numpy.abs(m1 - m2)

    if m1 >= m2:
        fin = numpy.min([l1 - decal, l2])
        if fin - decal > min_overlap:

            v1_out = numpy.array(v1[decal:decal + fin])
            v2_out = numpy.array(v2[:fin])
            d = numpy.mean(numpy.abs(v1_out - v2_out))
        else:
            v1_out = [0]
            v2_out = [1]
            d = 1
    else:
        return computeDist(v2, v1, min_overlap)

    return d, v1_out, v2_out


# Generate Grapher for IRITStartSeg analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayIRIT_Start = DisplayAnalyzer.create(
    analyzer=IRITStartSeg,
    result_id='irit_startseg.segments',
    grapher_id='grapher_irit_startseg',
    grapher_name='Analogous start point',
    background='waveform',
    staging=False)
Example #16
    Boundaries in samples
    """
    data = [abs(x) for x in data]
    boundaries_sample = [int(b * fe) for b in boundaries]
    w = w_len * fe
    l = len(data)

    return list(zip(boundaries,
                    [sum(data[b:int(min(b + w, l))]) -
                     sum(data[int(max(0, b - w)):b])
                     for b in boundaries_sample]))


def get_tempo_spectrum(boundaries, freq_range):
    """
    Magnitude of the weighted Fourier transform of the boundary positions,
    evaluated at each frequency in freq_range.
    """

    pos, wei = map(array, zip(*boundaries))
    j = complex(0, 1)
    return [abs(sum(exp(-2.0 * j * pi * f * pos) * wei)) for f in freq_range]
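
# Quick check (not from the original source): with an impulse every 0.5 s and
# unit weights, the tempo spectrum should peak near the 2 Hz pulse rate.
from numpy import arange, argmax

beats = [(t, 1.0) for t in arange(0, 10, 0.5)]
freqs = arange(0.5, 3.5, 0.1)
spectrum = get_tempo_spectrum(beats, freqs)
print(freqs[argmax(spectrum)])  # ~2.0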




# Generate Grapher for IRITTempogram analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayIritTempogram = DisplayAnalyzer.create(
    analyzer=IRITTempogram,
    result_id='irit_tempogram',
    grapher_id='grapher_irit_tempogram',
    grapher_name='Tempogram - Divergence',
    staging=True)
Example #17
        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'

        segs.data_object.label_metadata.label = label

        segs.data_object.label = [convert[s[2]] for s in segList]
        segs.data_object.time = [tLine[s[0]] for s in segList]
        segs.data_object.duration = [tLine[s[1]] - tLine[s[0]]
                                     for s in segList]

        self.add_result(segs)
        return


def getBoundariesInInterval(start, stop, boundaries):
    return [t for t in boundaries if t >= start and t <= stop]
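
# For instance (not from the original source):
getBoundariesInInterval(1.0, 2.0, [0.5, 1.2, 1.8, 2.5])  # -> [1.2, 1.8]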


# Generate Grapher for IRITMusicSLN analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayMusicSLN = DisplayAnalyzer.create(
    analyzer=IRITMusicSLN,
    result_id='irit_music_sln.segments',
    grapher_id='grapher_irit_music_sln_segments',
    grapher_name='Music detection',
    background='waveform',
    staging=False)
Example #18
    @frames_adapter
    def process(self, frames, eod=False):
        self.values.append(np.abs(np.fft.rfft(frames, self.fft_size)))
        return frames, eod

    def post_process(self):
        spectrogram = self.new_result(data_mode='value', time_mode='framewise')
        spectrogram.parameters = {'fft_size': self.fft_size}
        # spectrogram.data_object.value = self.values['spectrogram']
        spectrogram.data_object.value = self.values
        nb_freq = spectrogram.data_object.value.shape[1]
        spectrogram.data_object.y_value = (np.arange(0, nb_freq) *
                                           self.samplerate() / self.fft_size)
        self.add_result(spectrogram)


# Generate Grapher for Spectrogram analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayLinearSpectrogram = DisplayAnalyzer.create(
    analyzer=Spectrogram,
    result_id='spectrogram_analyzer',
    grapher_id='spectrogram',
    grapher_name='Linear Spectrogram',
    staging=False)

if __name__ == "__main__":
    import doctest
    import timeside
    doctest.testmod(timeside.plugins.analyzer.spectrogram, verbose=True)
Example #19
        gated_loudness = self.new_result(data_mode='value', time_mode='global')
        gated_loudness.data_object.value = GatedLoudness
        gated_loudness.id_metadata.id += '.gated_loudness'
        gated_loudness.id_metadata.name += ' Gated Loudness'
        self.add_result(gated_loudness)

        relative_threshold = self.new_result(data_mode='value',
                                             time_mode='global')
        relative_threshold.data_object.value = Gamma_r
        relative_threshold.id_metadata.id += '.relative_threshold'
        relative_threshold.id_metadata.name += ' Relative Threshold'
        self.add_result(relative_threshold)

        block_loudness = self.new_result(data_mode='value',
                                         time_mode='framewise')
        block_loudness.data_object.value = self.l
        block_loudness.id_metadata.id += '.block_loudness'
        block_loudness.id_metadata.name += ' Block Loudness'
        self.add_result(block_loudness)


# Generate Grapher for Loudness ITU
from timeside.core.grapher import DisplayAnalyzer

DisplayLoudnessITU = DisplayAnalyzer.create(
    analyzer=LoudnessITU,
    result_id='loudness_itu.block_loudness',
    grapher_id='grapher_loudness_itu',
    grapher_name='Loudness ITU',
    staging=False)
Example #20
        return ""

    @downmix_to_mono
    @frames_adapter
    def process(self, frames, eod=False):
        self.silence.append(silence_detection(frames, self.threshold))
        return frames, eod

    def post_process(self):

        silence = self.new_result(data_mode='label', time_mode='segment')
        silence.data_object.time = (np.arange(0, len(self.silence) * self.input_stepsize,
                                              self.input_stepsize) / self.input_samplerate)
        silence.data_object.label = np.array(self.silence, dtype=int)
        duration = self.input_blocksize / float(self.input_samplerate)
        silence.data_object.duration = np.ones(silence.data_object.label.shape) * duration
        silence.data_object.label_metadata.label = {0: 'Silence', 1: 'Not Silence'}
        silence.data_object.merge_segment()

        self.add_result(silence)

# Generate Grapher
from timeside.core.grapher import DisplayAnalyzer

DisplayAubioSilence = DisplayAnalyzer.create(
    analyzer=AubioSilence,
    result_id='aubio_silence',
    grapher_id='grapher_aubio_silence',
    grapher_name='Aubio Silence',
    staging=False)
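
# merge_segment() presumably collapses consecutive frames sharing a label
# into single segments; a standalone sketch of that idea (not TimeSide code):
def merge_runs(times, durations, labels):
    merged = []
    for t, d, l in zip(times, durations, labels):
        if merged and merged[-1][2] == l:
            start, _, lab = merged[-1]
            merged[-1] = (start, t + d - start, lab)  # extend previous segment
        else:
            merged.append((t, d, l))
    return merged

merge_runs([0.0, 0.5, 1.0], [0.5, 0.5, 0.5], [1, 1, 0])
# -> [(0.0, 1.0, 1), (1.0, 0.5, 0)]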
Example #21
    @staticmethod
    @interfacedoc
    def name():
        return "Waveform Analyzer"

    @staticmethod
    @interfacedoc
    def unit():
        return ""

#    @downmix_to_mono
#    @frames_adapter
    def process(self, frames, eod=False):
        self.values.append(frames)
        return frames, eod

    def post_process(self):
        waveform = self.new_result(data_mode='value', time_mode='framewise')
        waveform.data_object.value = np.vstack(self.values)
        self.add_result(waveform)


# Generate Grapher for Waveform analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayWaveform = DisplayAnalyzer.create(analyzer=Waveform,
                                         result_id='waveform_analyzer',
                                         grapher_id='grapher_waveform',
                                         grapher_name='Waveform from Analyzer',
                                         staging=True)

Example #22
        music_result.id_metadata.name = "Labri Music detection"
        music_result.data_object.label = music
        music_result.data_object.time = np.asarray(start_music) / 100
        music_result.data_object.duration = (np.asarray(end_music) - np.asarray(start_music)) / 100
        music_result.data_object.label_metadata.label = {0: 'No Music', 1: 'Music'}
        self.add_result(music_result)


# Generate Grapher for Labri Speech/Music/Noise detection
from timeside.core.grapher import DisplayAnalyzer

# Labri Speech/Music/Noise --> Speech
DisplayLABRI_PMB = DisplayAnalyzer.create(
    analyzer=LabriSMN,
    analyzer_parameters={},
    result_id='labri_speech_music_noise.speech',
    grapher_id='grapher_labri_smn_speech',
    grapher_name='Labri Speech Detection',
    background='waveform',
    staging=True)

# Labri Speech/Music/Noise --> Music
DisplayLABRI_PMB_Music = DisplayAnalyzer.create(
    analyzer=LabriSMN,
    analyzer_parameters={},
    result_id='labri_speech_music_noise.music',
    grapher_id='grapher_labri_smn_music',
    grapher_name='Labri Music Detection',
    background='waveform',
    staging=True)

Example #23
    @staticmethod
    @interfacedoc
    def unit():
        return ""

    @downmix_to_mono
    @frames_adapter
    def process(self, frames, eod=False):
        if not eod:
            w_frame = self.windower(essentia.array(frames.squeeze()))
            spectrum = self.spec_alg(w_frame)
            spec, mags = self.spec_peaks_alg(spectrum)
            self.dissonance.append(self.dissonance_alg(spec, mags))
        return frames, eod

    def post_process(self):

        dissonance = self.new_result(data_mode='value', time_mode='framewise')
        dissonance.data_object.value = self.dissonance
        self.add_result(dissonance)


# Generate Grapher for Essentia dissonance
from timeside.core.grapher import DisplayAnalyzer

DisplayDissonance = DisplayAnalyzer.create(analyzer=Essentia_Dissonance,
                                           result_id='essentia_dissonance',
                                           grapher_id='grapher_dissonance',
                                           grapher_name='Dissonance',
                                           staging=False)
Example #24
    @downmix_to_mono
    @frames_adapter
    def process(self, frames, eod=False):
        self.silence.append(silence_detection(frames, self.threshold))
        return frames, eod

    def post_process(self):

        silence = self.new_result(data_mode='label', time_mode='segment')
        silence.data_object.time = (np.arange(0, len(self.silence) * self.input_stepsize,
                                              self.input_stepsize) / self.input_samplerate)
        silence.data_object.label = np.array(self.silence, dtype=int)
        duration = self.input_blocksize / float(self.input_samplerate)
        silence.data_object.duration = np.ones(silence.data_object.label.shape) * duration
        silence.data_object.label_metadata.label = {0: 'Silence', 1: 'Not Silence'}
        silence.data_object.merge_segment()

        self.add_result(silence)

# Generate Grapher
from timeside.core.grapher import DisplayAnalyzer

DisplayAubioSilence = DisplayAnalyzer.create(
    analyzer=AubioSilence,
    result_id='aubio_silence',
    grapher_id='grapher_aubio_silence',
    grapher_name='Aubio Silence',
    grapher_version='1.0',
    staging=False)
Example #25
    :param beta2:
    :param delta:
    :return:
    """
    m = array(m)
    v = array(v)

    c0 = log(beta1*beta2/(theta1*theta2))
    a1 = m/theta1
    b1 = a1**(beta1/delta)
    c1 = log(a1)
    a2 = v/theta2
    b2 = a2**(beta2/delta)
    c2 = log(a2)
    somme1 = (b1+b2)**delta
    pxy = c0+(beta1/delta-1)*c1+(beta2/delta-1)*c2+(delta-2)*log(b1+b2)+log(somme1+1/delta-1)-somme1

    return mean(pxy)


# Generate Grapher for IRITMonopoly analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayMonopoly = DisplayAnalyzer.create(
    analyzer=IRITMonopoly,
    result_id='irit_monopoly.segments',
    grapher_id='grapher_irit_monopoly_segments',
    grapher_name='Monody/polyphony detection',
    background='waveform',
    staging=False)
Example #26
    :param delta:
    :return:
    """
    m = array(m)
    v = array(v)

    c0 = log(beta1 * beta2 / (theta1 * theta2))
    a1 = m / theta1
    b1 = a1**(beta1 / delta)
    c1 = log(a1)
    a2 = v / theta2
    b2 = a2**(beta2 / delta)
    c2 = log(a2)
    somme1 = (b1 + b2)**delta
    pxy = c0 + (beta1 / delta - 1) * c1 + (beta2 / delta - 1) * c2 + (
        delta - 2) * log(b1 + b2) + log(somme1 + 1 / delta - 1) - somme1

    return mean(pxy)
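
# Written out, the per-element quantity averaged by mean(pxy) is:
#
#   log p(m, v) = log(beta1*beta2 / (theta1*theta2))
#               + (beta1/delta - 1) * log(m/theta1)
#               + (beta2/delta - 1) * log(v/theta2)
#               + (delta - 2) * log(b1 + b2)
#               + log(s + 1/delta - 1) - s
#
#   with b1 = (m/theta1)**(beta1/delta), b2 = (v/theta2)**(beta2/delta)
#   and s = (b1 + b2)**delta.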


# Generate Grapher for IRITMonopoly analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayMonopoly = DisplayAnalyzer.create(
    analyzer=IRITMonopoly,
    result_id='irit_monopoly.segments',
    grapher_id='grapher_irit_monopoly_segments',
    grapher_name='Monody/polyphony detection',
    background='waveform',
    staging=False)
Example #27
        diar_res.data_object.label = label
        diar_res.data_object.time = time
        diar_res.data_object.duration = duration
        diar_res.data_object.label_metadata.label = dict()
        for lab in diar_res.data_object.label:
            diar_res.data_object.label_metadata.label[lab] = str(lab)

        self.add_result(diar_res)


# Generate Graphers for LimsiDiarization analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayLimsiDiarization = DisplayAnalyzer.create(
    analyzer=LimsiDiarization,
    analyzer_parameters={'sad_model': 'etape'},
    result_id='limsi_diarization.speakers',
    grapher_id='grapher_limsi_diarization_speakers',
    grapher_name='Speaker diarization (ETAPE)',
    background='waveform',
    staging=False)

DisplayLimsiDiarizationMaya = DisplayAnalyzer.create(
    analyzer=LimsiDiarization,
    analyzer_parameters={'sad_model': 'maya'},
    result_id='limsi_diarization.speakers',
    grapher_id='grapher_limsi_diarization_speakers_maya',
    grapher_name='Speaker diarization (Mayan)',
    background='waveform',
    staging=False)
Example #28
        # S[S<1e-3]=0
        np.maximum(S, 1e-3, out=S)

        # Differentiator filter
        df_filter = signal.fir_filter_design.remez(31, [0, 0.5], [Pi],
                                                   type='differentiator')

        S_diff = signal.lfilter(df_filter, 1, S, axis=0)
        S_diff[S_diff < 1e-10] = 0

        # Summation along the frequency axis
        odf_diff = S_diff.sum(axis=1)
        odf_median = np.median(odf_diff)
        if odf_median:
            odf_diff = odf_diff / odf_median  # Normalize

        odf = self.new_result(data_mode='value', time_mode='framewise')
        # odf.parameters = {'FFT_SIZE': self.FFT_SIZE}
        odf.data_object.value = odf_diff
        self.add_result(odf)


# Generate Grapher for Onset Detection Function analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayOnsetDetectionFunction = DisplayAnalyzer.create(
    analyzer=OnsetDetectionFunction,
    result_id='onset_detection_function',
    grapher_id='grapher_onset_detection_function',
    grapher_name='Onset detection')
Example #29
        sad_seg_result.data_object.label_metadata.label = {
            0: 'Not Speech',
            1: 'Speech'
        }

        self.add_result(sad_seg_result)


# Generate Grapher for Limsi SAD analyzer
from timeside.core.grapher import DisplayAnalyzer

# Etape Model
DisplayLIMSI_SAD_etape = DisplayAnalyzer.create(
    analyzer=LimsiSad,
    analyzer_parameters={'sad_model': 'etape'},
    result_id='limsi_sad.sad_segments',
    grapher_id='grapher_limsi_sad_etape',
    grapher_name='Speech activity - ETAPE',
    background='waveform',
    staging=True)

# Mayan Model
DisplayLIMSI_SAD_maya = DisplayAnalyzer.create(
    analyzer=LimsiSad,
    analyzer_parameters={'sad_model': 'maya'},
    result_id='limsi_sad.sad_segments',
    grapher_id='grapher_limsi_sad_maya',
    grapher_name='Speech activity - Mayan',
    background='waveform',
    staging=True)

# Adaptive Model
Example #30
        sad_seg_result.data_object.label = labels
        sad_seg_result.data_object.time = times
        sad_seg_result.data_object.duration = durations
        sad_seg_result.data_object.label_metadata.label = {0: 'Not Speech', 1: 'Speech'}

        self.add_result(sad_seg_result)


# Generate Grapher for Limsi SAD analyzer
from timeside.core.grapher import DisplayAnalyzer

# Etape Model
DisplayLIMSI_SAD_etape = DisplayAnalyzer.create(
    analyzer=LimsiSad,
    analyzer_parameters={'sad_model': 'etape'},
    result_id='limsi_sad.sad_segments',
    grapher_id='grapher_limsi_sad_etape',
    grapher_name='Speech activity - ETAPE',
    background='waveform',
    staging=True)

# Mayan Model
DisplayLIMSI_SAD_maya = DisplayAnalyzer.create(
    analyzer=LimsiSad,
    analyzer_parameters={'sad_model': 'maya'},
    result_id='limsi_sad.sad_segments',
    grapher_id='grapher_limsi_sad_maya',
    grapher_name='Speech activity - Mayan',
    background='waveform',
    staging=True)

# Adaptive Model
Example #31
def has_vibrato(serie, sampling_rate, minimum_frequency=4, maximum_frequency=8, Nfft=100):
    """
    Detect vibrato in a series via the Fourier transform of its derivative.
    """
    vibrato = False
    frequency_scale = linspace(0, sampling_rate / 2, Nfft // 2)

    index_min_vibrato = argmin(abs(frequency_scale - minimum_frequency))
    index_max_vibrato = argmin(abs(frequency_scale - maximum_frequency))

    derivative = [v1 - v2 for v1, v2 in zip(serie[:-2], serie[1:])]
    fft_derivative = abs(rfft(derivative, Nfft))[:Nfft // 2]
    i_max = argmax(fft_derivative)
    if index_max_vibrato >= i_max >= index_min_vibrato:
        vibrato = True

    return vibrato
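
# Quick sanity check (not from the original source); the numpy names used by
# has_vibrato come from the module's own imports.
from numpy import linspace, pi, sin

f0 = 440 + 20 * sin(2 * pi * 6 * linspace(0, 1, 100))  # 6 Hz wobble, 100 Hz sampling
print(has_vibrato(f0, sampling_rate=100))  # True: 6 Hz lies in the 4-8 Hz band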


# Generate Grapher for IRITSinging analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayIritSinging = DisplayAnalyzer.create(
    analyzer=IRITSinging,
    result_id='irit_singing.segments',
    grapher_id='grapher_irit_singing_segments',
    grapher_name='Singing detection',
    background='waveform',
    staging=True)
Example #32
        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'

        segs.data_object.label_metadata.label = label

        segs.data_object.label = [convert[s[2]] for s in segList]
        segs.data_object.time = [(float(s[0]) * self.blocksize() /
                                  self.samplerate())
                                 for s in segList]
        segs.data_object.duration = [(float(s[1] - s[0] + 1) * self.blocksize() /
                                      self.samplerate())
                                     for s in segList]

        self.add_result(segs)

        return


# Generate Grapher for IRITSpeechEntropy analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayEntropySpeechSegmentation = DisplayAnalyzer.create(
    analyzer=IRITSpeechEntropy,
    result_id='irit_speech_entropy.segments',
    grapher_id='grapher_irit_speech_entropy_segments',
    grapher_name='Speech detection (signal diversity)',
    background='waveform',
    staging=False)
Example #33
        return "Constant Q transform from QMUL vamp plugins"

    @staticmethod
    @interfacedoc
    def unit():
        return ""

    def post_process(self):
        super(VampConstantQ, self).post_process()  # get remaining results

        constant_q = self.new_result(data_mode='value', time_mode='framewise')

        midi_pitches = np.arange(self.minpitch, self.maxpitch, 12.0 / self.bpo)
        constant_q.data_object.y_value = [
            midi2freq(midi_number=p, tuningA4=self.tuning)
            for p in midi_pitches
        ]

        constant_q.data_object.value = self.vamp_results['matrix'][1]
        self.add_result(constant_q)


# Generate Grapher for CQT analyzer
from timeside.core.grapher import DisplayAnalyzer

DisplayCQT = DisplayAnalyzer.create(analyzer=VampConstantQ,
                                    result_id='vamp_constantq',
                                    grapher_id='grapher_vamp_cqt',
                                    grapher_name='Constant Q Transform',
                                    staging=False)
Example #34
        ]
        med_segs.data_object.duration = [
            (float(s[1] - s[0] + 1) * self.input_stepsize /
             self.input_samplerate) for s in segList_filt
        ]

        self.add_result(med_segs)

        return


# Generate Grapher for IRITSpeech4Hz analyzer
from timeside.core.grapher import DisplayAnalyzer

Display4hzSpeechSegmentation = DisplayAnalyzer.create(
    analyzer=IRITSpeech4Hz,
    result_id='irit_speech_4hz.segments',
    grapher_id='grapher_irit_speech_4hz_segments',
    grapher_name='Speech activity - 4hz',
    background='waveform',
    staging=True)

# IRIT 4Hz with median filter
Display4hzSpeechSegmentationMedian = DisplayAnalyzer.create(
    analyzer=IRITSpeech4Hz,
    result_id='irit_speech_4hz.segments_median',
    grapher_id='grapher_irit_speech_4hz_segments_median',
    grapher_name='Speech detection (syllabic rate)',
    background='waveform',
    staging=False)