Example #1
import zounds

samplerate = zounds.SR22050()
BaseModel = zounds.stft(resample_to=samplerate, store_fft=True)


@zounds.simple_in_memory_settings
class Sound(BaseModel):
    pass


if __name__ == '__main__':
    # download and process a recording, then fetch the stored document
    url = 'https://ia802606.us.archive.org/9/items/AOC11B/onclassical_luisi_bach_partita_e-minor_bwv-830_3.ogg'
    _id = Sound.process(meta=url)
    snd = Sound(_id)

    # the band of interest: 50 Hz up to the Nyquist frequency (11025 Hz)
    band = zounds.FrequencyBand(50, samplerate.nyquist)

    # three perceptual scales over that band: Bark and Mel with 100 bands
    # each, and a chroma scale that folds energy into pitch classes
    bark_scale = zounds.BarkScale(band, 100)
    mel_scale = zounds.MelScale(band, 100)
    chroma_scale = zounds.ChromaScale(band)

    # project the linear-frequency FFT frames onto each scale, weighting
    # each band with a Hanning window
    bark_bands = bark_scale.apply(snd.fft, zounds.HanningWindowingFunc())
    mel_bands = mel_scale.apply(snd.fft, zounds.HanningWindowingFunc())
    chroma_bands = chroma_scale.apply(snd.fft, zounds.HanningWindowingFunc())

    app = zounds.ZoundsApp(
        model=Sound,
        visualization_feature=Sound.fft,
        audio_feature=Sound.ogg,
        globals=globals(),
        locals=locals())
    app.start(9999)
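
The Mel and Bark scales above compress frequency roughly logarithmically. As a point of reference, here is a standalone sketch (plain numpy, independent of zounds, which may use a different variant internally) that computes 10 Mel-spaced band edges over the same 50 Hz to Nyquist range:

import numpy as np

def hz_to_mel(hz):
    # O'Shaughnessy's formula, the common Mel definition
    return 2595.0 * np.log10(1.0 + hz / 700.0)

def mel_to_hz(mel):
    return 700.0 * (10.0 ** (mel / 2595.0) - 1.0)

# 10 band edges, evenly spaced on the Mel scale between 50 Hz and 11025 Hz
edges = mel_to_hz(np.linspace(hz_to_mel(50.0), hz_to_mel(11025.0), 10))
print(edges.round(1))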
Example #2
    # (fragment: `original`, `sr`, `snd`, `args`, and the time_stretch /
    # pitch_shift helpers are defined earlier in the full example)
    higher = zounds.AudioSamples(pitch_shift(original, 1.0).squeeze(), sr)
    lower = zounds.AudioSamples(pitch_shift(original, -1.0).squeeze(), sr)

    # apply a sliding window to demonstrate time stretch and pitch shift in
    # batch mode: start a new window every 5 seconds, each 10 seconds long
    # (50% overlap); see the plain-numpy sketch after this example
    windowing_sr = zounds.SampleRate(frequency=zounds.Seconds(5),
                                     duration=zounds.Seconds(10))

    windowed = snd.resampled.sliding_window(windowing_sr)

    # swap the time dimension for an identity dimension so the windows are
    # treated as a batch of independent examples
    windowed = zounds.ArrayWithUnits(
        windowed, [zounds.IdentityDimension(), windowed.dimensions[1]])

    def samples(x):
        return zounds.AudioSamples(x, sr)

    # stretch and shift all windows in one call; map() lazily wraps each
    # resulting row as playable AudioSamples
    batch_slow = map(samples, time_stretch(windowed, 0.75))
    batch_fast = map(samples, time_stretch(windowed, 1.25))

    batch_higher = map(samples, pitch_shift(windowed, 1.0))
    batch_lower = map(samples, pitch_shift(windowed, -1.0))

    app = zounds.ZoundsApp(model=Sound,
                           visualization_feature=Sound.fft,
                           audio_feature=Sound.resampled,
                           globals=globals(),
                           locals=locals(),
                           secret=args.app_secret)

    app.start(args.port)
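
The SampleRate(frequency, duration) pair above encodes a hop and a window length. A plain-numpy sketch of the same 50%-overlap windowing, independent of zounds' sliding_window (the post-resampling rate here is an assumption):

import numpy as np

sr = 11025          # samples per second after resampling (assumed)
hop = 5 * sr        # frequency=Seconds(5): start a new window every 5 s
length = 10 * sr    # duration=Seconds(10): each window is 10 s long

signal = np.random.normal(size=60 * sr)  # one minute of noise as a stand-in

starts = range(0, len(signal) - length + 1, hop)
windows = np.stack([signal[s:s + length] for s in starts])
print(windows.shape)  # (11, 110250): 11 half-overlapping 10-second windows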
Example #3
    # (fragment: the packed_channels / packed_fractal helpers and their
    # inverses, plus `km`, `time_dim`, `samplerate`, FRACTAL_WINDOW_SIZE
    # and `numpy as np`, are defined earlier in the full example)
    rep = next(make_stream())
    data = rep.data
    data = packed_channels(data)
    data = packed_fractal(data, FRACTAL_WINDOW_SIZE)

    # the first column holds per-frame norms; set it aside so it can be
    # re-attached after quantization
    norms = data[:, :1]

    # replace each frame with the index of its nearest cluster centroid
    indices = km.predict(data[:, 1:])

    # look up the centroids, re-attach the norms, and invert the packing
    # steps to recover a spectral representation of the same shape
    centroids = km.cluster_centers_[indices]
    centroids = np.concatenate([norms, centroids], axis=1)
    centroids = unpacked_fractal_recon(centroids, FRACTAL_WINDOW_SIZE)

    centroids = unpacked_channels(centroids, time_dim)
    recon_rep = rep.__class__(centroids, samplerate)
    return rep, recon_rep


def make_stream():
    return repr_stream(MelPhaseRecovery)


if __name__ == '__main__':
    app = zounds.ZoundsApp(locals=locals(), globals=globals())
    app.start_in_thread(8888)

    rs = make_stream()

    for i, km in learn_clusters(rs):
        print(f'kmeans iter {i}')

    input('Waiting...')
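
The predict-then-look-up step in the fragment is standard vector quantization: each frame is replaced by its nearest k-means centroid, with the norms column carried around the quantizer. A self-contained sketch of that pattern on toy data (scikit-learn, not the zounds pipeline):

import numpy as np
from sklearn.cluster import KMeans

# toy stand-in for packed spectral frames: 1000 vectors of dimension 16
frames = np.random.normal(size=(1000, 16))

# split each frame into a norm and a unit-length direction, mirroring the
# leading norms column in the example
norms = np.linalg.norm(frames, axis=1, keepdims=True)
directions = frames / norms

km = KMeans(n_clusters=8, n_init=10).fit(directions)

# quantize: nearest centroid per frame, then restore the original norms
indices = km.predict(directions)
recon = km.cluster_centers_[indices] * norms
print(np.mean((frames - recon) ** 2))  # quantization error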
Example #4
        """
        Do a forward and backward pass over the audio, and return the
        reconstructed audio
        """
        if _id:
            doc = Document(_id)
        else:
            doc = choice(docs)
        print doc._id
        recon_audio = full_pass(doc.mdct, kmeans.pipeline)
        recon_audio_log_amp = full_pass(doc.mdct, kmeans_log_amplitude.pipeline)
        return doc.ogg[:], recon_audio, recon_audio_log_amp


    mono_orig, mono, mono_log = reconstruction(
            'FlavioGaete22/TFS1_TReich11.wav')
    bass_orig, bass, bass_log = reconstruction(
            'FlavioGaete22/TFS1_TBass05.wav')
    beat_orig, beat, beat_log = reconstruction(
            'FlavioGaete22/SIKBeat02.wav')
    cello_orig, cello, cello_log = reconstruction(
            'FlavioGaete22/TFS2_TVla09.wav')

    app = zounds.ZoundsApp(
            model=Document,
            audio_feature=Document.ogg,
            visualization_feature=Document.mdct,
            globals=globals(),
            locals=locals())
    app.start(8888)