# NOTE: the analyser/process imports below are assumed; adjust the paths to your ftis version
from ftis.analyser import LibroCQT, Stats, Standardise, UMAP, KDTree
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
import jinja2

tuned = Corpus("/Users/james/Cloud/Projects/ElectroMagnetic/reaper/Convolutions/anchors/media/07-glued.wav")
db = (
    Corpus("~/Cloud/Projects/DataBending/DataAudioUnique")
    .duration(min_duration=2, max_duration=20)
)
unstatic = Corpus("~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/0")
static = Corpus("~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

output = "../../reaper/Convolutions/tuned"

analysis = Chain(source=(db + tuned + unstatic + static), folder=output)

kdtree = KDTree()
dr = UMAP(components=10, cache=1)  # we need access to the original data

analysis.add(
    # FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, -1, -1], cache=1),
    LibroCQT(cache=0),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    kdtree,
)

if __name__ == "__main__":
    analysis.run()
    pinpoint = tuned.items[0]  # single item
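# The chain above ends in a ten-component UMAP feeding a KDTree so that the
# "pinpoint" anchor can be matched against the rest of the corpus. A minimal
# sketch of that neighbour lookup, swapping in scipy's cKDTree over a saved
# embedding rather than the ftis KDTree analyser; the embedding file name and
# its {filename: vector} format are assumptions, not part of the script above.
import json
from scipy.spatial import cKDTree

with open("../../reaper/Convolutions/tuned/umap.json") as f:  # hypothetical path
    embedding = json.load(f)

names = list(embedding.keys())
tree = cKDTree([embedding[n] for n in names])

anchor = embedding[names[0]]       # stand-in for the "pinpoint" item
_, idx = tree.query(anchor, k=10)  # indices of the ten nearest neighbours
print([names[i] for i in idx])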
# Description
"""
Split by activity into two clusters, hopefully separating sections that are
static from those that are more gestural or dynamic.
"""
from ftis.analyser import Flux, Stats, Normalise, AgglomerativeClustering
from ftis.process import FTISProcess as Chain
from pathlib import Path
from shutil import copyfile

src = "outputs/segments/2_ExplodeAudio"
folder = "outputs/classification"

process = Chain(source=src, folder=folder)

cluster = AgglomerativeClustering(numclusters=2)

process.add(
    Flux(cache=False),
    Stats(numderivs=2),
    Normalise(),
    cluster,
)

if __name__ == "__main__":
    process.run()
    # Now implement a quasi one-shot analyser to copy the sound files to
    # individual directories (sketched below)
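# A sketch of the copying step requested in the comment above: copy each file
# into a folder named after its cluster. It assumes `cluster.output` is a dict
# of {cluster_label: [audio file paths]}, mirroring the clustering.output.items()
# pattern used by the clustering scripts later in this collection.
from pathlib import Path
from shutil import copyfile

def copy_clusters(clusters: dict, destination: str) -> None:
    for label, items in clusters.items():
        target = Path(destination) / str(label)
        target.mkdir(parents=True, exist_ok=True)
        for audiofile in items:
            src_path = Path(audiofile)
            copyfile(src_path, target / src_path.name)

# e.g. copy_clusters(cluster.output, "outputs/classification/4_Split")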
from ftis.analyser import (FluidOnsetslice, FluidMFCC, Stats, Standardise,
                           UMAP, ExplodeAudio)
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus, PathLoader

analysis = Chain(
    source=Corpus("../../reaper/highgain_source/bounces"),
    folder="../outputs/em_detailed_segmentation",
)

analysis.add(
    # Segmentation
    FluidOnsetslice(framedelta=20, minslicelength=2, filtersize=5,
                    threshold=0.3, metric=0, cache=1),
    ExplodeAudio(),
    # Analysis
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, 512, 4096]),
    Stats(numderivs=1),
    Standardise(),
    UMAP(components=2),
)

if __name__ == "__main__":
    analysis.run()
"""
Give me a crude 3 cluster output of all of the 'static' files.
Later on we'll find other files similar to these.
We can use both bits of information to help compose.
"""
# NOTE: the analyser imports below are assumed; adjust the paths to your ftis version
from ftis.analyser import FluidMFCC, Stats, Standardise, UMAP, AgglomerativeClustering
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from ftis.common.io import write_json, get_duration
from pathlib import Path
import jinja2

output = "../../reaper/Convolutions/base_materials"

em = Corpus("~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

analysis = Chain(source=em, folder=output)

dr = UMAP(components=10, cache=1)
clustering = AgglomerativeClustering(numclusters=3)

analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    clustering,
)

if __name__ == "__main__":
    analysis.run()
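# write_json and get_duration are imported above but not used in the visible part
# of the script. One plausible use, sketched here as an assumption: dump a manifest
# of the three clusters with file durations so they can be auditioned later.
# Assumes write_json(path, data) and that clustering.output is {label: [paths]}.
from ftis.common.io import write_json, get_duration

def write_cluster_manifest(clusters: dict, path: str) -> None:
    manifest = {
        str(label): [
            {"file": audiofile, "duration": get_duration(audiofile)}
            for audiofile in items
        ]
        for label, items in clusters.items()
    }
    write_json(path, manifest)

# e.g. write_cluster_manifest(clustering.output, f"{output}/manifest.json")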
# NOTE: the analyser/process imports below are assumed; adjust the paths to your ftis version
from ftis.analyser import CollapseAudio, FluidMFCC, Stats, Standardise, UMAP, KDTree
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from ftis.common.io import write_json
from pathlib import Path

corpus = (
    Corpus("~/Cloud/Projects/DataBending/DataAudioUnique")
    .duration(min_duration=0.1, max_duration=10)
)
point = Corpus("~/Cloud/Projects/ElectroMagnetic/reaper/Interruptions/media/06-Kindle Off-200513_1547-glued-04.wav")

output = "../outputs/isolate_static"

analysis = Chain(source=(point + corpus), folder=output)

kdtree = KDTree()
dr = UMAP(components=10, cache=1)

analysis.add(
    CollapseAudio(),
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    kdtree,
)

if __name__ == "__main__":
    analysis.run()
# NOTE: the analyser/process/corpus imports below are assumed; adjust the paths to your ftis version
from ftis.analyser import FluidMFCC, Stats, Standardise, UMAP, HDBSCAN
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
import jinja2

db_corpus = (
    Corpus("~/Cloud/Projects/DataBending/DataAudioUnique")
    .duration(min_duration=0.1, max_duration=10)
)
em_corpus = (
    Corpus("../outputs/em_detailed_segmentation/1_ExplodeAudio")
    .duration(min_duration=0.1, max_duration=20)
)

output = "../outputs/multicorpus_exploring"

analysis = Chain(source=(db_corpus + em_corpus), folder=output)

clustering = HDBSCAN(minclustersize=10, cache=1)

analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    UMAP(components=10, cache=1),
    clustering,
)

if __name__ == "__main__":
    analysis.run()
    tracks = {}
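# For a multicorpus run like the one above, a useful first look at the HDBSCAN
# result is how much of each cluster comes from the DataBending corpus versus
# the ElectroMagnetic segments. A sketch, assuming clustering.output is a dict
# of {cluster_label: [audio file paths]} as used by the scripts below.
def summarise_clusters(clusters: dict) -> None:
    for label, items in clusters.items():
        db = sum("DataAudioUnique" in str(f) for f in items)
        em = len(items) - db
        print(f"cluster {label}: {len(items)} items ({db} databending / {em} electromagnetic)")

# e.g. summarise_clusters(clustering.output)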
from ftis.analyser.descriptor import FluidMFCC
from ftis.analyser.scaling import Standardise
from ftis.analyser.dr import UMAP
from ftis.analyser.clustering import HDBSCAN
from ftis.analyser.stats import Stats
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus

db_corpus = (
    Corpus("~/Cloud/Projects/DataBending/DataAudioUnique")
    .duration(min_duration=0.03)
)
em_corpus = Corpus("../outputs/em_detailed_segmentation/1_ExplodeAudio")

analysis = Chain(
    source=db_corpus + em_corpus,
    folder="../outputs/multicorpus_exploring",
)

analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(),
    UMAP(components=2),
    HDBSCAN(minclustersize=5),
)

if __name__ == "__main__":
    analysis.run()
from ftis.analyser import (FluidMFCC, Stats, Standardise, HDBSCluster)
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus, PathLoader
from ftis.common.io import get_duration
from pathlib import Path
import jinja2

folder = "../outputs/em_detailed_clustering"

corpus = (
    Corpus("../outputs/em_detailed_segmentation/1_ExplodeAudio")
    .loudness(max_loudness=20)
)

analysis = Chain(source=corpus, folder=folder)

clustering = HDBSCluster(minclustersize=10, cache=1)

analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[1024, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    clustering,
)

if __name__ == "__main__":
    analysis.run()

    tracks = {}
    for cluster, items in clustering.output.items():
        track_id = cluster
        pos = 0
        for audiofile in items:
            dur = get_duration(audiofile)
            item = {
                "file": audiofile,
                "length": get_duration(audiofile),
                "start": 0.0,
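# The jinja2 import above suggests that the assembled `tracks` dict is rendered
# into a session file (the output folders in these scripts point into a REAPER
# project). A minimal sketch of that rendering step; the template path, output
# name and the variable passed to the template are assumptions, not taken from
# the original script.
import jinja2
from pathlib import Path

def render_session(tracks: dict, template_path: str, out_path: str) -> None:
    template = jinja2.Template(Path(template_path).read_text())
    Path(out_path).write_text(template.render(tracks=tracks))

# e.g. render_session(tracks, "session_template.rpp", f"{folder}/session.rpp")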
# Description
"""
Analyse each item belonging to a cluster so that the items can be ranked
in order of a descriptor.
"""
from ftis.analyser import Flux, FluidLoudness, Stats
from ftis.process import FTISProcess as Chain

src = "outputs/micro_segmentation/2_ExplodeAudio"

flux_chain = Chain(
    source=src,
    folder="outputs/micro_segmentation_flux",
)
loud_chain = Chain(
    source=src,
    folder="outputs/micro_segmentation_loudness",
)

flux = Flux()
loud = FluidLoudness()

flux_chain.add(flux, Stats(numderivs=0))
loud_chain.add(loud, Stats(numderivs=0))

if __name__ == "__main__":
    flux_chain.run()
    loud_chain.run()
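# The docstring above says the point of these two chains is to rank each segment
# by a descriptor. A sketch of that ranking, assuming the Stats step leaves a JSON
# file mapping each segment to a flat statistics vector; the file name and the
# index of the mean within that vector are assumptions about the output format.
import json
from pathlib import Path

def rank_segments(stats_json: str, column: int = 0) -> list:
    data = json.loads(Path(stats_json).read_text())  # assumed {filename: [stats, ...]}
    return sorted(data.items(), key=lambda kv: kv[1][column], reverse=True)

# e.g. highest mean flux first (path is hypothetical):
# for name, stats in rank_segments("outputs/micro_segmentation_flux/2_Stats/analysis.json"):
#     print(name, stats[0])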