Example #1
import jinja2

# Imports added for completeness; module paths follow the other examples in this set.
from ftis.analyser import KDTree, LibroCQT, Stats, Standardise, UMAP
from ftis.corpus import Corpus
from ftis.process import FTISProcess as Chain

tuned = Corpus(
    "/Users/james/Cloud/Projects/ElectroMagnetic/reaper/Convolutions/anchors/media/07-glued.wav"
)
db = (Corpus("~/Cloud/Projects/DataBending/DataAudioUnique").duration(
    min_duration=2, max_duration=20))

unstatic = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/0")
static = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

output = "../../reaper/Convolutions/tuned"

analysis = Chain(source=(db + tuned + unstatic + static), folder=output)

kdtree = KDTree()
dr = UMAP(components=10, cache=1)  # we need access to the original data
analysis.add(
    # FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, -1, -1], cache=1),
    LibroCQT(cache=0),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    kdtree)

if __name__ == "__main__":
    analysis.run()

    pinpoint = tuned.items[0]  # single item
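    # The original snippet is truncated after this point. What follows is a
    # hedged sketch (not the author's code) of one way a nearest-neighbour
    # lookup around `pinpoint` could be done by hand. It assumes dr.output is a
    # dict mapping each corpus item to its reduced vector, mirroring how other
    # analysers in these examples expose their .output.
    def euclidean(a, b):
        return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5

    target = dr.output[pinpoint]
    distances = {
        path: euclidean(vec, target)
        for path, vec in dr.output.items()
        if path != pinpoint
    }
    # Print the ten items closest to the chosen anchor sound
    for path, dist in sorted(distances.items(), key=lambda kv: kv[1])[:10]:
        print(path, dist)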
Example #2
# Description
"""
Split the corpus by activity into two clusters, ideally separating sections that are static from those that are more gestural or dynamic
"""

from ftis.analyser import Flux, Stats, Normalise, AgglomerativeClustering
from ftis.process import FTISProcess as Chain
from pathlib import Path
from shutil import copyfile

src = "outputs/segments/2_ExplodeAudio"
folder = "outputs/classification"

process = Chain(
    source=src, 
    folder=folder
)

cluster = AgglomerativeClustering(numclusters=2)

process.add(
    Flux(cache=False),
    Stats(numderivs=2),
    Normalise(),
    cluster
)

if __name__ == "__main__":
    process.run()

    # Now implement a quasi one-shot analyser to copy the sound files to individual directories
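    # Hedged sketch (not the original code) of that copying step, assuming
    # cluster.output maps each cluster label to a list of audio file paths,
    # as the clustering output is used elsewhere in these examples.
    for label, items in cluster.output.items():
        cluster_dir = Path(folder) / f"cluster_{label}"
        cluster_dir.mkdir(parents=True, exist_ok=True)
        for audiofile in items:
            copyfile(audiofile, cluster_dir / Path(audiofile).name)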
Example #3
from ftis.analyser import (FluidOnsetslice, FluidMFCC, Stats, Standardise,
                           UMAP, ExplodeAudio)
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus, PathLoader

analysis = Chain(source=Corpus("../../reaper/highgain_source/bounces"),
                 folder="../outputs/em_detailed_segmentation")

analysis.add(
    # Segmentation
    FluidOnsetslice(framedelta=20,
                    minslicelength=2,
                    filtersize=5,
                    threshold=0.3,
                    metric=0,
                    cache=1),
    ExplodeAudio(),
    # Analysis
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, 512, 4096]),
    Stats(numderivs=1),
    Standardise(),
    UMAP(components=2))

if __name__ == "__main__":
    analysis.run()
Example #4
# Analyser imports added for completeness; module paths follow the other examples in this set.
from ftis.analyser import AgglomerativeClustering, FluidMFCC, Stats, Standardise, UMAP
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from ftis.common.io import write_json, get_duration
from pathlib import Path
import jinja2

"""
Produce a crude 3-cluster grouping of all of the 'static' files. Later on we'll find other files similar to these.
We can use both pieces of information to help compose.
"""

output = "../../reaper/Convolutions/base_materials"
em = Corpus("~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

analysis = Chain(source=em, folder=output)

dr = UMAP(components=10, cache=1)
clustering = AgglomerativeClustering(numclusters=3)
analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    clustering
)

if __name__ == "__main__":
    analysis.run()
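    # Hedged sketch (not in the original): persist the cluster membership so a
    # later script can look up which 'static' files were grouped together.
    # Assumes clustering.output maps cluster labels to lists of file paths and
    # that write_json takes (path, data); the filename is hypothetical.
    write_json(str(Path(output) / "static_clusters.json"), clustering.output)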
Example #5
# Description
"""
Analyse each item belonging to a cluster so that the items can be ranked by a descriptor.
"""

from ftis.analyser import Flux
from ftis.process import FTISProcess as Chain
from ftis.common.io import read_json
import numpy as np

src = "outputs/segments/2_ExplodeAudio"
folder = "outputs/metacluster_analysis"

process = Chain(source=src, folder=folder)

flux = Flux(cache=True)
process.add(flux)

if __name__ == "__main__":
    process.run()
    # print(flux.output)
    clusters = read_json("outputs/classification/3_AGCluster")
    for k in clusters.keys():
        buf = []
        for v in clusters[k]:
            for point in flux.output[v]:
                buf.append(point)
        print(f"Cluster {k}: {np.median(buf)}")
Example #6
# Analyser and process imports added for completeness; module paths follow the other examples in this set.
from ftis.analyser import CollapseAudio, FluidMFCC, KDTree, Stats, Standardise, UMAP
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from ftis.common.io import write_json
from pathlib import Path


corpus = (
    Corpus("~/Cloud/Projects/DataBending/DataAudioUnique")
    .duration(min_duration=0.1, max_duration=10)
)

point = Corpus("~/Cloud/Projects/ElectroMagnetic/reaper/Interruptions/media/06-Kindle Off-200513_1547-glued-04.wav")

output = "../outputs/isolate_static"

analysis = Chain(source=point + corpus, folder=output)

kdtree = KDTree()
dr = UMAP(components=10, cache=1)
analysis.add(
    CollapseAudio(),
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    kdtree
)

if __name__ == "__main__":
    analysis.run()
Example #7
"""
Analyse each segment in the 'gestural' pool and cluster the results
"""

from ftis.analyser import (FluidMFCC, HDBSCluster, Stats, UMAP, Standardise,
                           Normalise)
from ftis.process import FTISProcess as Chain

src = "outputs/micro_segmentation/2_ExplodeAudio"
folder = "outputs/micro_clustering"

process = Chain(source=src, folder=folder)

process.add(
    FluidMFCC(cache=True),
    Stats(numderivs=1, flatten=True, cache=False),
    Standardise(cache=False),
    Normalise(cache=False),
    UMAP(components=6),
    HDBSCluster(minclustersize=10))

if __name__ == "__main__":
    process.run()
Example #8
from ftis.analyser import FluidMFCC, Stats, Standardise, UMAP, Normalise, HDBSCluster
from ftis.corpus import CorpusLoader, CorpusFilter
from ftis.process import FTISProcess as Chain

src = "outputs/micro_segmentation/2_ExplodeAudio"
folder = "outputs/quitest"

process = Chain(source=src, folder=folder)

process.add(
    CorpusLoader(cache=1),
    CorpusFilter(max_loudness=10, cache=1),
    FluidMFCC(discard=True, cache=1),
    Stats(numderivs=1, cache=1),
    UMAP(components=10, cache=1),
    HDBSCluster(minclustersize=5))

if __name__ == "__main__":
    process.run()
Example #9
from ftis.analyser import ClusteredNMF
from ftis.process import FTISProcess as Chain

src = "outputs/classification/4_Split/1"
folder = "outputs/layers_extractions"

process = Chain(source=src, folder=folder)

process.add(
    ClusteredNMF(iterations=200,
                 components=10,
                 cluster_selection_method='leaf'))

if __name__ == "__main__":
    process.run()
Example #10
from ftis.analyser.descriptor import FluidMFCC
from ftis.analyser.scaling import Standardise
from ftis.analyser.dr import UMAP
from ftis.analyser.clustering import HDBSCAN
from ftis.analyser.stats import Stats
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus

db_corpus = (Corpus("~/Cloud/Projects/DataBending/DataAudioUnique").duration(
    min_duration=0.03))

em_corpus = (Corpus("../outputs/em_detailed_segmentation/1_ExplodeAudio"))

analysis = Chain(source=db_corpus + em_corpus,
                 folder="../outputs/multicorpus_exploring")

analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(),
    UMAP(components=2),
    HDBSCAN(minclustersize=5))

if __name__ == "__main__":
    analysis.run()
Example #11
# Description
"""
Segment using the clustered segmentation approach and create an audio file per segment
"""

import jinja2
from ftis.analyser import (FluidNoveltyslice, ClusteredSegmentation,
                           ExplodeAudio, CollapseAudio)
from ftis.process import FTISProcess as Chain
from pathlib import Path
from ftis.common.conversion import samps2ms

src = "reaper/highgain_source/bounces"
folder = "outputs/segments"

process = Chain(source=src, folder=folder)

cs = ClusteredSegmentation(numclusters=3, windowsize=5)

process.add(FluidNoveltyslice(threshold=0.1, minslicelength=4096, feature=0),
            cs, ExplodeAudio())

if __name__ == "__main__":
    process.run()

    tracks = {}
    for audio_src, slices in cs.output.items():
        f = Path(audio_src).resolve()
        track_id = str(f.name)
        pos = 0
        for i, (start, end) in enumerate(zip(slices, slices[1:])):
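            # The original snippet is truncated inside this loop. A hedged
            # sketch of how the body might continue, building one REAPER item
            # per slice pair; the field names, the 44100 Hz sample rate and the
            # samps2ms(samples, sr) call order are assumptions, not the author's code.
            length = samps2ms(end - start, 44100)
            tracks.setdefault(track_id, []).append({
                "file": str(f),
                "offset": samps2ms(start, 44100),  # offset into the source file
                "position": pos,                   # position on the timeline
                "length": length,
            })
            pos += length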
Example #12
# Description
"""
Arrange the segments along a single axis using dimensionality reduction
"""

from ftis.analyser import FluidMFCC, UMAP, Stats, Normalise
from ftis.process import FTISProcess as Chain
from ftis.common.conversion import samps2ms

src = "outputs/micro_segmentation/2_ExplodeAudio/"
folder = "outputs/oned"

process = Chain(source=src, folder=folder)

mfcc = FluidMFCC(cache=True)
stats = Stats(flatten=True, numderivs=1, cache=True)
umap = UMAP(components=1, cache=True)
normalise = Normalise()

process.add(mfcc, stats, umap, normalise)

if __name__ == "__main__":
    process.run()
Example #13
from ftis.analyser import FluidMFCC, Stats, Standardise, UMAP, Normalise, HDBSCluster
from ftis.common.io import get_duration
from ftis.corpus import CorpusLoader, CorpusFilter
from ftis.process import FTISProcess as Chain
from pathlib import Path
import jinja2

src = "outputs/micro_segmentation/2_ExplodeAudio"
folder = "outputs/loudest"

process = Chain(source=src, folder=folder)
clustering = HDBSCluster(minclustersize=10)
process.add(
    CorpusLoader(cache=1), CorpusFilter(max_loudness=100, min_loudness=75),
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[8192, 128, 8192]),
    Stats(numderivs=1,
          spec=["median", "max", "min", "stddev", "mean", "skewness"]),
    UMAP(components=2), clustering)

if __name__ == "__main__":
    process.run()

    tracks = {}
    for cluster, items in clustering.output.items():
        track_id = cluster
        pos = 0
        for audiofile in items:

            dur = get_duration(audiofile)
            item = {
                "file": audiofile,
Example #14
"""
A heavy sub-segmentation of the classified samples from the source.
The idea is to return short transients and micro gestures, whittling down to the micro level as far as is perceptually reasonable.
"""

import jinja2
import os
from ftis.analyser import (ClusteredSegmentation, FluidOnsetslice,
                           ExplodeAudio, UMAP)
from ftis.process import FTISProcess as Chain
from ftis.common.conversion import samps2ms
from pathlib import Path

src = "outputs/classification/4_Split/0"
folder = "outputs/micro_segmentation"

process = Chain(source=src, folder=folder)

initial_segmentation = FluidOnsetslice(threshold=0.3, cache=True)
cluster_segmentation = ClusteredSegmentation(cache=True)

process.add(
    initial_segmentation,
    cluster_segmentation,
    ExplodeAudio(),
)

if __name__ == "__main__":
    process.run()
    # Write out the clustered segments to a REAPER file
    tracks = {}
    for audio_src, slices in cluster_segmentation.output.items():
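        # The original snippet is truncated inside this loop. A hedged sketch of
        # the body, mirroring the segment-to-REAPER-item pattern used in the
        # other examples; field names, the 44100 Hz sample rate and the
        # samps2ms(samples, sr) call order are assumptions, not the author's code.
        track_id = Path(audio_src).name
        pos = 0
        for start, end in zip(slices, slices[1:]):
            length = samps2ms(end - start, 44100)
            tracks.setdefault(track_id, []).append({
                "file": audio_src,
                "offset": samps2ms(start, 44100),  # offset into the source file
                "position": pos,                   # position on the timeline
                "length": length,
            })
            pos += length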
Example #15
# Description
"""
Analyse each item belonging to a cluster so that the items can be ranked by a descriptor.
"""

from ftis.analyser import Flux, FluidLoudness, Stats
from ftis.process import FTISProcess as Chain

src = "outputs/micro_segmentation/2_ExplodeAudio"

flux_chain = Chain(
    source=src,
    folder="outputs/micro_segmentation_flux",
)

loud_chain = Chain(source=src, folder="outputs/micro_segmentation_loudness")

flux = Flux()
loud = FluidLoudness()

flux_chain.add(flux, Stats(numderivs=0))

loud_chain.add(loud, Stats(numderivs=0))

if __name__ == "__main__":
    flux_chain.run()
    loud_chain.run()
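    # Hedged sketch (not in the original): rank the segments by median flux once
    # both chains have run, assuming flux.output maps each file to a sequence of
    # flux values as in the metacluster example above.
    from statistics import median

    ranked = sorted(flux.output.items(), key=lambda kv: median(kv[1]))
    for path, values in ranked[:10]:
        print(path, median(values))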