Esempio n. 1
0
from ftis.analyser.scaling import Standardise
from ftis.analyser.dr import UMAP
from ftis.analyser.stats import Stats
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from ftis.common.io import write_json, get_duration
from pathlib import Path
import jinja2

"""
Give me a crude 3 cluster output of all of the 'static' files. Later on we'll find other files similar to these.
We can use both bits of information to help compose.
"""

output = "../../reaper/Convolutions/base_materials"
em = Corpus("~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

analysis = Chain(
    source = (em),
    folder = output
)

dr = UMAP(components=10, cache=1)
clustering = AgglomerativeClustering(numclusters=3)
analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    clustering
)
Esempio n. 2
0
from ftis.analyser.descriptor import FluidMFCC, LibroCQT
from ftis.analyser.clustering import KDTree
from ftis.analyser.scaling import Standardise
from ftis.analyser.dr import UMAP
from ftis.analyser.stats import Stats
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from ftis.common.io import get_duration, write_json
from pathlib import Path
import jinja2

# A single pre-glued anchor file used for comparison against the corpora.
tuned = Corpus(
    "/Users/james/Cloud/Projects/ElectroMagnetic/reaper/Convolutions/anchors/media/07-glued.wav"
)
# Databending corpus, filtered to items between 2 and 20 seconds long.
db = (Corpus("~/Cloud/Projects/DataBending/DataAudioUnique").duration(
    min_duration=2, max_duration=20))

# Outputs of an earlier classification split: folder 0 holds the 'unstatic'
# files and folder 1 the 'static' ones — presumably; confirm against the
# classification stage that produced them.
unstatic = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/0")
static = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

output = "../../reaper/Convolutions/tuned"

# Corpus addition composes a single source from all four sub-corpora.
analysis = Chain(source=(db + tuned + unstatic + static), folder=output)

# Handles kept so their outputs can be queried after the chain runs.
kdtree = KDTree()
dr = UMAP(components=10, cache=1)  # we need access to the original data
analysis.add(
    # FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, -1, -1], cache=1),
    LibroCQT(cache=0),
Esempio n. 3
0
    "--input",
    default="~/Documents/Max 8/Packages/TB2-Alpha06-MacOS-Max/media",
    type=str,
    help="Folder for input. This should contain some audio files.",
)
args = parser.parse_args()

"""
You can also filter Corpus items by features of the individual items names.
This example is designed to run on the Fluid Corpus Manipulation example items
"""


# Corpus objects can be filtered by patterns in their items' file names.
# Each filter below narrows the corpus to the matching items only.

# Keep items whose name contains "Nico" (presumably substring match — confirm).
corpus = Corpus(args.input)
corpus.has("Nico")
print("\n", corpus.items, "\n")

# Keep items whose name starts with "Nico".
corpus = Corpus(args.input)
corpus.startswith("Nico")
print("\n", corpus.items, "\n")

# Keep items whose name ends with "641".
corpus = Corpus(args.input)
corpus.endswith("641")
print("\n", corpus.items, "\n")

# Name filters return the corpus itself, so they can be chained fluently.
corpus = Corpus(args.input)
corpus.startswith("Tremblay").endswith("M").has("FMTri")
print("\n", corpus.items, "\n")
Esempio n. 4
0
from ftis.analyser import (FluidOnsetslice, FluidMFCC, Stats, Standardise,
                           UMAP, ExplodeAudio)
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus, PathLoader

# Segment the high-gain source bounces, then describe each slice with MFCC
# statistics and project them onto a 2-d UMAP embedding.
source_corpus = Corpus("../../reaper/highgain_source/bounces")
analysis = Chain(source=source_corpus,
                 folder="../outputs/em_detailed_segmentation")

# Onset-based segmentation settings for this material; cache=1 re-uses any
# previous run of the same configuration.
segmenter = FluidOnsetslice(
    framedelta=20,
    minslicelength=2,
    filtersize=5,
    threshold=0.3,
    metric=0,
    cache=1,
)

analysis.add(
    segmenter,
    ExplodeAudio(),  # split each file into one file per slice
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, 512, 4096]),
    Stats(numderivs=1),  # summary statistics plus first derivative
    Standardise(),       # scale features before dimension reduction
    UMAP(components=2),
)

if __name__ == "__main__":
    analysis.run()
Esempio n. 5
0
from ftis.analyser.descriptor import FluidMFCC
from ftis.analyser.clustering import KDTree
from ftis.analyser.scaling import Standardise
from ftis.analyser.audio import CollapseAudio
from ftis.analyser.dr import UMAP
from ftis.analyser.stats import Stats
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from ftis.common.io import write_json
from pathlib import Path


# Databending corpus, filtered to items between 0.1 and 10 seconds long.
corpus = (
    Corpus("~/Cloud/Projects/DataBending/DataAudioUnique")
    .duration(min_duration=0.1, max_duration=10)
)

# Single reference file whose neighbourhood in the corpus we want to find.
point = Corpus("~/Cloud/Projects/ElectroMagnetic/reaper/Interruptions/media/06-Kindle Off-200513_1547-glued-04.wav")

output = "../outputs/isolate_static"

# Analyse the reference point together with the corpus in one feature space.
analysis = Chain(
    source = (point+corpus),
    folder = output
)

# Handles kept so their outputs can be queried after the chain runs.
kdtree = KDTree()
dr = UMAP(components=10, cache=1)
analysis.add(
    CollapseAudio(),
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
Esempio n. 6
0
    help="Folder for input. This should contain some audio files.",
)
parser.add_argument(
    "-o",
    "--output",
    default="~/corpus-folder/corpus-management",
    type=str,
    help="Folder for output. This will be made if it doesnt exist.",
)
args = parser.parse_args()
"""
Corpus objects overload the addition operator.
This allows you to add Corpus objects together to create a new Corpus from their materials.
"""

em = Corpus(args.corpusone)  # first corpus
db = Corpus(args.corpustwo)  # second corpus
corpus = em + db  # compose a corpus of both sub-corpora

# NOTE(review): addition is reported to mutate the left operand, so
# `em + db` is NOT the same as `db + em` unless you assign the result —
# confirm against the Corpus.__add__ implementation. Without assignment
# you would write ```em + db``` and then pass em as the 'source' for
# the FTISProcess.

# The World receives the finished corpus graph and writes to the sink folder.
world = World(sink=args.output)
corpus >> FluidMFCC()  # >> chains an analyser onto the corpus
world.build(corpus)

if __name__ == "__main__":
    world.run()
Esempio n. 7
0
from ftis.world import World
from ftis.corpus import Corpus
from ftis.analyser.meta import ClusteredSegmentation
from ftis.analyser.slicing import FluidNoveltyslice
from ftis.adapter.reaper import render_tracks
from ftis.common.io import get_sr
from pathlib import Path

# Corpus of audio files to slice and re-segment.
c = Corpus("~/corpus-folder/corpus2")
nov = FluidNoveltyslice(threshold=0.2, minslicelength=2, cache=True)
# if we dont set the minslicelength to 2 then we might generate slices too short for the analysis
seg = ClusteredSegmentation(cache=True)

# Pipe the corpus through novelty slicing, then clustered segmentation.
c >> nov >> seg

# The World executes the graph and writes results beneath the sink folder.
w = World(sink="~/clustered_segmentation")
w.build(c)

if __name__ == "__main__":
    w.run()

    tracks = {}
    for media, slices in seg.output.items():
        pos = 0
        sr = get_sr(media)
        items = []
        for start, end in zip(slices, slices[1:]):
            start /= sr
            end /= sr

            item = {
Esempio n. 8
0
    default="~/corpus-folder/corpus1",
    type=str,
    help="Folder for input. This should contain some audio files.",
)
parser.add_argument(
    "-o",
    "--output",
    default="~/corpus-folder/corpus-management",
    type=str,
    help="Folder for output. This will be made if it doesnt exist.",
)
args = parser.parse_args()
"""
Corpus objects come bundled with some filtering processes to remove items that you don't want or need.
This example demonstrates filtering a corpus so only the top ten percent of samples by loudness are let through.
"""

# Corpora can be pre-processed to remove files that match certain patterns or constraints.
# In this case I am taking the top 10% of files by EBUR-128 loudness and filtering the corpus to that selection.
corpus = Corpus(args.input)
print(f"Corpus began with {len(corpus.items)} items")
# min_loudness=90 keeps the top 10% — presumably a percentile cut-off, as
# suggested by the declarative example below.
corpus.loudness(min_loudness=90)
print(f"Corpus filtered to {len(corpus.items)} items")

# You can also use a more declarative syntax like so:
# new_corpus = (
#     Corpus("~/corpus-folder/corpus1")
#     .loudness(max_loudness=10) #filter to bottom 10%
# )
# This becomes more clear when you use multiple filters
Esempio n. 9
0
from ftis.analyser.descriptor import FluidMFCC, LibroCQT
from ftis.analyser.scaling import Standardise
from ftis.analyser.dr import UMAP
from ftis.analyser.clustering import HDBSCAN
from ftis.analyser.stats import Stats
from ftis.common.io import get_duration
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus
from pathlib import Path
import jinja2


# Databending corpus, filtered to items between 0.1 and 10 seconds long.
db_corpus = (
    Corpus("~/Cloud/Projects/DataBending/DataAudioUnique")
    .duration(min_duration=0.1, max_duration=10)
)

# Per-slice audio from the earlier detailed-segmentation stage, filtered to
# items between 0.1 and 20 seconds long.
em_corpus = (
    Corpus("../outputs/em_detailed_segmentation/1_ExplodeAudio")
    .duration(min_duration=0.1, max_duration=20)
)

# Analyse both corpora together so they share a single feature space.
output = "../outputs/multicorpus_exploring"
analysis = Chain(
    source = (db_corpus + em_corpus), 
    folder = output
)

# Handle kept so the cluster assignments can be read after the chain runs.
clustering = HDBSCAN(minclustersize=10, cache=1)
analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
Esempio n. 10
0
Taking three anchor points:

/Users/james/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1/Mouse_01_1.wav
/Users/james/Cloud/Projects/DataBending/DataAudioUnique/pnacl_public_x86_64_pnacl_llc_nexe_4.wav
/Users/james/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1/RME Face Panel_01_1.wav

Let's produce a KDTree and then put the nearest neighbours of each anchor onto a track.
"""

# Absolute paths of the three anchor files; each should exist in one of the
# corpora assembled below.
anchors = [
    '/Users/james/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1/Mouse_01_1.wav',
    '/Users/james/Cloud/Projects/DataBending/DataAudioUnique/pnacl_public_x86_64_pnacl_llc_nexe_4.wav',
    '/Users/james/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1/RME Face Panel_01_1.wav'
]

# Databending corpus, filtered to items between 2 and 20 seconds long.
db = (Corpus("~/Cloud/Projects/DataBending/DataAudioUnique").duration(
    min_duration=2, max_duration=20))

# The 'static' files from the earlier classification split.
em = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

output = "../../reaper/Convolutions/anchors"

analysis = Chain(source=(em + db), folder=output)

# Handles kept so the tree and the reduced data can be queried after run().
kdtree = KDTree()
dr = UMAP(components=10, cache=1)  # we need access to the original data
analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1), Standardise(cache=1), dr,
    kdtree)
Esempio n. 11
0
from ftis.corpus import Corpus
from ftis.world import World
from ftis.world import World
from ftis.analyser.audio import CollapseAudio
from ftis.analyser.descriptor import (FluidLoudness, FluidPitch, FluidMFCC)
from ftis.analyser.stats import Stats
from pathlib import Path

# Analysers are created up front so their outputs can be cross-referenced
# by the masking hook defined below.
corpus = Corpus("~/corpus-folder/corpus1")
collapse = CollapseAudio()
pitch = FluidPitch(fftsettings=[1024, 512, 1024])
pitch_stats = Stats(spec=["median"])   # summarise pitch frames by their median
loudness = FluidLoudness(windowsize=1024, hopsize=512)
loudness_stats = Stats(spec=["mean"])  # summarise loudness frames by their mean


def mask_with_loudness(self):
    """Post-hook for the pitch analyser: remove analysis frames whose mean
    dBFS falls below a corpus loudness mean.

    Mutates ``self.output`` in place; ``self.output[k]`` is assumed to be a
    pair of parallel lists indexed like ``loudness.output`` — TODO confirm.

    NOTE(review): the outer loop compares every file's frames against every
    entry of ``loudness_stats.output``; if a per-file mask was intended, the
    inner loop should be restricted to the matching key (``x == k``).
    """
    for k, v in loudness_stats.output.items():
        mean = v[0]  # Stats(spec=["mean"]) -> first value is the mean
        # Delete indices in DESCENDING order. The original deleted by
        # ascending index while enumerating, which shifts every later index
        # and removes the wrong frames.
        for i, (x, y) in reversed(list(enumerate(loudness.output.items()))):
            dbfs = y[0]
            if sum(dbfs) / len(dbfs) < mean:
                del self.output[k][0][i]
                del self.output[k][1][i]


# Install the masking hook to run after pitch analysis completes.
pitch.post = mask_with_loudness

# script the connections
corpus >> collapse
collapse >> loudness >> loudness_stats
Esempio n. 12
0
from ftis.analyser.audio import CollapseAudio
from ftis.corpus import Corpus
from ftis.world import World
import argparse

# Minimal FTIS pipeline: collapse every corpus item's audio via CollapseAudio.
parser = argparse.ArgumentParser(
    description="Process input and output location")
parser.add_argument(
    "-i", "--input",
    type=str,
    default="~/corpus-folder/corpus1",
    help="Folder for input. This should contain some audio files.",
)
parser.add_argument(
    "-o", "--output",
    type=str,
    default="~/corpus-folder/collapse",
    help="Folder for output. This will be made if it doesnt exist.",
)
args = parser.parse_args()

# Wire the input corpus (source) into the world (sink) and attach the analyser.
process = World(source=Corpus(args.input), sink=args.output)
process.add(CollapseAudio())

if __name__ == "__main__":
    process.run()
Esempio n. 13
0
from ftis.analyser.descriptor import FluidMFCC
from ftis.analyser.scaling import Standardise
from ftis.analyser.dr import UMAP
from ftis.analyser.clustering import HDBSCAN
from ftis.analyser.stats import Stats
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus

db_corpus = (Corpus("~/Cloud/Projects/DataBending/DataAudioUnique").duration(
    min_duration=0.03))

em_corpus = (Corpus("../outputs/em_detailed_segmentation/1_ExplodeAudio"))

analysis = Chain(source=db_corpus + em_corpus,
                 folder="../outputs/multicorpus_exploring")

analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1), Standardise(),
    UMAP(components=2), HDBSCAN(minclustersize=5))

if __name__ == "__main__":
    analysis.run()
Esempio n. 14
0
    '--input',
    default="~/corpus-folder/corpus1",
    type=str,
    help="Folder for input. This should contain some audio files.")
parser.add_argument(
    '-o',
    '--output',
    default="~/corpus-folder/lambdas",
    type=str,
    help='Folder for output. This will be made if it doesnt exist.')
args = parser.parse_args()
"""
We can pre and process data in place, rather than having to create new analysers to process input and output data.
This functionality is inherited from the base analyser class, so everything can do it, you just need to implement what you want to do.
In this example, I simply reset the values in the output of the FluidLoudness() class after processing
"""

# All results are written beneath the output (sink) folder.
out = args.output
world = World(sink=out)


def remove_truepeak(self):
    """In-place post-processing hook: for every analysed item keep only the
    first value of its results list, discarding the rest (per the example's
    note, this resets the FluidLoudness output after processing)."""
    # Build a fresh mapping rather than using a dict comprehension.
    trimmed = {}
    for key, values in self.output.items():
        trimmed[key] = values[0]
    self.output = trimmed


corpus = Corpus(args.input)
# post= attaches remove_truepeak as an in-place hook run after the analysis.
corpus >> CollapseAudio() >> FluidLoudness(post=remove_truepeak)
world.build(corpus)

if __name__ == "__main__":
    world.run()
Esempio n. 15
0
from ftis.analyser import (FluidMFCC, Stats, Standardise, HDBSCluster)
from ftis.process import FTISProcess as Chain
from ftis.corpus import Corpus, PathLoader
from ftis.common.io import get_duration
from pathlib import Path
import jinja2

folder = "../outputs/em_detailed_clustering"
corpus = Corpus("../outputs/em_detailed_segmentation/1_ExplodeAudio").loudness(
    max_loudness=20)
analysis = Chain(source=corpus, folder=folder)

clustering = HDBSCluster(minclustersize=10, cache=1)
analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[1024, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1), Standardise(cache=1),
    clustering)

if __name__ == "__main__":
    analysis.run()

    tracks = {}
    for cluster, items in clustering.output.items():
        track_id = cluster
        pos = 0
        for audiofile in items:
            dur = get_duration(audiofile)
            item = {
                "file": audiofile,
                "length": get_duration(audiofile),
                "start": 0.0,