Code example #1
def run(self):
    self.setup()
    md = "# **** FTIS v1.1.1 ****"
    md += f"\n\n**Source: {self.corpus.path}**"
    md += f"\n\n**Sink: {self.sink}**"
    md += "\n\n---------------------"
    md += "\n\nBeginning processing..."
    self.console.print(Markdown(md))
    print("\n")
    self.run_analysers()
    self.general_metadata()
    write_json(self.metapath, self.metadata)
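All of these examples call write_json from ftis.common.io. As a rough sketch, such a helper is a thin wrapper over the standard-library json module; the parameter names and indent setting below are assumptions, not the library's actual signature.

import json

def write_json(path, data):
    # Serialise a dict to disk as pretty-printed JSON (illustrative only).
    with open(path, "w") as f:
        json.dump(data, f, indent=4)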
Code example #2
File: visualisation.py Project: jamesb93/ftis
def run(self):
    self.output = self.process.sink / f"{self.order}_{self.__class__.__name__}"
    self.output.mkdir(exist_ok=True)
    self.check_dimensions()
    # self.copy_audio()
    self.fmt()
    script = Path(__file__).resolve()
    # copy data and assets
    web_assets = script.parent / "web_assets"
    dest = self.output / "web_assets"
    if dest.exists():
        rmtree(dest)
    copytree(web_assets, dest)
    write_json(dest / "plot.json", self.data)
Code example #3
File: analyser.py Project: jamesb93/ftis
def update_success(self, status: bool) -> None:
    try:
        existing_metadata = read_json(self.process.metapath)
    except FileNotFoundError:
        existing_metadata = {}

    try:
        success = existing_metadata["success"]  # extract the success dict
    except KeyError:
        success = {}  # success record doesn't exist yet

    success[self.identity["hash"]] = status  # update the status of this analyser
    # join any existing data into the metadata
    self.process.metadata["success"] = success  # modify the original
    write_json(self.process.metapath, self.process.metadata)
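After a run, the metadata file therefore carries a success map keyed by each analyser's identity hash. An illustrative excerpt (the hash value is hypothetical):

{
    "success": {
        "3f2a9c1d": true
    }
}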
Code example #4
File: clustering.py Project: jamesb93/ftis
def dump(self):
    write_json(self.dump_path, self.output)
Code example #5
d = {}  # file path -> evenness score
for i, f in enumerate(files):
    print(f)
    print(i / len(files))  # rough progress readout
    ts = get_buffer(fluid.transientslice(f))

    if ts[0] != 0:
        ts.insert(0, 0)

    if len(ts) <= 2 and ts[0] == 0.0:
        d[str(f)] = -1  # too few transients to score
    else:
        # Let's grab the orderedness of the onsets
        norm = normalise(ts)
        average = mean(norm)
        robustified = [x / average for x in norm]
        first_deriv = deriv(robustified)
        d[str(f)] = stdev(first_deriv)

mi = 99999
ma = -99999
for v in d.values():
    if v > ma:
        ma = v
    if v < mi and v != -1:
        mi = v
ra = abs(ma - mi)
for k, v in d.items():
    d[k] = (v - mi) / ra

write_json("evenness.json", d)
cleanup()
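The rescaling loop above can also be written as a dict comprehension. A sketch, assuming (as in the loop) that the -1 sentinel is excluded from the minimum and that at least one real score exists:

scores = [v for v in d.values() if v != -1]
mi, ma = min(scores), max(scores)
d = {k: (v - mi) / (ma - mi) for k, v in d.items()}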
Code example #6
unstatic = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/0")
static = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

output = "../../reaper/Convolutions/tuned"

analysis = Chain(source=(db + tuned + unstatic + static), folder=output)

kdtree = KDTree()
dr = UMAP(components=10, cache=1)  # we need access to the original data
analysis.add(
    # FluidMFCC(discard=True, numcoeffs=20, fftsettings=[4096, -1, -1], cache=1),
    LibroCQT(cache=0),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    kdtree)

if __name__ == "__main__":
    analysis.run()

    pinpoint = tuned.items[0]  # single item
    x = dr.output[pinpoint]
    dist, ind = kdtree.model.query([x], k=200)
    keys = list(dr.output.keys())
    names = [keys[i] for i in ind[0]]
    d = {"1": names}
    write_json(analysis.folder / "nearest_files.json", d)
Code example #7
def dump(self):
    d = {"corpus_items": [str(x) for x in self.output]}
    write_json(self.dump_path, d)
Code example #8
File: 7_fluid_dataset.py Project: jamesb93/ftis
    default="~/corpus-folder/fluid-dataset",
    type=str,
    help="Folder for output. This will be made if it doesnt exist.",
)
args = parser.parse_args()
"""
Using the python-flucoma package analyser outputs can be turned into datasets.
To do this we have to specifically create an instance of the analyser we are interested in.
After the FTISProcess has run we then extract the output from that instance.
"""

src = Corpus(args.input)
out = args.output

world = World(sink=out)

stats = Stats(numderivs=2, spec=["stddev", "mean"])  # create an instance of the stats class
src >> CollapseAudio() >> FluidMFCC() >> stats
world.build(src)

if __name__ == "__main__":
    world.run()

    # Now that ftis has completed, let's pack the data into a fluid dataset
    fluid_dataset = dataset.pack(stats.output)  # marshal the output into the right format
    dataset_path = Path(out) / "dataset.json"  # create an output path
    write_json(dataset_path.expanduser(), fluid_dataset)  # write to disk
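For reference, pack marshals the analyser output into the JSON layout that FluCoMa's fluid.dataset~ consumes, roughly {"cols": n, "data": {id: [values]}}. An illustrative (hypothetical) excerpt:

{
    "cols": 2,
    "data": {
        "/corpus/file.wav": [0.12, 0.34]
    }
}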
Code example #9
def teardown(self):
    write_json(self.metapath, self.metadata)
    if self.clear:
        self.clear_cache()
Code example #10
File: scaling.py Project: jamesb93/ftis
def dump(self):
    jdump(self.model, self.model_dump)
    write_json(self.dump_path, self.output)
Code example #11
from ftis.corpus import Corpus
from ftis.common.io import write_json
from flucoma.dataset import pack
from pathlib import Path
"""
Let's make a KDTree of some convolution candidates to explore in max explorer.maxpat
"""

db = Corpus("~/Cloud/Projects/DataBending/DataAudioUnique").duration(
    min_duration=0.3, max_duration=20)

em = Corpus(
    "~/Cloud/Projects/ElectroMagnetic/outputs/classification/4_Split/1")

output = "../outputs/convolution_candidates"

analysis = Chain(source=(em + db), folder=output)

kdtree = KDTree()
dr = UMAP(components=10, cache=1)
analysis.add(
    FluidMFCC(discard=True, numcoeffs=20, fftsettings=[2048, -1, -1], cache=1),
    Stats(numderivs=1, flatten=True, cache=1),
    Standardise(cache=1),
    dr,
    kdtree,
)

if __name__ == "__main__":
    analysis.run()
    d = dr.output
    path_out = Path(output) / "dataset.json"
    write_json(path_out, pack(d))