Example #1
import numpy as np
from librosa.util import find_files  # assumed source of find_files

import util as U  # the repo's util module (see Example #3)
import NMF


def getNoteTemplates(path_notes):
    """Extract one normalized spectral template per note recording in path_notes."""
    list_templates = []
    list_noteaudio = find_files(path_notes, ext="wav")
    for noteaudio in list_noteaudio:
        S_mag = U.LoadAudio(noteaudio)
        # Fix the activation row to ones so that rank-1 NMF only learns the spectral template
        init_H = np.ones((1, S_mag.shape[1]))
        template, activate = NMF.nmf_sklearn(S_mag, k=1, H=init_H, verbose=False)
        list_templates.append(template[:, 0] / np.max(template))
    templates = np.stack(list_templates)
    return templates
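
NMF.nmf_sklearn itself is not shown in this example. As a rough illustration of the idea it is used for here (rank-1 NMF with the activation row fixed to all ones, so only the spectral template is learned), a minimal numpy sketch with multiplicative updates might look like the following; the function name, iteration count and Euclidean objective are my choices, not taken from the repo:

import numpy as np

def rank1_template_sketch(S_mag, n_iter=30, eps=1e-10):
    # Fixed activations H = ones(1, T): every frame contributes equally,
    # so the single column of W converges to an average spectral template.
    F, T = S_mag.shape
    H = np.ones((1, T))
    W = np.random.rand(F, 1) + eps
    for _ in range(n_iter):
        # Multiplicative update for W under the Euclidean (Frobenius) objective
        W *= (S_mag @ H.T) / (W @ (H @ H.T) + eps)
    return W[:, 0] / W.max()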
"""

import util
"""
Code example for training U-Net
"""
"""
import network

Xlist,Ylist = util.LoadDataset(target="vocal")
print("Dataset loaded.")
network.TrainUNet(Xlist,Ylist,savefile="unet.model",epoch=30)
"""
"""
Code example for performing vocal separation with U-Net
"""
import glob
fl = glob.glob('audio/*')
fname = fl[1]  # e.g. 'audio/01 Calling (2).wav'
mag, phase = util.LoadAudio(fname)  # load the same file that the output names refer to
start = 0
end = 2048 + 1024  # process the first 3072 STFT frames

mask = util.ComputeMask(mag[:, start:end], unet_model="unet.model", hard=False)

util.SaveAudio("%s-vocal.wav" % fname, mag[:, start:end] * mask,
               phase[:, start:end])
util.SaveAudio("%s-inst.wav" % fname, mag[:, start:end] * (1 - mask),
               phase[:, start:end])
util.SaveAudio("%s-orig.wav" % fname, mag[:, start:end], phase[:, start:end])
Example #3
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 18:44:18 2018

@author: wuyiming
"""

import util
import NMF
import chord
import NoteTemplate

import numpy as np

S = util.LoadAudio("PianoChord-70bpm.wav")
W = NoteTemplate.getNoteTemplates("audio_notes")

_, H = NMF.nmf_beta(S, 48, W.T, beta=0.5, iteration=20)

util.PlotPianoroll(H)

H_binary = (H / np.max(H)) > 0.2  # binarize activations at 20% of the global maximum
util.PlotPianoroll(H_binary)

# Sum the activations within each beat-synchronous segment (70 BPM), then match each
# normalized segment against chord templates
segments = [np.sum(seg, axis=1) for seg in util.SegmentByBeat(H, 70, 4 * 4)]
chords = [chord.match_chord(seg / seg.max()) for seg in segments]

print(chords)
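
chord.match_chord is not included in this snippet. Below is a rough sketch of template-based chord matching, assuming the 48 activations span four octaves starting at C and are folded down to 12 pitch classes before being compared against binary major/minor triad templates by cosine similarity; the layout and the template set are assumptions about the repo, not taken from it:

import numpy as np

NOTE_NAMES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]

def match_chord_sketch(activation48):
    chroma = activation48.reshape(4, 12).sum(axis=0)  # fold four octaves to pitch classes
    best_name, best_score = None, -1.0
    for root in range(12):
        for suffix, intervals in (("", (0, 4, 7)), ("m", (0, 3, 7))):  # major / minor triads
            template = np.zeros(12)
            template[[(root + i) % 12 for i in intervals]] = 1.0
            score = chroma @ template / (np.linalg.norm(chroma) * np.linalg.norm(template) + 1e-10)
            if score > best_score:
                best_name, best_score = NOTE_NAMES[root] + suffix, score
    return best_name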
Example #4
"""
@author: wuyiming
"""

import util
"""
Code example for training U-Net
"""
"""
import network

Xlist,Ylist = util.LoadDataset(target="vocal")
print("Dataset loaded.")
network.TrainUNet(Xlist,Ylist,savefile="unet.model",epoch=30)
"""
"""
Code example for performing vocal separation with U-Net
"""
fname = "original_mix.wav"
mag, phase = util.LoadAudio(fname)
start = 2048
end = 2048 + 1024  # process a 1024-frame excerpt starting at frame 2048

mask = util.ComputeMask(mag[:, start:end], unet_model="unet.model", hard=False)

util.SaveAudio("vocal-%s" % fname, mag[:, start:end] * mask, phase[:,
                                                                   start:end])
util.SaveAudio("inst-%s" % fname, mag[:, start:end] * (1 - mask),
               phase[:, start:end])
util.SaveAudio("orig-%s" % fname, mag[:, start:end], phase[:, start:end])
Example #5
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, help="song to process")
    parser.add_argument('-m', '--model', type=str, help="filename of the model to use")
    args = parser.parse_args()

    fname = Path(args.input)
    model_name = args.model
    start = 0

    mag, phase = util.LoadStereoAudio(fname)
    # Round the frame count up to the next multiple of 512 and work out how many
    # extra frames (rem) that requires
    end = (int(mag.shape[2] // 512) + 1) * 512
    rem = end - mag.shape[2]

    # concat (defined elsewhere in the repo) presumably extends the arrays by `rem`
    # frames along the time axis; first the stereo magnitude/phase...
    mag = concat(mag, mag.shape[1], rem, 2)
    phase = concat(phase, phase.shape[1], rem, 2)

    # ...then the mono mixture
    mag2, phase2 = util.LoadAudio(fname)
    mag2 = concat(mag2, mag2.shape[0], rem)
    phase2 = concat(phase2, phase2.shape[0], rem)

    # Stack the mono mixture as a third channel next to the two stereo channels
    mag2, phase2 = np.array((mag2,)), np.array((phase2,))
    mag = np.concatenate((mag, mag2), axis=0)
    phase = np.concatenate((phase, phase2), axis=0)
    del mag2, phase2

    testdata(C.PATH_MODEL / model_name,
             mag,
             phase,
             save_path=fname.parent,
             start=start,
             end=end)
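
concat is defined elsewhere in the repository; judging from how it is called above, it extends the spectrograms by `rem` frames along the time axis so the frame count becomes a multiple of 512 before the model is applied. A hypothetical equivalent using plain zero-padding (the function name and the zero-fill choice are assumptions):

import numpy as np

def pad_frames_sketch(x, rem, time_axis=-1):
    # Append `rem` zero-valued frames at the end of the time axis.
    pad_width = [(0, 0)] * x.ndim
    pad_width[time_axis] = (0, rem)
    return np.pad(x, pad_width, mode="constant")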