def applyReverb(sound, value, pad=1000, fade_in=10, fade_out=10):
    """Apply a reverb effect to a pydub AudioSegment via pysndfx.

    Args:
        sound: pydub AudioSegment to process.
        value: reverberance amount; values <= 0 return ``sound`` unchanged.
        pad: trailing silence (ms) appended so the reverb tail can decay.
        fade_in: fade-in duration (ms) applied to the result.
        fade_out: fade-out duration (ms) applied to the result.

    Returns:
        A new AudioSegment with reverb, padding, and fades applied.
    """
    if value <= 0:
        return sound

    # Append silence so the reverb tail has room to decay.
    if pad > 0:
        sound += AudioSegment.silent(duration=pad, frame_rate=sound.frame_rate)

    # Convert the pydub sound to a numpy array for pysndfx.
    # NOTE(review): astype(np.int16) assumes 16-bit samples — confirm
    # sound.sample_width is 2 for all callers.
    samples = np.array(sound.get_array_of_samples()).astype(np.int16)

    # Build and apply the effect chain (was aliased through a redundant
    # `fx = (chain)` intermediate).
    chain = AudioEffectsChain()
    chain.reverb(reverberance=value)
    processed = chain(samples)

    # Convert back to a raw array and spawn a new AudioSegment from it.
    newData = array.array(sound.array_type, processed)
    newSound = sound._spawn(newData)

    # Clamp fade durations to the clip length, as the original did.
    dur = len(newSound)
    return newSound.fade_in(min(fade_in, dur)).fade_out(min(fade_out, dur))
def addFx(sound, effects, pad=3000, fade_in=100, fade_out=100):
    """Apply a chain of named effects to a pydub AudioSegment via pysndfx.

    Args:
        sound: pydub AudioSegment to process.
        effects: iterable of (name, value) pairs; supported names are
            "reverb", "distortion", "highpass", "lowpass", "bass", "echo",
            and "tempo". "bass" and "echo" also accept a tuple value —
            (gain, frequency) and (amount, count) respectively.
        pad: trailing silence (ms) appended so effect tails can decay.
        fade_in: fade-in duration (ms) applied to the result.
        fade_out: fade-out duration (ms) applied to the result.

    Returns:
        A new AudioSegment with the effects applied.
    """
    # Append silence so reverb/echo tails have room to decay.
    if pad > 0:
        sound += AudioSegment.silent(duration=pad, frame_rate=sound.frame_rate)

    # Convert the pydub sound to a numpy array for pysndfx.
    # NOTE(review): astype(np.int16) assumes 16-bit samples — confirm.
    samples = np.array(sound.get_array_of_samples()).astype(np.int16)

    chain = AudioEffectsChain()
    for effect, value in effects:
        if effect == "reverb" and value > 0:
            chain.reverb(reverberance=value)
        elif effect == "distortion" and value > 0:
            chain.overdrive(gain=value)
        elif effect == "highpass" and value > 0:
            chain.highpass(value)
        elif effect == "lowpass" and value > 0:
            chain.lowpass(value)
        elif effect == "bass":
            frequency = 100
            gain = value
            if isinstance(value, tuple):
                gain, frequency = value
            # Removed a leftover debug print of (gain, frequency) here.
            # NOTE(review): "bass" uses highshelf — lowshelf is the usual
            # bass boost; confirm this is intentional.
            chain.highshelf(gain=gain, frequency=frequency)
        elif effect == "echo":
            echoStr = "echo 0.8 0.9"
            amount = value
            count = 1
            # An (amount, count) tuple requests repeated echoes.
            if isinstance(value, tuple):
                amount, count = value
            for i in range(count):
                # amount between 10 (robot) and 1000 (mountains)
                echoStr += " %s 0.3" % amount
            chain.custom(echoStr)
        elif effect == "tempo" and value != 1.0 and value != 1:
            chain.tempo(factor=value)

    # Apply the assembled chain (was aliased through a redundant
    # `fx = (chain)` intermediate).
    processed = chain(samples)

    # Convert back to a raw array and spawn a new AudioSegment from it.
    newData = array.array(sound.array_type, processed)
    newSound = sound._spawn(newData)

    # Clamp fade durations to the clip length, as the original did.
    dur = len(newSound)
    return newSound.fade_in(min(fade_in, dur)).fade_out(min(fade_out, dur))
def worker(_):
    """Generate 8 s of 44.1 kHz white noise and run it through a random
    pitch/tempo/reverb chain; the argument is ignored (pool-map slot)."""
    noise = np.random.standard_normal(44100 * 8)
    chain = (
        AudioEffectsChain()
        .pitch(np.random.uniform(-300, 300))
        .tempo(np.random.uniform(0.8, 1.2))
        .reverb(np.random.uniform(0, 100))
    )
    return chain(noise)
def randAudioAugment():
    """Build a pysndfx chain with a random subset of six audio effects.

    Each of lowshelf, highshelf, equalizer, overdrive, phaser, and reverb
    is independently enabled with probability 1/2, with its parameters
    drawn at random. Returns the configured AudioEffectsChain.
    """
    chain = AudioEffectsChain()
    enabled = [random.randint(0, 1) for _ in range(6)]

    if enabled[0] == 1:  # lowshelf
        gain = random.randint(0, 12) * random.choice([-1, 1])
        freq = random.randint(20, 300)
        slope = random.uniform(1, 7) / 10  # 0.1~0.7
        chain.lowshelf(gain=gain, frequency=freq, slope=slope)

    if enabled[1] == 1:  # highshelf
        gain = random.randint(0, 12) * random.choice([-1, 1])
        freq = random.randint(1000, 3000)
        slope = random.uniform(1, 7) / 10  # 0.1~0.7
        chain.highshelf(gain=gain, frequency=freq, slope=slope)

    if enabled[2] == 1:  # equalizer
        freq = random.randint(100, 3000)
        q = random.uniform(5, 15) / 10  # 0.5~1.5
        db = random.randint(0, 6) * random.choice([-1, 1])
        chain.equalizer(frequency=freq, q=q, db=db)

    if enabled[3] == 1:  # overdrive
        chain.overdrive(gain=random.randint(3, 7), colour=40)

    if enabled[4] == 1:  # phaser
        chain.phaser(
            gain_in=0.9,
            gain_out=0.8,
            delay=1,
            decay=0.25,
            speed=2,
            triangular=False,
        )

    if enabled[5] == 1:  # reverb
        # Keyword arguments evaluate left-to-right, so the random draws
        # happen in the same order as the original code.
        chain.reverb(
            reverberance=random.randint(30, 70),
            hf_damping=random.randint(30, 70),
            room_scale=random.randint(30, 70),
            stereo_depth=100,
            pre_delay=20,
            wet_gain=random.randint(1, 6),
            wet_only=False,
        )

    return chain
async def addAudioEffects(self):
    """Apply the configured pysndfx effects to this item's wav file.

    Reads ``{wavAudio}/{fileName}.wav`` and writes the processed result to
    ``{processedAudio}/{fileName}.wav``. Reverse, speed, reverb, and
    overdrive are applied only when the corresponding attribute is set.
    """
    fx = AudioEffectsChain()
    if self.reverse:
        fx.reverse()
    # PEP 8: compare to None with identity, not `!= None`.
    if self.speed is not None:
        fx.speed(factor=self.speed)
    if self.reverb is not None:
        fx.reverb(reverberance=self.reverb)
    if self.overdrive is not None:
        fx.overdrive(gain=self.overdrive)

    wavDirPath = self.dirs['wavAudio'].dirPath
    processedDirPath = self.dirs['processedAudio'].dirPath
    wavPath = f'{wavDirPath}/{self.fileName}.wav'
    processedPath = f'{processedDirPath}/{self.fileName}.wav'
    # Calling the chain with (infile, outfile) processes the file on disk.
    fx(wavPath, processedPath)
def __call__(self, input):
    """Randomly augment audio samples with pysndfx effects.

    Pitch shift (±300 cents), tempo change (0.8–1.2x), and reverb (0–100)
    are each applied independently with probability 1/2.

    Args:
        input: audio samples in a form pysndfx accepts.

    Returns:
        The augmented samples.
    """
    effect = AudioEffectsChain()
    if np.random.uniform() > 0.5:
        effect = effect.pitch(np.random.uniform(-300, 300))
    if np.random.uniform() > 0.5:
        effect = effect.tempo(np.random.uniform(0.8, 1.2))
    if np.random.uniform() > 0.5:
        effect = effect.reverb(np.random.uniform(0, 100))
    # Removed dead commented-out effects (overdrive, limiter, lowshelf,
    # highshelf); restore from version history if they are ever needed.
    return effect(input)
def _reverb(self, chain: AudioEffectsChain):
    """Append a default-parameter reverb stage to ``chain`` and return it."""
    reverbed = chain.reverb()
    return reverbed
parser.add_argument('--bitrate', type=str, default='12000k') parser.add_argument('--margin-left', type=int, default=0) parser.add_argument('--margin-right', type=int, default=0) args = parser.parse_args() # we import everything we need here for performance from moviepy.editor import * from pysndfx import AudioEffectsChain from math import trunc import os # apply effects to audio fx = AudioEffectsChain().speed(args.speed) if args.reverb: fx = fx.reverb() fx(args.audio, 'tmp.mp3') # check if the background is a gif is_gif = args.background.endswith('.gif') # load the background and the audio audio = AudioFileClip('tmp.mp3') image = VideoFileClip(args.background) if is_gif else ImageClip( args.background) image = image.margin(left=args.margin_left, right=args.margin_right) # set loop times if is_gif: loops = trunc(audio.duration / image.duration)