def run(self, data):
    audio_samples = data.get('raw_audio')
    if audio_samples is None:
        return
    with self.fps:
        # Normalize 16-bit samples to the range [-1, 1]
        y = audio_samples / 2.0**15
        # Construct a rolling window of audio samples
        self.y_roll[:-1] = self.y_roll[1:]
        self.y_roll[-1, :] = np.copy(y)
        y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)

        output = None
        vol = np.max(np.abs(y_data))
        if vol < self.config['MIN_VOLUME_THRESHOLD']:
            # print('No audio input. Volume below threshold. Volume:', vol)
            output = np.tile(0, self.config['N_FFT_BINS']).astype(float)
        else:
            # Transform audio input into the frequency domain
            N = len(y_data)
            # Pad with zeros until the next power of two
            N_zeros = 2**int(np.ceil(np.log2(N))) - N
            y_data *= self.fft_window
            y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
            YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
            # Construct a Mel filterbank from the FFT data
            mel = np.atleast_2d(YS).T * self.mel_y.T
            # Scale data to values more suitable for visualization
            mel = np.sum(mel, axis=0)
            mel = mel**2.0
            # Gain normalization
            self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
            mel /= self.mel_gain.value
            mel = self.mel_smoothing.update(mel)
            output = mel

        if output is not None:
            data['audio'] = output
            self.net_send_samples.append(list(output))
            if time.time() - self.net_send_time >= self.net_send_rate:
                self.net_send_time = time.time()
                # Average each bin over the samples collected since the last send
                net_data = [
                    sum(self.net_send_samples[i][b]
                        for i in range(len(self.net_send_samples)))
                    / len(self.net_send_samples)
                    for b in range(len(self.net_send_samples[0]))
                ]
                send_monitor(None, 'AUDIO', bins=net_data)
                self.net_send_samples = []
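# The method above assumes module-level imports of numpy as np, time, and
# gaussian_filter1d from scipy.ndimage, and relies on self.mel_gain and
# self.mel_smoothing exposing update() and .value. A minimal sketch of such
# an exponential smoothing filter is shown below; the class name and the
# smoothing constants are assumptions for illustration, and the real filter
# class in this codebase may differ.
import numpy as np

class ExpFilter:
    """Asymmetric exponential smoothing filter (illustrative sketch)."""

    def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
        self.value = val
        self.alpha_decay = alpha_decay
        self.alpha_rise = alpha_rise

    def update(self, value):
        value = np.asarray(value, dtype=float)
        # Rise quickly toward larger inputs, decay slowly toward smaller ones
        alpha = np.where(value > self.value, self.alpha_rise, self.alpha_decay)
        self.value = alpha * value + (1.0 - alpha) * self.value
        return self.value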
def prep_dmx(self):
    # If this light is not suspended, copy the auto state to the real state
    if self.name not in self.config.get('SUSPENDED', []):
        self.state.update(self.auto_state)
    out = dict(self.state)
    changed = {k: v for k, v in out.items() if v != self.last_state[k]}
    if changed:
        # print(changed)
        send_monitor(self, 'STATE', **changed)
    # Invert configured channels (255 - value) before output
    for k in self.INVERT:
        out[k] = 255 - out[k]
    return out
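# For illustration only: a minimal sketch of how the per-property dict
# returned by prep_dmx() could be packed into a DMX universe buffer.
# pack_dmx, channel_map and the 512-slot universe are assumptions; the
# actual send_dmx() path in this codebase may pack channels differently.
def pack_dmx(values, channel_map, universe=None):
    """Write property values into their mapped DMX channel slots."""
    if universe is None:
        universe = bytearray(512)
    for prop, channel in channel_map.items():
        if prop in values:
            # Clamp to the valid DMX range before writing
            universe[channel] = max(0, min(255, int(values[prop])))
    return universe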
def _run_effects(self, data):
    done = []
    for fn, e in self.effects.items():
        # Automation effects write to auto_state; others write to the real state
        dest = self.auto_state if e.automation else self.state
        if e.done:
            value = e.done_value
            done.append(fn)
        else:
            value = e.value
        if fn in self.MULTI_PROP_MAP:
            # A single effect value can fan out to several properties
            dest.update(dict(zip(self.MULTI_PROP_MAP[fn], value)))
        else:
            dest[fn] = value
    for k in done:
        send_monitor(self, 'EFFECT', opstate='DONE', opname=k,
                     **self.effects[k].args)
        del self.effects[k]
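# _run_effects() only relies on each effect exposing .automation, .done,
# .done_value, .value and .args. A minimal linear fade with that shape is
# sketched below for illustration (assumes `import time`); the concrete
# effect classes in this codebase may differ.
class FadeEffect:
    """Linear fade from start to end over duration seconds (sketch)."""

    def __init__(self, start, end, duration, automation=True):
        self.args = {'start': start, 'end': end, 'duration': duration}
        self.automation = automation
        self.done_value = end
        self._start = start
        self._end = end
        self._duration = duration
        self._t0 = time.time()

    @property
    def done(self):
        return time.time() - self._t0 >= self._duration

    @property
    def value(self):
        frac = min(1.0, (time.time() - self._t0) / self._duration)
        return self._start + (self._end - self._start) * frac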
def run(self, data):
    with self.fps:
        new_state = data.get('push_state__' + self.name)
        if new_state:
            self.auto_state = dict(new_state)
        # Lights without a mapping normally have no effects unless one was
        # added outside of automation, so effects always run.
        self._run_effects(data)
        if self.output_config.get('MAPPING'):
            if self.state_effect:
                if not self.state_effect.applicable(self, data):
                    send_monitor(self, 'STATE_EFFECT', opstate='DONE',
                                 opname=self.state_effect.__class__.__name__)
                    self.state_effect.unapply(self, data)
                    self.state_effect = None
                    self.send_dmx(data, True)
                else:
                    self.state_effect.run(self, data)
            for e in self.state_effects:
                if e is self.state_effect:
                    break
                if e.applicable(self, data):
                    if self.state_effect:
                        send_monitor(self, 'STATE_EFFECT', opstate='DONE',
                                     opname=self.state_effect.__class__.__name__)
                        self.state_effect.unapply(self, data)
                    self.state_effect = e
                    e.apply(self, data)
                    self.send_dmx(data, True)
                    send_monitor(self, 'STATE_EFFECT', opstate='NEW',
                                 opname=self.state_effect.__class__.__name__)
                    break
            self._run_mapping(data)
        self.send_dmx(data)
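# The state-effect dispatch above expects every entry of self.state_effects
# to expose applicable(), apply(), run() and unapply(), each taking the
# light and the frame data. A skeleton with that interface is shown below;
# the 'blackout' flag and 'dim' property are assumptions for illustration.
class BlackoutStateEffect:
    """Hold the light dark while a hypothetical 'blackout' flag is set."""

    def applicable(self, light, data):
        return bool(data.get('blackout'))

    def apply(self, light, data):
        # Remember the current state so it can be restored on unapply()
        self._saved = dict(light.state)
        light.state['dim'] = 0

    def run(self, light, data):
        # Re-assert the override on every frame while active
        light.state['dim'] = 0

    def unapply(self, light, data):
        light.state.update(self._saved)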
def add_effect(self, k, effect, overwrite=False):
    # Register a new effect unless one with the same key is already running
    if overwrite or k not in self.effects:
        send_monitor(self, 'EFFECT', opstate='NEW', opname=k, **effect.args)
        self.effects[k] = effect
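# Illustrative usage only, reusing the FadeEffect sketch above; `light` is a
# hypothetical instance and 'dim' an assumed property name. Without
# overwrite=True, a repeated trigger does not restart an in-flight effect.
light.add_effect('dim', FadeEffect(start=0, end=255, duration=2.0))
light.add_effect('dim', FadeEffect(start=0, end=255, duration=2.0))  # ignored: key exists
light.add_effect('dim', FadeEffect(start=255, end=0, duration=1.0), overwrite=True)  # replaces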