def _filter_rhythm_metricity_pairs(
    absolute_rhythm_and_metricities: tuple,
    start: float = None,
    stop: float = None,
) -> tuple:
    start_idx = 0
    if start:
        start_idx = tools.find_closest_index(
            start, absolute_rhythm_and_metricities, key=operator.itemgetter(0)
        )
        if absolute_rhythm_and_metricities[start_idx][0] < start:
            start_idx += 1
    if stop:
        stop_idx = tools.find_closest_index(
            stop, absolute_rhythm_and_metricities, key=operator.itemgetter(0)
        )
        if absolute_rhythm_and_metricities[stop_idx][0] < stop:
            stop_idx += 1
    else:
        stop_idx = len(absolute_rhythm_and_metricities)
    return absolute_rhythm_and_metricities[start_idx:stop_idx]
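# Every function in this section leans on `tools.find_closest_index`. Its
# real implementation ships with the project's `tools` module; judging from
# the call sites here, a minimal sketch of the assumed contract could be:
def _find_closest_index_sketch(item, data, key=None):
    """Hypothetical stand-in for tools.find_closest_index, for illustration
    only: return the index of the element in `data` whose keyed value lies
    numerically closest to `item`."""
    if key is None:
        key = lambda value: value
    return min(range(len(data)), key=lambda idx: abs(key(data[idx]) - item))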
def find_next_position(
    self, next_metrical_prime: int, expected_difference: fractions.Fraction
):
    expected_position = self.relative_position + expected_difference
    next_metrical_prime_rhythms = self._rhythm_and_metricity_per_prime[
        next_metrical_prime
    ][0]
    # append the loop size as a sentinel so that positions past the last
    # beat wrap around to the start of the next loop
    test_area = next_metrical_prime_rhythms + (self._loop_size,)
    n_loops_added = 0
    closest_index = tools.find_closest_index(expected_position, test_area)
    if test_area[closest_index] <= self.relative_position:
        # never move backwards: advance until the found beat lies after
        # the current position
        while test_area[closest_index] <= self.relative_position:
            closest_index += 1
    else:
        # landing on the sentinel means the expected position overshoots
        # the loop; subtract whole loops until it falls inside again
        while test_area[closest_index] == self._loop_size:
            expected_position -= self._loop_size
            closest_index = tools.find_closest_index(expected_position, test_area)
            n_loops_added += 1
    deviation = expected_position - test_area[closest_index]
    # collect the neighbouring beat as a second candidate when the found
    # beat misses the expected position in either direction
    if deviation < 0 and closest_index != 0:
        if (
            n_loops_added == 0
            and test_area[closest_index - 1] <= self.relative_position
        ):
            candidates = (closest_index,)
        else:
            candidates = (closest_index, closest_index - 1)
    elif deviation > 0 and test_area[closest_index + 1] != self._loop_size:
        candidates = (closest_index, closest_index + 1)
    else:
        candidates = None
    if candidates:
        # among near-equidistant candidates, prefer the beat with the
        # higher metricity
        closest_index = max(
            (
                (
                    idx,
                    self._rhythm_and_metricity_per_prime[next_metrical_prime][1][idx],
                )
                for idx in candidates
            ),
            key=operator.itemgetter(1),
        )[0]
    return type(self)(
        next_metrical_prime,
        closest_index,
        self._nth_loop + n_loops_added,
        self._rhythm_and_metricity_per_prime,
        self._loop_size,
    )
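# Illustration of the metricity tie-break above, with assumed values:
# suppose the rhythm of prime 3 is (0, 1/3, 2/3) with metricities
# (1.0, 0.4, 0.7), the current relative position is 0 and the expected
# position lands just past 1/3. The positive deviation makes indices
# 1 and 2 candidates, and max() selects index 2 because 0.7 > 0.4.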
def quantisize_pitches(
    pitches: tuple, scale: tuple, concert_pitch: float = None
) -> ji.JIMel:
    """Quantize each pitch to the closest pitch of the given scale.

    All scale pitches are expected to live in octave 0; the octave of the
    input pitch is restored after quantization.
    """
    scale = sorted(scale)
    for p in scale:
        assert p.octave == 0
    scale_cent = tuple(p.cents for p in scale)
    quantisized = []
    for pitch in pitches:
        if concert_pitch:
            pitch.concert_pitch_freq = concert_pitch
        if pitch.is_empty:
            quantisized_pitch = mel.TheEmptyPitch
        else:
            # compare within one octave, then transpose back
            octave = pitch.octave
            normalized_cents = pitch.cents - (octave * 1200)
            quantisized_pitch = scale[
                tools.find_closest_index(normalized_cents, scale_cent)
            ].register(octave)
        quantisized.append(quantisized_pitch)
    return ji.JIMel(quantisized)
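# Hedged usage sketch (the ratios are made up; `ji.r` builds a just ratio,
# as it does in `_make_transcription` below):
#     scale = (ji.r(1, 1), ji.r(9, 8), ji.r(5, 4), ji.r(3, 2), ji.r(5, 3))
#     pitches = (ji.r(6, 5), ji.r(7, 4).register(1))
#     melody = quantisize_pitches(pitches, scale)
# Each input pitch snaps to the scale pitch closest in cents within its own
# octave, e.g. ji.r(6, 5) (~316 ct) snaps to ji.r(5, 4) (~386 ct) rather
# than ji.r(9, 8) (~204 ct).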
def find_responsible_element(self, absolute_time_position: float):
    """Return the element that is sounding at the requested moment."""
    assert absolute_time_position >= 0
    if self.time_measure == "absolute":
        absolute_line = self
    else:
        absolute_line = self.convert2absolute()
    abs_start_positions = absolute_line.delay
    abs_end_positions = absolute_line.dur
    closest_index = tools.find_closest_index(
        absolute_time_position, abs_start_positions
    )
    # find_closest_index may return the following element; step back if its
    # start lies after the requested position
    if abs_start_positions[closest_index] > absolute_time_position:
        closest_index -= 1
    if abs_end_positions[closest_index] < absolute_time_position:
        msg = "Can't find any element at position {}.".format(absolute_time_position)
        raise IndexError(msg)
    return self[closest_index]
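# Brief usage sketch (the event line is assumed for illustration): for a
# line with absolute start positions (0, 1, 2) and end positions (1, 2, 4),
#     line.find_responsible_element(2.5)
# returns the third element, while positions beyond 4 raise an IndexError.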
def _make_transcription(
    self,
    starting_scale_degree: int,
    starting_intonation: int,
    cent_distances: tuple,
) -> tuple:
    pitches = [(starting_scale_degree, starting_intonation, 0)]
    fitness = 0
    for distance in cent_distances:
        last_scale_degree, last_intonation, last_octave = pitches[-1]
        # correct the raw distance by how far the last intonation deviates
        # from its ideal scale degree
        adapted_distance = (
            distance
            + self._deviation_from_ideal_scale_degree_per_intonation[
                last_scale_degree
            ][last_intonation]
        )
        closest_item = tools.find_closest_item(
            adapted_distance,
            self._distance_to_other_scale_degrees_per_scale_degree[last_scale_degree],
            key=operator.itemgetter(1),
        )
        new_scale_degree = closest_item[0][0]
        new_octave = last_octave + closest_item[0][1]
        last_pitch = self._intonations_per_scale_degree[last_scale_degree][
            last_intonation
        ]
        last_pitch += ji.r(1, 1).register(last_octave)
        octavater = ji.r(1, 1).register(new_octave)
        possible_intonations = tuple(
            intonation + octavater
            for intonation in self._intonations_per_scale_degree[new_scale_degree]
        )
        # among the intonations of the new scale degree, pick the one whose
        # distance to the previous pitch comes closest to the asked distance
        last_pitch_cents = last_pitch.cents
        distance_per_intonation = tuple(
            into.cents - last_pitch_cents for into in possible_intonations
        )
        new_intonation = tools.find_closest_index(distance, distance_per_intonation)
        pitches.append((new_scale_degree, new_intonation, new_octave))
        fitness += distance_per_intonation[new_intonation]
    return tuple(pitches), fitness
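# Reading of the return value above (inferred from the code, not an
# external spec): `fitness` accumulates the realised cent distance of every
# chosen step, so a transcription whose realised intervals track the
# requested `cent_distances` closely ends up with a fitness near their sum.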
def interpolate_by_n_points(self, n_points: int) -> tuple:
    duration = self.duration
    points = tuple(range(n_points + 1))
    # map the start of every interpolation segment onto the point grid...
    point_position_per_interpolation = tools.accumulate_from_zero(
        tuple(float(event.delay / duration) * n_points for event in self[:-1])
    )
    # ...and snap each position to the closest grid point
    point_position_per_interpolation = tuple(
        tools.find_closest_index(item, points)
        for item in point_position_per_interpolation
    )
    points_per_interpolation = tuple(
        b - a
        for a, b in zip(
            point_position_per_interpolation, point_position_per_interpolation[1:]
        )
    )
    interpolations = tuple(
        item0.interpolate(item1, n_segment_points + 1)[:-1]
        for item0, item1, n_segment_points in zip(
            self, self[1:-1], points_per_interpolation
        )
    )
    interpolations += (self[-2].interpolate(self[-1], points_per_interpolation[-1]),)
    return tuple(functools.reduce(operator.add, interpolations))
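# Illustration of the point distribution, with assumed values: for three
# events whose first two delays are 2 and 2, a total duration of 4 and
# n_points = 8, the accumulated positions are (0.0, 4.0, 8.0), which snap
# to the grid indices (0, 4, 8); each segment therefore receives 4 points.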
def render(self, name: str) -> None:
    self.server.recordOptions(
        dur=self.duration, filename="{}.wav".format(name), sampletype=4
    )

    # local alias with a fixed seed keeps the ambient noise levels
    # reproducible between renders
    import random as random_ambient_noise_lv

    random_ambient_noise_lv.seed(1)

    # only render as many slice events as fit into the total duration
    n_events = tools.find_closest_index(
        self.duration, tools.accumulate_from_zero(self.duration_per_event)
    )
    duration_per_event = self.duration_per_event[:n_events]
    sample_path_per_event = self.sample_path_per_event[:n_events]

    # per-event level for every effect; the activity object decides
    # whether an effect sounds in a given slice
    lv_per_effect = {
        "{}_lv".format(effect): tuple(
            next(self.level_per_effect[effect])
            if self.activity_object_per_effect[effect](
                self.activity_lv_per_effect[effect]
            )
            else 0
            for i in duration_per_event
        )
        for effect in self.__effects
    }

    ambient_noise_lv_per_event = tuple(
        random_ambient_noise_lv.uniform(0.2, 0.4) for i in duration_per_event
    )
    ambient_noise_lv_per_event = tuple(
        (a, b)
        for a, b in zip((0,) + ambient_noise_lv_per_event, ambient_noise_lv_per_event)
    )

    # general dynamic level for each slice
    event_lv = tuple(self.volume * lv for lv in self.curve(n_events, "points"))

    ################################################################
    # controlling different dsp parameters
    filter_freq_per_event = tuple(
        next(self.filter_freq_maker) for i in duration_per_event
    )
    filter_q_per_event = tuple(next(self.filter_q_maker) for i in duration_per_event)
    rm_freq_per_event = tuple(next(self.rm_freq_maker) for i in duration_per_event)
    transpo_per_event = tuple(next(self.transpo_maker) for i in duration_per_event)
    chenlee_chaos_per_event = tuple(
        next(self.chenlee_chaos_maker) for i in duration_per_event
    )
    chenlee_pitch_per_event = tuple(
        next(self.chenlee_pitch_maker) for i in duration_per_event
    )
    lorenz_chaos_per_event = tuple(
        next(self.lorenz_chaos_maker) for i in duration_per_event
    )
    lorenz_pitch_per_event = tuple(
        next(self.lorenz_pitch_maker) for i in duration_per_event
    )
    ################################################################

    e = synthesis.pyo.Events(
        instr=SlicePlayer,
        path=sample_path_per_event,
        dur=duration_per_event,
        lv=event_lv,
        filter_freq=filter_freq_per_event,
        filter_q=filter_q_per_event,
        rm_freq=rm_freq_per_event,
        h_transpo=transpo_per_event,
        chenlee_chaos=chenlee_chaos_per_event,
        chenlee_pitch=chenlee_pitch_per_event,
        lorenz_chaos=lorenz_chaos_per_event,
        lorenz_pitch=lorenz_pitch_per_event,
        ambient_noise_lv=ambient_noise_lv_per_event,
        **lv_per_effect,
    )
    e.play()
    self.server.start()
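# Illustration of the event-count truncation in `render` (values assumed):
# with duration_per_event = (2, 3, 2) and self.duration = 5, the
# accumulated positions are (0, 2, 5, 7); the index closest to 5 is 2,
# so only the first two slice events are rendered.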
def _make_messages_for_one_tone(self, tone: old.Tone, channel_number: int) -> tuple:
    freq = tone.pitch.freq
    key = tools.find_closest_index(freq, _12edo_freq)
    cent_deviation = mel.SimplePitch.hz2ct(_12edo_freq[key], freq)
    if cent_deviation != 0:
        # translate the cent deviation into a position within the total
        # pitch-bending range, clamped to [0, 1]
        pitch_percent = (
            cent_deviation + self.maximum_cent_deviation
        ) / self.total_range_cent_deviation
        if pitch_percent > 1:
            pitch_percent = 1
            logging.warning(self.pb_warn)
        if pitch_percent < 0:
            pitch_percent = 0
            logging.warning(self.pb_warn)
        midi_pitch = int(self.maximum_pitch_bending * pitch_percent)
        midi_pitch -= self.maximum_pitch_bending_positive
    else:
        midi_pitch = self.maximum_pitch_bending_positive

    messages = []

    # send the bend a little ahead of the note_on (pitch_msg_delay ticks)
    # to avoid audible pitch-bending artefacts
    messages.append(
        mido.Message(
            "pitchwheel",
            channel=channel_number,
            pitch=midi_pitch,
            time=self.pitch_msg_delay,
        )
    )

    velocity = self._get_velocity(tone)
    duration = (
        self._convert_seconds2ticks(tone.duration)
        - self.note_on_msg_delay
        - self.pitch_msg_delay
    )
    messages.append(
        mido.Message(
            "note_on",
            note=key,
            velocity=velocity,
            time=self.note_on_msg_delay,
            channel=channel_number,
        )
    )
    # resend the pitchwheel message on every remaining tick so the bend
    # stays active for the whole tone
    for n in range(duration - 1):
        messages.append(
            mido.Message(
                "pitchwheel", channel=channel_number, pitch=midi_pitch, time=1
            )
        )
    messages.append(
        mido.Message(
            "note_off", note=key, velocity=velocity, time=1, channel=channel_number
        )
    )
    return tuple(messages)
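# Worked example of the cent-to-pitchwheel mapping above (all numbers
# assumed for illustration): with maximum_cent_deviation = 200,
# total_range_cent_deviation = 400, maximum_pitch_bending = 16383 and
# maximum_pitch_bending_positive = 8191, a tone 50 ct above its closest
# 12EDO key yields
#     pitch_percent = (50 + 200) / 400 = 0.625
#     midi_pitch    = int(16383 * 0.625) - 8191 = 2048
# i.e. roughly a quarter of the upward bending range.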