def intercalate_silences(rhythm_command_list, voice_names=None):
    """Append silence-making RhythmCommands that fill every gap in each
    voice's timespans, in place.

    The global span runs from 0 to the latest stop offset found in
    ``rhythm_command_list``.  For each voice, every sub-span of the global
    span not covered by that voice's commands gets a new RhythmCommand
    built from ``silence_maker``.

    Args:
        rhythm_command_list: commands with ``.timespan`` and
            ``.voice_name`` attributes; mutated (extended) in place.
        voice_names: voices to process; defaults to every voice name
            present in ``rhythm_command_list``, sorted.
    """
    global_timespan = abjad.Timespan(
        start_offset=0,
        stop_offset=max(_.timespan.stop_offset for _ in rhythm_command_list),
    )
    # A handler that renders notes, then forces every pitched leaf to a
    # rest — i.e. produces measured silence.
    silence_maker = handlers.RhythmHandler(
        rmakers.stack(
            rmakers.NoteRhythmMaker(),
            rmakers.force_rest(
                lambda _: abjad.Selection(_).leaves(pitched=True)),
        ),
        name="silence_maker",
    )
    if voice_names is None:
        voice_names = sorted(set(_.voice_name for _ in rhythm_command_list))
    for voice_name in voice_names:
        timespan_list = abjad.TimespanList([
            _.timespan for _ in rhythm_command_list
            if _.voice_name == voice_name
        ])
        # Start from the whole piece and carve out the sounding spans;
        # whatever remains is silence for this voice.
        silences = abjad.TimespanList([global_timespan])
        for timespan in timespan_list:
            silences -= timespan
        for timespan in silences:
            # NOTE(review): assumes RhythmCommand's positional order is
            # (voice_name, timespan, handler) — confirm against its
            # definition.
            new_command = RhythmCommand(
                voice_name,
                timespan,
                silence_maker,
            )
            rhythm_command_list.append(new_command)
def _cleanup_silent_timespans(
    self,
    layer,
    silenced_context_names,
    timespans,
):
    """Add SilentTimespans so each silenced context is covered wherever
    anything else sounds, then trim them so silences never overlap
    performed material.  Mutates ``timespans`` in place; returns None.
    """
    # Nothing to do without contexts to silence or material to inspect.
    if not silenced_context_names or not timespans:
        return
    # Pre-seed a bucket per explicitly silenced context.
    silent_timespans_by_context = {}
    for context_name in silenced_context_names:
        if context_name not in silent_timespans_by_context:
            silent_timespans_by_context[context_name] = \
                abjad.TimespanList()
    # Bucket incoming timespans per voice, separating performed material
    # from pre-existing silences.
    sounding_timespans_by_context = {}
    sounding_timespans = abjad.TimespanList()
    for timespan in timespans:
        voice_name = timespan.voice_name
        if isinstance(timespan, PerformedTimespan):
            if voice_name not in sounding_timespans_by_context:
                sounding_timespans_by_context[voice_name] = \
                    abjad.TimespanList()
            sounding_timespans_by_context[voice_name].append(timespan)
            sounding_timespans.append(timespan)
        else:
            if voice_name not in silent_timespans_by_context:
                silent_timespans_by_context[voice_name] = \
                    abjad.TimespanList()
            silent_timespans_by_context[voice_name].append(timespan)
    # Merge overlapping sounding spans so each shard below is one
    # contiguous block of activity.
    sounding_timespans.sort()
    sounding_timespans.compute_logical_or()
    # Create silences.
    for shard in sounding_timespans.partition(True):
        for context_name in silenced_context_names:
            timespan = SilentTimespan(
                layer=layer,
                voice_name=context_name,
                start_offset=shard.start_offset,
                stop_offset=shard.stop_offset,
            )
            silent_timespans_by_context[context_name].append(timespan)
    # Remove any overlap between performed and silent timespans.
    # Then add the silent timespans into the original timespan inventory.
    for context_name, silent_timespans in \
            sorted(silent_timespans_by_context.items()):
        silent_timespans.sort()
        if context_name in sounding_timespans_by_context:
            for timespan in sounding_timespans_by_context[context_name]:
                # NOTE(review): abjad's TimespanList.__sub__ operates in
                # place, so this bare expression does mutate
                # ``silent_timespans`` — confirm against the abjad
                # version in use.
                silent_timespans - timespan
        timespans.extend(silent_timespans)
def opposite_timespans(one_voice_timespan_list):
    """Append "Silence"-annotated timespans covering the gaps in a single
    voice's timespan list.

    Mutates ``one_voice_timespan_list`` in place: a silence is appended
    before the first timespan when it does not start at offset 0, and one
    between every pair of adjacent timespans that are not contiguous.
    Returns None.
    """
    # enumerate() replaces the original list.index(span1) lookup, which
    # was O(n) per pair and returned the wrong index whenever the list
    # contained duplicate timespans; a stray debug print() was removed.
    pairs = zip(one_voice_timespan_list, one_voice_timespan_list[1:])
    for i, (span1, span2) in enumerate(pairs):
        if i == 0 and span1.start_offset != 0:
            # Silence from the very beginning up to the first timespan.
            new_initial_span = abjad.AnnotatedTimespan(
                start_offset=(0, 1),
                stop_offset=span1.start_offset,
                annotation="Silence " +
                one_voice_timespan_list[0].annotation,
            )
            one_voice_timespan_list.append(new_initial_span)
        timespans = abjad.TimespanList([span1, span2])
        if timespans.all_are_contiguous is False:
            # Fill the gap between span1 and span2.
            new_span = abjad.AnnotatedTimespan(
                start_offset=span1.stop_offset,
                stop_offset=span2.start_offset,
                annotation="Silence " +
                one_voice_timespan_list[i + 1].annotation,
            )
            one_voice_timespan_list.append(new_span)
def __call__(self, counts, max_duration=None, translation=0, rotation=None, voice_name=None):
    """Turn a list of integer counts into a TimespanList.

    Negative counts advance the running position without producing a
    timespan (rests); positive counts produce a timespan of that many
    units of ``self.denominator``, optionally clipped to
    ``max_duration``.  ``rotation`` rotates the counts first and
    ``translation`` offsets the whole result.
    """
    if rotation:
        counts = counts[rotation:] + counts[:rotation]
    counts = self._ready_counts(counts, translation)
    denominator = self.denominator
    position = translation
    result = abjad.TimespanList([])
    for value in counts:
        if value < 0:
            # A negative count is a gap: skip ahead, emit nothing.
            position += abs(value)
            continue
        begin = position
        end = begin + value
        # Clip overly long events to max_duration when requested.
        if max_duration is not None and value > max_duration:
            end = begin + max_duration
        result.append(
            abjad.Timespan(
                start_offset=(begin, denominator),
                stop_offset=(end, denominator),
                annotation=voice_name,
            ))
        position += value
    return result
def split_at_offset(self, offset):
    """Split this timespan at ``offset``.

    Returns a TimespanList holding the non-empty halves; when ``offset``
    falls outside the open interval (start, stop), the list holds a
    single copy of self.  Divisions, when present, are split at the same
    point.
    """
    offset = abjad.Offset(offset)
    parts = abjad.TimespanList()
    # Offset outside the interior: nothing to split.
    if not (self._start_offset < offset < self._stop_offset):
        parts.append(abjad.new(self))
        return parts
    left_divisions = right_divisions = None
    if self.divisions is not None:
        left_divisions, right_divisions = abjad.split_sequence(
            self.divisions,
            [offset - self.start_offset],
            overhang=True,
        )
    halves = (
        abjad.new(
            self,
            start_offset=self._start_offset,
            stop_offset=offset,
            divisions=left_divisions,
        ),
        abjad.new(
            self,
            start_offset=offset,
            stop_offset=self._stop_offset,
            divisions=right_divisions,
        ),
    )
    # Keep only halves with nonzero duration.
    for half in halves:
        if half.duration:
            parts.append(half)
    return parts
def __call__(
    self,
    layer=None,
    music_specifiers=None,
    rotation=None,
    silenced_context_names=None,
    target_timespan=None,
    timespan_list=None,
):
    """Make timespans for ``music_specifiers`` over ``target_timespan``,
    merge them into ``timespan_list`` and return it sorted.

    Raises:
        TypeError: when ``target_timespan`` is None and ``timespan_list``
            is empty, since no target span can then be inferred.
    """
    # NOTE(review): ``rotation`` is accepted but unused here — kept for
    # interface compatibility; confirm whether subclasses consume it.
    if not isinstance(timespan_list, abjad.TimespanList):
        timespan_list = abjad.TimespanList(timespan_list)
    if target_timespan is None:
        if timespan_list:
            # Infer the target from the existing material.
            target_timespan = timespan_list.timespan
        else:
            # Was a bare ``raise TypeError``; give the caller a reason.
            raise TypeError(
                "supply target_timespan or a non-empty timespan_list")
    assert isinstance(timespan_list, abjad.TimespanList)
    if not music_specifiers:
        return timespan_list
    music_specifiers = self._coerce_music_specifiers(music_specifiers)
    new_timespans = self._make_timespans(
        layer=layer,
        music_specifiers=music_specifiers,
        target_timespan=target_timespan,
        timespan_list=timespan_list,
    )
    self._cleanup_silent_timespans(
        layer=layer,
        silenced_context_names=silenced_context_names,
        timespans=new_timespans,
    )
    timespan_list.extend(new_timespans)
    timespan_list.sort()
    return timespan_list
def _collect_preexisting_timespans(
    self,
    target_timespan=None,
    timespan_list=None,
):
    """Collect PerformedTimespans matching this maker's voice names and
    labels, optionally expanding each timespan's inner music into
    per-division timespans, intersected with ``target_timespan``.
    """
    preexisting_timespans = abjad.TimespanList()
    for timespan in timespan_list:
        if not isinstance(timespan, PerformedTimespan):
            continue
        if self.voice_names and timespan.voice_name not in self.voice_names:
            continue
        # Label filter: with no labels configured, keep every candidate;
        # otherwise require a music_specifier carrying at least one of
        # our labels.
        if not self.labels:
            pass
        elif (not hasattr(timespan, "music_specifier")
                or not timespan.music_specifier
                or not timespan.music_specifier.labels):
            continue
        elif not any(label in timespan.music_specifier.labels
                for label in self.labels):
            continue
        preexisting_timespans.append(timespan)
        if self.inspect_music and timespan.music:
            # Expand the timespan's music into one timespan per division,
            # translated from music-local time to score time.
            outer_start_offset = timespan.start_offset
            inner_start_offset = abjad.get.timespan(
                timespan.music).start_offset
            assert inner_start_offset == 0
            for division in timespan.music:
                division_timespan = abjad.get.timespan(division)
                division_timespan = division_timespan.translate(
                    outer_start_offset)
                preexisting_timespans.append(division_timespan)
    # NOTE(review): abjad's TimespanList.__and__ operates in place, so
    # this bare expression intersects the list with target_timespan —
    # confirm against the abjad version in use.
    preexisting_timespans & target_timespan
    return preexisting_timespans
def _collect_preexisting_timespans(self, target_timespan=None, timespan_list=None):
    """Collect the performed timespans that match this maker's voice
    names and labels, intersected in place with ``target_timespan``.
    """
    collected = abjad.TimespanList()
    for candidate in timespan_list:
        assert isinstance(candidate, (
            tsmakers.PerformedTimespan,
            tsmakers.SilentTimespan,
        ))
        # Silences are never collected.
        if isinstance(candidate, tsmakers.SilentTimespan):
            continue
        if (self.voice_names
                and candidate.voice_name not in self.voice_names):
            continue
        # Without a label filter, every surviving candidate is kept.
        if not self.labels:
            collected.append(candidate)
            continue
        if not candidate.music_specifier:
            continue
        specifier_labels = candidate.music_specifier.labels or ()
        if any(label in specifier_labels for label in self.labels):
            collected.append(candidate)
    # abjad's TimespanList.__and__ operates in place.
    collected & target_timespan
    return collected
def __call__(self, timespan):
    """Divide ``timespan`` by this object's ratio and return the shards
    selected by ``self.parts``, masked in place by ``self.mask_timespan``
    when one is set.
    """
    assert isinstance(timespan, abjad.Timespan)
    shards = timespan.divide_by_ratio(self.ratio)
    selected = abjad.TimespanList([shards[part] for part in self.parts])
    if self.mask_timespan is not None:
        # abjad's TimespanList.__and__ operates in place.
        selected & self.mask_timespan
    return selected
def __call__(
    self,
    durations=None,
    layer=None,
    division_masks=None,
    division_mask_seed=None,
    padding=None,
    seed=None,
    start_offset=None,
    timespan_specifier=None,
    voice_name=None,
):
    """Run the primary and secondary music specifiers over ``durations``
    and return their combined, sorted timespans.

    The secondary specifier receives either a single summed duration
    (when ``discard_inner_offsets`` is set) or the durations rotated by
    the seed-selected rotation index.
    """
    seed = seed or 0
    indices = abjad.CyclicTuple(self.rotation_indices or (0, ))
    primary_durations = abjad.Sequence(durations)
    start_offset = start_offset or 0
    if self.discard_inner_offsets:
        # Collapse the inner offsets into one total duration.
        secondary_durations = [sum(primary_durations)]
    else:
        secondary_durations = primary_durations.rotate(indices[seed])
    # Arguments common to both specifier calls.
    shared_kwargs = dict(
        layer=layer,
        division_masks=division_masks,
        division_mask_seed=division_mask_seed,
        padding=padding,
        seed=seed,
        start_offset=start_offset,
        timespan_specifier=timespan_specifier,
    )
    primary_timespans = self.primary_music_specifier(
        durations=primary_durations,
        voice_name=self.primary_voice_name,
        **shared_kwargs,
    )
    secondary_timespans = self.secondary_music_specifier(
        durations=secondary_durations,
        voice_name=self.secondary_voice_name,
        **shared_kwargs,
    )
    combined = abjad.TimespanList(
        primary_timespans[:] + secondary_timespans[:])
    combined.sort()
    return combined
def separate_timespans_by_annotation(timespan_list):
    """Group timespans into one TimespanList per distinct annotation.

    Annotations are kept in first-seen order, so the returned list of
    TimespanLists is ordered by each annotation's first appearance.

    Fixes: the original enumerated the voices but never used the index,
    and built each group with a manual append loop.
    """
    voices = []
    for item in timespan_list:
        if item.annotation not in voices:
            voices.append(item.annotation)
    # One TimespanList per voice, preserving the original span order.
    return [
        abjad.TimespanList(
            [span for span in timespan_list if span.annotation == voice])
        for voice in voices
    ]
def _make_timespans(
    self,
    layer=None,
    music_specifiers=None,
    target_timespan=None,
    timespan_list=None,
):
    """Build new timespans whose durations are derived from groups of
    pre-existing timespans (this maker is "dependent" on prior material).
    """
    new_timespans = abjad.TimespanList()
    # Without voice names or labels there is no material to react to.
    if not self.voice_names and not self.labels:
        return new_timespans
    rotation_indices = self.rotation_indices or (0,)
    rotation_indices = abjad.CyclicTuple(rotation_indices)
    context_counter = collections.Counter()
    preexisting_timespans = self._collect_preexisting_timespans(
        target_timespan=target_timespan,
        timespan_list=timespan_list,
    )
    partitioned_timespans = self._partition_preexisting_timespans(
        preexisting_timespans)
    for group_index, group in enumerate(partitioned_timespans):
        rotation_index = rotation_indices[group_index]
        # Gather the offsets delimiting this group, optionally including
        # each member's inner start/stop offsets.
        offsets = set()
        offsets.add(group.start_offset)
        offsets.add(group.stop_offset)
        for timespan in group:
            if self.include_inner_starts:
                offsets.add(timespan.start_offset)
            if self.include_inner_stops:
                offsets.add(timespan.stop_offset)
        offsets = tuple(sorted(offsets))
        # Differences between adjacent offsets become durations, rotated
        # per group by the cyclic rotation index.
        durations = abjad.Sequence(mathtools.difference_series(offsets))
        durations = durations.rotate(rotation_index)
        start_offset = offsets[0]
        for context_name, music_specifier in music_specifiers.items():
            # Each context keeps its own seed so repeated calls advance
            # that context's specifier deterministically.
            context_seed = context_counter[context_name]
            timespans = music_specifier(
                durations=durations,
                layer=layer,
                division_masks=self.division_masks,
                padding=self.padding,
                seed=context_seed,
                start_offset=start_offset,
                timespan_specifier=self.timespan_specifier,
                voice_name=context_name,
            )
            context_counter[context_name] += 1
            new_timespans.extend(timespans)
    return new_timespans
def add_silent_timespans(timespan_list, specifier=None):
    """Append SilentTimespans covering every gap between offset 0 and the
    latest stop offset in ``timespan_list``; return the mutated list.
    """
    latest_stop = max(_.stop_offset for _ in timespan_list)
    # Start from the whole span and subtract the sounding material.
    gaps = abjad.TimespanList([
        abjad.Timespan(start_offset=0, stop_offset=latest_stop)
    ])
    for sounding in timespan_list:
        gaps -= sounding
    # Whatever remains is silence; annotate it with the given specifier.
    for gap in gaps:
        timespan_list.append(
            SilentTimespan(
                start_offset=gap.start_offset,
                stop_offset=gap.stop_offset,
                annotation=specifier,
            ))
    return timespan_list
def make_showable_list(timespan_lists):
    """Flatten several timespan lists into one sorted TimespanList,
    re-annotating each timespan with its source list's 1-based index so
    the lists stay distinguishable when shown together.

    SilentTimespans keep their type; everything else becomes a plain
    ``abjad.Timespan``.

    Fixes: single-element ``extend([...])`` replaced with ``append``;
    the two near-identical constructor branches merged.
    """
    master_list = abjad.TimespanList([])
    for i, timespan_list in enumerate(timespan_lists):
        label = str(i + 1)
        for timespan in timespan_list:
            # Preserve silence-ness while swapping in the list label.
            span_class = (
                SilentTimespan
                if isinstance(timespan, SilentTimespan)
                else abjad.Timespan
            )
            master_list.append(
                span_class(
                    start_offset=timespan.start_offset,
                    stop_offset=timespan.stop_offset,
                    annotation=label,
                ))
    master_list.sort()
    return master_list
def talea_timespans(talea, advancement=0):
    """Convert a talea's durations into a TimespanList.

    Negative durations become SilentTimespans, positive ones plain
    Timespans; ``advancement`` advances the (copied) talea first.
    """
    advanced = abjad.new(talea).advance(advancement)
    spans = abjad.TimespanList([])
    offset = 0
    for duration in advanced:
        next_offset = offset + abs(duration)
        # Sign decides silence vs. sound; magnitude decides length.
        span_class = SilentTimespan if duration < 0 else abjad.Timespan
        spans.append(
            span_class(
                start_offset=offset,
                stop_offset=next_offset,
                annotation=None,
            ))
        offset = next_offset
    return spans
def _make_timespans(
    self,
    layer=None,
    music_specifiers=None,
    target_timespan=None,
    timespan_list=None,
):
    """Run every music specifier once over the whole target timespan and
    return the collected timespans.
    """
    offset = target_timespan.start_offset
    # A single duration: the full target span.
    whole_span_durations = [target_timespan.duration]
    collected = abjad.TimespanList()
    for context_name, specifier in music_specifiers.items():
        collected.extend(
            specifier(
                durations=whole_span_durations,
                layer=layer,
                division_masks=self.division_masks,
                padding=self.padding,
                seed=self.seed,
                start_offset=offset,
                timespan_specifier=self.timespan_specifier,
                voice_name=context_name,
            ))
    return collected
def opposite_timespan_list(self):
    """Append "Rests"-annotated timespans covering the gaps in this
    timespan list, then return ``self``.

    A rest is appended before the first timespan when it does not start
    at offset 0, and between every pair of adjacent timespans that are
    not contiguous.
    """
    # enumerate() replaces self.index(span1), which was O(n) per pair and
    # returned the wrong index whenever the list held duplicate spans.
    for i, (span1, span2) in enumerate(zip(self, self[1:])):
        if i == 0 and span1.start_offset != 0:
            # Rest from the very beginning up to the first timespan.
            new_initial_span = abjad.Timespan(
                start_offset=(0, 1),
                stop_offset=span1.start_offset,
                annotation="Rests " + self[0].annotation,
            )
            self.append(new_initial_span)
        timespans = abjad.TimespanList([span1, span2])
        if timespans.all_are_contiguous is False:
            # Fill the gap between span1 and span2.
            new_span = abjad.Timespan(
                start_offset=span1.stop_offset,
                stop_offset=span2.start_offset,
                annotation="Rests " + self[i + 1].annotation,
            )
            self.append(new_span)
    return self
# Voice 1: (start, stop, music_maker) triples expanded into annotated
# timespans; offsets are (numerator, denominator) rational pairs.
voice_1_timespan_list = abjad.TimespanList([
    abjad.AnnotatedTimespan(
        start_offset=start_offset,
        stop_offset=stop_offset,
        annotation=MusicSpecifier(
            music_maker=music_maker,
            voice_name='Voice 1',
        ),
    ) for start_offset, stop_offset, music_maker in [
        [(0, 4), (2, 4), sopranino_musicmaker_one],
        [(2, 4), (3, 4), sopranino_musicmaker_one],
        [(5, 4), (7, 4), sopranino_musicmaker_one],
        [(7, 4), (8, 4), sopranino_musicmaker_one],
        [(12, 4), (14, 4), sopranino_musicmaker_two],
        [(14, 4), (15, 4), sopranino_musicmaker_two],
        [(17, 4), (18, 4), sopranino_musicmaker_one],
        [(18, 4), (20, 4), sopranino_musicmaker_one],
        [(28, 4), (31, 4), sopranino_musicmaker_two],
        [(33, 4), (35, 4), sopranino_musicmaker_two],
        [(35, 4), (36, 4), sopranino_musicmaker_two],
        [(40, 4), (42, 4), sopranino_musicmaker_one],
        [(42, 4), (43, 4), sopranino_musicmaker_one],
        [(45, 4), (46, 4), sopranino_musicmaker_two],
        [(46, 4), (47, 4), sopranino_musicmaker_two],
        [(47, 4), (95, 8), sopranino_musicmaker_two],
        # [(95, 8), (96, 8), silence_maker],
    ]
])
# Four voices of performed material over offsets 0..4; Voice 3 is split
# into three adjacent spans while the other voices each get one span.
rhythm_timespan_list = abjad.TimespanList([
    tsmakers.PerformedTimespan(
        start_offset=abjad.Offset((0, 1)),
        stop_offset=abjad.Offset((4, 1)),
        voice_name="Voice 1",
    ),
    tsmakers.PerformedTimespan(
        start_offset=abjad.Offset((0, 1)),
        stop_offset=abjad.Offset((4, 1)),
        voice_name="Voice 2",
    ),
    tsmakers.PerformedTimespan(
        start_offset=abjad.Offset((0, 1)),
        stop_offset=abjad.Offset((3, 4)),
        voice_name="Voice 3",
    ),
    tsmakers.PerformedTimespan(
        start_offset=abjad.Offset((3, 4)),
        stop_offset=abjad.Offset((7, 4)),
        voice_name="Voice 3",
    ),
    tsmakers.PerformedTimespan(
        start_offset=abjad.Offset((7, 4)),
        stop_offset=abjad.Offset((4, 1)),
        voice_name="Voice 3",
    ),
    tsmakers.PerformedTimespan(
        start_offset=abjad.Offset((0, 1)),
        stop_offset=abjad.Offset((4, 1)),
        voice_name="Voice 4",
    ),
])
# Define an initial timespan structure, annotated with music specifiers. This # structure has not been split along meter boundaries. This structure does not # contain timespans explicitly representing silence. Here I make four, one # for each voice, using Python's list comprehension syntax to save some # space. print("Collecting timespans and rmakers ...") ###group one### voice_1_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset, stop_offset=stop_offset, annotation=MusicSpecifier(music_maker=music_maker, voice_name="Voice 1"), ) for start_offset, stop_offset, music_maker in [ [(0, 8), (7, 8), silence_maker], [(7, 8), (11, 8), silence_maker], [(11, 8), (17, 8), silence_maker], [(17, 8), (23, 8), silence_maker], [(23, 8), (28, 8), silence_maker], [(28, 8), (36, 8), silence_maker], ] ]) ###group two### voice_3_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset, stop_offset=stop_offset, annotation=MusicSpecifier(music_maker=music_maker, voice_name="Voice 3"),
import abjad

# Demo: build a timespan list from two annotated spans, then split a
# staff's leaves at the corresponding durations.
ts_list = abjad.TimespanList()
spans = [
    abjad.AnnotatedTimespan(0, (1, 8), "hello"),
    abjad.AnnotatedTimespan((1, 8), (1, 4), "world"),
]
print(spans)
for _ in spans:
    ts_list.append(_)
print(ts_list)
# Each timespan's duration drives one split point below.
durations = [timespan.duration for timespan in ts_list]
print(durations)
staff = abjad.Staff("c'4")
print(abjad.lilypond(staff))
leaves = staff[:]
# Split without tying, so the pieces stay independent notes.
abjad.mutate.split(leaves, durations, tie_split_notes=False)
print(abjad.lilypond(staff))
def make_split_list(timespan_list, offsets):
    """Split ``timespan_list`` at ``offsets`` and return the resulting
    shards flattened into a single TimespanList.
    """
    flattened = abjad.TimespanList()
    # split_at_offsets yields one timespan list per shard; merge them.
    for shard in timespan_list.split_at_offsets(offsets):
        flattened.extend(shard)
    return flattened
from abjadext import rmakers timespan_maker = tsmakers.DependentTimespanMaker( include_inner_starts=True, include_inner_stops=True, voice_names=('Viola Voice', ), ) abjad.f(timespan_maker) timespan_list = abjad.TimespanList([ tsmakers.PerformedTimespan( voice_name='Viola Voice', start_offset=(1, 4), stop_offset=(1, 1), ), tsmakers.PerformedTimespan( voice_name='Viola Voice', start_offset=(3, 4), stop_offset=(3, 2), ), ]) music_specifiers = { 'Violin Voice': None, 'Cello Voice': None, } target_timespan = abjad.Timespan((1, 2), (2, 1)) timespan_list = timespan_maker( music_specifiers=music_specifiers, target_timespan=target_timespan, timespan_list=timespan_list,
# Voice 1: every (start, stop) pair uses the bowmaker; offsets are
# (numerator, denominator) rational pairs.
voice_1_timespan_list = abjad.TimespanList(
    [
        abjad.AnnotatedTimespan(
            start_offset=start_offset,
            stop_offset=stop_offset,
            annotation=MusicSpecifier(music_maker=music_maker, voice_name="Voice 1"),
        ) for start_offset, stop_offset, music_maker in [
            [(0, 4), (4, 4), bowmaker],
            [(4, 4), (7, 4), bowmaker],
            [(12, 4), (15, 4), bowmaker],
            [(15, 4), (17, 4), bowmaker],
            [(17, 4), (20, 4), bowmaker],
            [(23, 4), (25, 4), bowmaker],
            [(25, 4), (27, 4), bowmaker],
            [(27, 4), (30, 4), bowmaker],
            [(32, 4), (36, 4), bowmaker],
            [(43, 4), (44, 4), bowmaker],
            [(44, 4), (48, 4), bowmaker],
            [(48, 4), (51, 4), bowmaker],
            [(52, 4), (56, 4), bowmaker],
            [(56, 4), (58, 4), bowmaker],
            [(62, 4), (64, 4), bowmaker],
            [(68, 4), (72, 4), bowmaker],
            [(72, 4), (76, 4), bowmaker],
            [(76, 4), (78, 4), bowmaker],
            [(78, 4), (81, 4), bowmaker],
            [(82, 4), (84, 4), bowmaker],
            [(84, 4), (87, 4), bowmaker],
            [(88, 4), (91, 4), bowmaker],
            [(91, 4), (93, 4), bowmaker],
            [(94, 4), (99, 4), bowmaker],
            [(100, 4), (103, 4), bowmaker],
            [(103, 4), (105, 4), bowmaker],
            [(106, 4), (110, 4), bowmaker],
            [(110, 4), (111, 4), bowmaker],
            [(112, 4), (114, 4), bowmaker],
            [(114, 4), (119, 4), bowmaker],
            [(122, 4), (126, 4), bowmaker],
            [(128, 4), (131, 4), bowmaker],
            [(132, 4), (134, 4), bowmaker],
            [(139, 4), (140, 4), bowmaker],
            [(144, 4), (146, 4), bowmaker],
            [(146, 4), (149, 4), bowmaker],
            [(150, 4), (153, 4), bowmaker],
            [(157, 4), (158, 4), bowmaker],
            [(158, 4), (162, 4), bowmaker],
            [(165, 4), (167, 4), bowmaker],
            [(167, 4), (169, 4), bowmaker],
            [(174, 4), (176, 4), bowmaker],
            [(176, 4), (177, 4), bowmaker],
            [(181, 4), (185, 4), bowmaker],
            [(185, 4), (186, 4), bowmaker],
        ]
    ]
)
# space. print("Collecting timespans and rmakers ...") ###group one### voice_1_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset, stop_offset=stop_offset, annotation=MusicSpecifier(music_maker=music_maker, voice_name="Voice 1"), ) for start_offset, stop_offset, music_maker in [ [(0, 8), (3, 8), flutemusicmaker_one], [(4, 8), (8, 8), flutemusicmaker_two], [(10, 8), (12, 8), flutemusicmaker_three], [(12, 8), (15, 8), flutemusicmaker_one], [(18, 8), (24, 8), flutemusicmaker_two], [(28, 8), (33, 8), flutemusicmaker_three], [(33, 8), (35, 8), flutemusicmaker_one], [(40, 8), (42, 8), flutemusicmaker_two], [(42, 8), (44, 8), flutemusicmaker_three], [(44, 8), (48, 8), flutemusicmaker_one], [(54, 8), (55, 8), flutemusicmaker_two], [(62, 8), (64, 8), flutemusicmaker_three], [(72, 8), (75, 8), flutemusicmaker_one], [(76, 8), (79, 8), flutemusicmaker_two], [(79, 8), (80, 8), silence_maker], ] ]) voice_3_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset,
def _make_timespans(
    self,
    layer=None,
    music_specifiers=None,
    target_timespan=None,
    timespan_list=None,
):
    """Generate timespans anchored to the starts and stops of groups of
    pre-existing timespans, with durations drawn from the start/stop
    taleas and groupings.
    """
    new_timespans = abjad.TimespanList()
    # Without voice names or labels there is no material to anchor to.
    if not self.voice_names and not self.labels:
        return new_timespans
    # Fall back to zero-length taleas / single-item groupings, wrapping
    # each in a cursor so successive groups consume successive values.
    start_talea = self.start_talea
    if start_talea is None:
        start_talea = rmakers.Talea((0, ), 1)
    start_talea = tsmakers.Cursor(start_talea)
    start_groupings = self.start_groupings
    if start_groupings is None:
        start_groupings = (1, )
    start_groupings = tsmakers.Cursor(start_groupings)
    stop_talea = self.stop_talea
    if stop_talea is None:
        stop_talea = rmakers.Talea((0, ), 1)
    stop_talea = tsmakers.Cursor(stop_talea)
    stop_groupings = self.stop_groupings
    if stop_groupings is None:
        stop_groupings = (1, )
    stop_groupings = tsmakers.Cursor(stop_groupings)
    # The seed winds every cursor backward (negative) or forward one
    # step (positive) before any material is generated.
    if self.seed:
        if self.seed < 0:
            for _ in range(abs(self.seed)):
                start_talea.backtrack()
                start_groupings.backtrack()
                stop_talea.backtrack()
                stop_groupings.backtrack()
        else:
            next(start_talea)
            next(start_groupings)
            next(stop_talea)
            next(stop_groupings)
    context_counter = collections.Counter()
    preexisting_timespans = self._collect_preexisting_timespans(
        target_timespan=target_timespan,
        timespan_list=timespan_list,
    )
    new_timespan_mapping = {}
    for group_index, group in enumerate(
            preexisting_timespans.partition(True)):
        for context_name, music_specifier in music_specifiers.items():
            if context_name not in new_timespan_mapping:
                continue
            # NOTE(review): abjad's TimespanList.__sub__ operates in
            # place, so this bare expression carves the current group's
            # span out of previously generated material — confirm
            # against the abjad version in use.
            new_timespan_mapping[context_name] - group.timespan
        for context_name, music_specifier in music_specifiers.items():
            if context_name not in new_timespan_mapping:
                new_timespan_mapping[context_name] = \
                    abjad.TimespanList()
            context_seed = context_counter[context_name]
            # Pull the next grouping's worth of durations off each talea.
            start_durations = []
            for _ in range(next(start_groupings)):
                start_durations.append(next(start_talea))
            stop_durations = []
            for _ in range(next(stop_groupings)):
                stop_durations.append(next(stop_talea))
            start_timespans, stop_timespans = (), ()
            if start_durations:
                group_start = group.start_offset
                if self.start_anchor is abjad.Right:
                    # print('!!!', float(group_start), float(group_start -
                    #     sum(start_durations)))
                    group_start -= sum(start_durations)
                start_timespans = music_specifier(
                    durations=start_durations,
                    layer=layer,
                    division_masks=self.division_masks,
                    padding=self.padding,
                    seed=context_seed,
                    start_offset=group_start,
                    timespan_specifier=self.timespan_specifier,
                    voice_name=context_name,
                )
                context_counter[context_name] += 1
            if stop_durations:
                group_stop = group.stop_offset
                if self.stop_anchor is abjad.Right:
                    group_stop -= sum(stop_durations)
                stop_timespans = music_specifier(
                    durations=stop_durations,
                    layer=layer,
                    division_masks=self.division_masks,
                    padding=self.padding,
                    seed=context_seed,
                    start_offset=group_stop,
                    timespan_specifier=self.timespan_specifier,
                    voice_name=context_name,
                )
                context_counter[context_name] += 1
            #if start_timespans and stop_timespans:
            #    start_timespans & group.timespan
            new_timespan_mapping[context_name].extend(start_timespans)
            new_timespan_mapping[context_name].extend(stop_timespans)
    # Fuse each context's overlapping material before returning.
    for context_name, timespans in new_timespan_mapping.items():
        timespans.compute_logical_or()
        new_timespans.extend(timespans)
    return new_timespans
# Define an initial timespan structure, annotated with music specifiers. This # structure has not been split along meter boundaries. This structure does not # contain timespans explicitly representing silence. Here I make four, one # for each voice, using Python's list comprehension syntax to save some # space. print('Collecting timespans and rmakers ...') voice_1_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset, stop_offset=stop_offset, annotation=MusicSpecifier( rhythm_maker=rhythm_maker, voice_name='Voice 1', ), ) for start_offset, stop_offset, rhythm_maker in [ [0, 1, musicmaker_one], [2, 3, musicmaker_one], ] ]) voice_5_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset, stop_offset=stop_offset, annotation=MusicSpecifier( rhythm_maker=rhythm_maker, voice_name='Voice 5', ),
], ) print('Collecting timespans and rmakers ...') voice_1_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset, stop_offset=stop_offset, annotation=MusicSpecifier( rhythm_maker=rhythm_maker, voice_name='Voice 1', ), ) for start_offset, stop_offset, rhythm_maker in [ [(0, 4), (1, 4), rmaker_one], [(1, 4), (2, 4), rmaker_one], [(2, 4), (3, 4), rmaker_one], [(3, 4), (4, 4), rmaker_one], [(4, 4), (5, 4), rmaker_one], [(5, 4), (6, 4), rmaker_one], [(6, 4), (7, 4), rmaker_one], [(7, 4), (8, 4), rmaker_one], [(8, 4), (9, 4), rmaker_one], [(9, 4), (10, 4), rmaker_one], [(10, 4), (11, 4), rmaker_one], [(11, 4), (12, 4), rmaker_one], ] ]) voice_2_timespan_list = abjad.TimespanList([ abjad.AnnotatedTimespan( start_offset=start_offset,
def __call__(
    self,
    durations=None,
    layer=None,
    division_mask_seed=0,
    division_masks=None,
    padding=None,
    seed=None,
    start_offset=None,
    timespan_specifier=None,
    voice_name=None,
):
    """Make PerformedTimespans from ``durations``, filtered by division
    masks and optionally surrounded by silent padding.
    """
    timespans = abjad.TimespanList()
    timespan_specifier = timespan_specifier or \
        tsmakers.TimespanSpecifier()
    seed = seed or 0
    division_mask_seed = division_mask_seed or 0
    # Drop zero/None durations, then accumulate the rest into offsets.
    durations = [_ for _ in durations if _]
    offsets = mathtools.cumulative_sums(durations, start_offset)
    if not offsets:
        return timespans
    offset_pair_count = len(offsets) - 1
    if offset_pair_count == 1:
        offset_pair_count = 2  # make patterns happy
    iterator = abjad.Sequence(offsets).nwise()
    for i, offset_pair in enumerate(iterator):
        start_offset, stop_offset = offset_pair
        # ``self`` acts as a cyclic collection of music specifiers.
        music_specifier = self[seed % len(self)]
        timespan = tsmakers.PerformedTimespan(
            forbid_fusing=timespan_specifier.forbid_fusing,
            forbid_splitting=timespan_specifier.forbid_splitting,
            layer=layer,
            minimum_duration=timespan_specifier.minimum_duration,
            music_specifier=music_specifier,
            start_offset=start_offset,
            stop_offset=stop_offset,
            voice_name=voice_name,
        )
        if not division_masks:
            timespans.append(timespan)
        else:
            # A matching SilenceMask drops the timespan; a SustainMask
            # or no match keeps it.
            output_mask = division_masks.get_matching_pattern(
                i, offset_pair_count + 1, rotation=division_mask_seed)
            if output_mask is None:
                timespans.append(timespan)
            elif isinstance(output_mask, rmakers.SustainMask):
                timespans.append(timespan)
            elif isinstance(output_mask, rmakers.SilenceMask):
                pass
        division_mask_seed += 1
        # Per-division application advances the specifier cycle too.
        if self.application_rate == 'division':
            seed += 1
    if padding:
        # Surround every contiguous shard of sounding material with
        # silent padding on both sides.
        silent_timespans = abjad.TimespanList()
        for shard in timespans.partition(True):
            silent_timespan_one = tsmakers.SilentTimespan(
                layer=layer,
                start_offset=shard.start_offset - padding,
                stop_offset=shard.start_offset,
                voice_name=voice_name,
            )
            silent_timespans.append(silent_timespan_one)
            silent_timespan_two = tsmakers.SilentTimespan(
                layer=layer,
                start_offset=shard.stop_offset,
                stop_offset=shard.stop_offset + padding,
                voice_name=voice_name,
            )
            silent_timespans.append(silent_timespan_two)
        silent_timespans.compute_logical_or()
        for timespan in timespans:
            # NOTE(review): abjad's TimespanList.__sub__ operates in
            # place — this bare expression trims the padding so it never
            # overlaps sounding material; confirm against the abjad
            # version in use.
            silent_timespans - timespan
        timespans.extend(silent_timespans)
    timespans.sort()
    return timespans
for instrument_ts1, instrument_ts2 in zip(instruments_timespans, instruments_timespans[1:]): error_str = "Instruments timespans must have the same total duration." assert instrument_ts1.duration == instrument_ts2.duration, error_str durations_aflute = timespans_aflute.AnnotatedDurations() durations_bclarinet = timespans_bclarinet.AnnotatedDurations() durations_piano = timespans_piano1e3.AnnotatedDurations() # durations_piano3 = timespans_piano1e3.AnnotatedDurations() durations_strings = timespans_strings.AnnotatedDurations() # durations_viola = timespans_strings.AnnotatedDurations() # durations_cello = timespans_strings.AnnotatedDurations() # time signatures for these timespan structures final_list = abjad.TimespanList() coincident_offsets = [] for ts_list1, ts_list2 in zip(instruments_timespans, instruments_timespans[1:]): for i, (span1, span2) in enumerate(zip(ts_list1, ts_list2)): if i == 0: coincident_offsets.append(abjad.Offset(0)) if span1.stop_offset == span2.stop_offset: coincident_offsets.append(abjad.Offset(span1.stop_offset)) else: if span1.stop_offset == span2.stop_offset: coincident_offsets.append(abjad.Offset(span1.stop_offset)) if span1.annotation == "Mat_B" and span2.annotation == "Mat_B": if span1 <= span2: coincident_offsets.append(span1.start_offset)