Example #1
0
 def mix(self, verbose=True):
     """
     Mix all the patterns into a single result sample.
     """
     if not self.patterns:
         if verbose:
             print("No patterns to mix, output is empty.")
         return Sample()
     # Total duration: each pattern's bar length (in ticks) converted to seconds.
     seconds_per_tick = 60.0 / self.bpm / self.ticks
     total_seconds = sum(
         len(next(iter(pattern.values()))) * seconds_per_tick
         for pattern in self.patterns
     )
     if verbose:
         print("Mixing {:d} patterns...".format(len(self.patterns)))
     result = Sample().make_32bit()
     for _, timestamp, sample in self.mixed_samples(tracker=False):
         if verbose:
             print("\r{:3.0f} % ".format(timestamp / total_seconds * 100),
                   end="")
         result.mix_at(timestamp, sample)
     # chop/extend to get to the precise total duration (in case of silence in the last bars etc)
     shortfall = total_seconds - result.duration
     if shortfall > 0:
         result.add_silence(shortfall)
     elif shortfall < 0:
         result.clip(0, total_seconds)
     if verbose:
         print("\rMix done.")
     return result
Example #2
0
 def mix_generator(self):
     """
     Returns a generator that produces samples that are the chronological
     chunks of the final output mix. This avoids having to mix it into one big
     output mix sample.
     """
     if not self.patterns:
         yield Sample()
         return
     # Total song duration in seconds: each pattern contributes its bar
     # length (in ticks) converted via bpm and ticks-per-beat.
     total_seconds = 0.0
     for p in self.patterns:
         bar = next(iter(p.values()))
         total_seconds += len(bar) * 60.0 / self.bpm / self.ticks
     mixed_duration = 0.0
     samples = self.mixed_samples()
     # get the first sample
     index, previous_timestamp, sample = next(samples)
     mixed = Sample().make_32bit()
     mixed.mix_at(previous_timestamp, sample)
     # continue mixing the following samples
     for index, timestamp, sample in samples:
         # Length of the chunk between the previous trigger and this one;
         # the current mix buffer must be exactly this long before yielding.
         trigger_duration = timestamp - previous_timestamp
         overflow = None
         if mixed.duration < trigger_duration:
             # fill with some silence to reach the next sample position
             mixed.add_silence(trigger_duration - mixed.duration)
         elif mixed.duration > trigger_duration:
             # chop off the sound that extends into the next sample position
             # keep this overflow and mix it later!
             overflow = mixed.split(trigger_duration)
         mixed_duration += mixed.duration
         yield mixed
         # NOTE(review): truthiness test — if Sample.split can ever return an
         # empty-but-valid Sample that evaluates falsy, that overflow would be
         # silently replaced by a fresh buffer here; confirm Sample.__bool__
         # semantics (an `is not None` check may be intended).
         mixed = overflow if overflow else Sample().make_32bit()
         mixed.mix(sample)
         previous_timestamp = timestamp
     # output the last remaining sample and extend it to the end of the duration if needed
     timestamp = total_seconds
     trigger_duration = timestamp - previous_timestamp
     if mixed.duration < trigger_duration:
         mixed.add_silence(trigger_duration - mixed.duration)
     elif mixed.duration > trigger_duration:
         mixed.clip(0, trigger_duration)
     mixed_duration += mixed.duration
     yield mixed
Example #3
0
    def mix(self, song):
        """
        Mix a song's melody and drum patterns into one 32-bit output sample.

        The song supplies bpm, ticks (ticks per beat) and two per-tick
        sequences: melody (single notes or chord lists) and drums (instrument
        groups). REST_NOTE entries are silent. Chords and drum groups are
        capped at five simultaneous sounds; chords are attenuated by size so
        larger chords do not clip.

        Returns the mixed Sample.
        """
        mixed = Sample().make_32bit()
        # seconds between two consecutive pattern indices
        time_per_index = 60.0 / song.bpm / song.ticks
        # per-chord-size attenuation: a lone note plays at full volume,
        # bigger chords get progressively quieter
        chord_volumes = [1, 0.9, 0.65, 0.6, 0.5]

        notes = max(len(song.melody), len(song.drums))
        for i in range(notes):
            timestamp = time_per_index * i

            try:
                chord = song.melody[i]
            except (IndexError, TypeError):
                # melody shorter than drums (or not indexable): no note here
                pass
            else:
                if chord != REST_NOTE:
                    # normalize to a group of at most 5 simultaneous notes
                    group = chord[:5] if isinstance(chord, list) else [chord]
                    # BUG FIX: the volume used to be keyed on len(chord),
                    # which for a plain (non-list) note measured the length
                    # of the note *string*, and could over-index the table
                    # for chords longer than 5; key on the group size instead.
                    volume = chord_volumes[len(group) - 1]
                    for note in group:
                        sample = self.samples[f'lead_{note}']
                        if len(group) > 1:
                            mixed.mix_at(timestamp, sample.at_volume(volume))
                        else:
                            mixed.mix_at(timestamp, sample)

            try:
                beat = song.drums[i]
            except (IndexError, TypeError):
                # drums shorter than melody (or not indexable): no hit here
                pass
            else:
                if beat != REST_NOTE:
                    # at most 5 simultaneous drum instruments per tick
                    for instrument in beat[:5]:
                        mixed.mix_at(timestamp, self.samples[instrument])

        return mixed