def testExtractPolyphonicSequences(self):
  """Extraction honors the min/max step discard bounds."""
  testing_lib.add_track_to_sequence(
      self.note_sequence, 0, [(60, 100, 0.0, 4.0)])

  quantized = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)

  # With no discard bounds the 4-step track is kept.
  extracted, _ = polyphony_lib.extract_polyphonic_sequences(quantized)
  self.assertEqual(1, len(extracted))

  # A 4-step track falls inside [2, 5], so it is still kept.
  extracted, _ = polyphony_lib.extract_polyphonic_sequences(
      quantized, min_steps_discard=2, max_steps_discard=5)
  self.assertEqual(1, len(extracted))

  # Shrink the note to 1 step: shorter than min_steps_discard=3, so dropped.
  self.note_sequence.notes[0].end_time = 1.0
  self.note_sequence.total_time = 1.0
  quantized = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  extracted, _ = polyphony_lib.extract_polyphonic_sequences(
      quantized, min_steps_discard=3, max_steps_discard=5)
  self.assertEqual(0, len(extracted))

  # Stretch the note to 10 steps: longer than max_steps_discard=5, so dropped.
  self.note_sequence.notes[0].end_time = 10.0
  self.note_sequence.total_time = 10.0
  quantized = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  extracted, _ = polyphony_lib.extract_polyphonic_sequences(
      quantized, min_steps_discard=3, max_steps_discard=5)
  self.assertEqual(0, len(extracted))
def testExtractNonZeroStart(self):
  """Extraction starting past the notes yields no sequences."""
  testing_lib.add_track_to_sequence(
      self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
  quantized = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)

  # Starting at step 4 skips the entire 4-step note, so the remaining
  # empty track is discarded by min_steps_discard=1.
  extracted, _ = polyphony_lib.extract_polyphonic_sequences(
      quantized, start_step=4, min_steps_discard=1)
  self.assertEqual(0, len(extracted))

  # Starting at step 0 keeps the whole note.
  extracted, _ = polyphony_lib.extract_polyphonic_sequences(
      quantized, start_step=0, min_steps_discard=1)
  self.assertEqual(1, len(extracted))
def transform(self, quantized_sequence):
  """Extracts polyphonic sequences from a quantized NoteSequence.

  Records the extraction statistics via self._set_stats and returns
  the extracted sequences.
  """
  extracted, extraction_stats = polyphony_lib.extract_polyphonic_sequences(
      quantized_sequence,
      min_steps_discard=self._min_steps,
      max_steps_discard=self._max_steps)
  self._set_stats(extraction_stats)
  return extracted
def testExtractPolyphonicMultiProgram(self):
  """A track mixing multiple programs is rejected."""
  testing_lib.add_track_to_sequence(
      self.note_sequence, 0,
      [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
  # Give one note a different program so the track mixes programs.
  self.note_sequence.notes[0].program = 2
  quantized = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)

  extracted, _ = polyphony_lib.extract_polyphonic_sequences(quantized)
  self.assertEqual(0, len(extracted))
def testExtractPolyphonicMultiInstrument(self):
  """A sequence with more than one instrument yields no extractions."""
  # Same note on two different instruments.
  for instrument in (0, 1):
    testing_lib.add_track_to_sequence(
        self.note_sequence, instrument, [(60, 100, 0.0, 4.0)])
  quantized = sequences_lib.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)

  extracted, _ = polyphony_lib.extract_polyphonic_sequences(quantized)
  self.assertEqual(0, len(extracted))
def _generate(self, input_sequence, generator_options):
  """Generates a polyphonic sequence, conditioning on the primer as a melody.

  The primer (or the requested input section of it) is quantized and then
  injected into the generation as a melody via a modify_events_callback,
  while the model itself is fed a blank sequence. The model is asked for
  events in batches until the requested number of quantized steps exists.

  Args:
    input_sequence: The NoteSequence to use as a primer.
    generator_options: A GeneratorOptions proto with at most one
        input_sections entry and exactly one generate_sections entry.

  Returns:
    The generated NoteSequence, layered on top of a copy of the primer.

  Raises:
    mm.SequenceGeneratorException: If more than one input section or not
        exactly one generate section was requested, or if the primer
        extends past the requested generation start time.
  """
  if len(generator_options.input_sections) > 1:
    raise mm.SequenceGeneratorException(
        'This model supports at most one input_sections message, but got %s' %
        len(generator_options.input_sections))
  if len(generator_options.generate_sections) != 1:
    raise mm.SequenceGeneratorException(
        'This model supports only 1 generate_sections message, but got %s' %
        len(generator_options.generate_sections))

  # This sequence will be quantized later, so it is guaranteed to have only 1
  # tempo.
  qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
  if input_sequence.tempos:
    qpm = input_sequence.tempos[0].qpm

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    # Only the requested slice of the input is used as the primer.
    input_section = generator_options.input_sections[0]
    primer_sequence = mm.extract_subsequence(input_sequence,
                                             input_section.start_time,
                                             input_section.end_time)
    input_start_step = self.seconds_to_steps(input_section.start_time, qpm)
  else:
    primer_sequence = input_sequence
    input_start_step = 0

  # 0 when the primer has no notes at all.
  last_end_time = (max(
      n.end_time for n in primer_sequence.notes)
                   if primer_sequence.notes else 0)
  if last_end_time > generate_section.start_time:
    raise mm.SequenceGeneratorException(
        'Got GenerateSection request for section that is before or equal to '
        'the end of the NoteSequence. This model can only extend sequences. '
        'Requested start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming sequence.
  quantized_primer_sequence = mm.quantize_note_sequence(
      primer_sequence, self.steps_per_quarter)

  extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
      quantized_primer_sequence, start_step=input_start_step)
  assert len(extracted_seqs) <= 1

  generate_start_step = self.seconds_to_steps(
      generate_section.start_time, qpm)
  generate_end_step = self.seconds_to_steps(generate_section.end_time, qpm)

  if extracted_seqs and extracted_seqs[0]:
    poly_seq = extracted_seqs[0]
  else:
    # If no track could be extracted, create an empty track that starts at the
    # requested generate_start_step. This will result in a sequence that
    # contains only the START token.
    poly_seq = polyphony_lib.PolyphonicSequence(
        steps_per_quarter=(quantized_primer_sequence.quantization_info.
                           steps_per_quarter),
        start_step=generate_start_step)

  # Ensure that the track extends up to the step we want to start generating.
  poly_seq.set_length(generate_start_step - poly_seq.start_step)
  # Trim any trailing end events to prepare the sequence for more events to be
  # appended during generation.
  poly_seq.trim_trailing_end_events()

  # Extract generation arguments from generator options.
  arg_types = {
      'temperature': lambda arg: arg.float_value,
      'beam_size': lambda arg: arg.int_value,
      'branch_factor': lambda arg: arg.int_value,
      'steps_per_iteration': lambda arg: arg.int_value
  }
  args = dict((name, value_fn(generator_options.args[name]))
              for name, value_fn in arg_types.items()
              if name in generator_options.args)

  # Inject the priming sequence as melody in the output of the generator.
  # Note that start_step is 0 because we overwrite poly_seq below. If we
  # included the priming sequence in poly_seq, it would be poly_seq.num_steps.
  melody_to_inject = copy.deepcopy(poly_seq)
  args['modify_events_callback'] = partial(
      _inject_melody, melody_to_inject, 0)

  # Overwrite poly_seq with a blank sequence to feed into the generator so it
  # is conditioned only on the melody events that are injected as the sequence
  # is created. Otherwise, the generator would have to determine the most
  # likely sequence to follow a monophonic line, which is something not
  # present in the current training data (Bach Chorales).
  poly_seq = polyphony_lib.PolyphonicSequence(
      steps_per_quarter=(
          quantized_primer_sequence.quantization_info.steps_per_quarter),
      start_step=generate_start_step)
  poly_seq.trim_trailing_end_events()

  # If we wanted to include the priming sequence and didn't clear poly_seq
  # above, this is how we would calculate total_steps.
  # total_steps = poly_seq.num_steps + (
  #     generate_end_step - generate_start_step)
  total_steps = generate_end_step - generate_start_step

  while poly_seq.num_steps < total_steps:
    # Assume it takes ~5 rnn steps to generate one quantized step.
    # Can't know for sure until generation is finished because the number of
    # notes per quantized step is variable.
    steps_to_gen = total_steps - poly_seq.num_steps
    rnn_steps_to_gen = 5 * steps_to_gen
    tf.logging.info(
        'Need to generate %d more steps for this sequence, will try asking '
        'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
    poly_seq = self._model.generate_polyphonic_sequence(
        len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
  poly_seq.set_length(total_steps)

  # Specify a base_note_sequence because the priming sequence is not included
  # in poly_seq. If we did not clear poly_seq above, then we would not want to
  # specify a base_note_sequence.
  generated_sequence = poly_seq.to_sequence(
      qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
  assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
  return generated_sequence
def _generate(self, input_sequence, generator_options):
  """Generates a polyphonic sequence that extends the priming sequence.

  The primer (or the requested input section of it) is quantized, extracted
  as a polyphonic track, and then extended by the model in batches of RNN
  steps until the requested number of quantized steps has been produced.

  Args:
    input_sequence: The NoteSequence to use as a primer.
    generator_options: A GeneratorOptions proto with at most one
        input_sections entry and exactly one generate_sections entry.

  Returns:
    The generated NoteSequence, including the primer's events.

  Raises:
    mm.SequenceGeneratorException: If more than one input section or not
        exactly one generate section was requested, or if the primer
        extends past the requested generation start time.
  """
  if len(generator_options.input_sections) > 1:
    raise mm.SequenceGeneratorException(
        'This model supports at most one input_sections message, but got %s' %
        len(generator_options.input_sections))
  if len(generator_options.generate_sections) != 1:
    raise mm.SequenceGeneratorException(
        'This model supports only 1 generate_sections message, but got %s' %
        len(generator_options.generate_sections))

  # This sequence will be quantized later, so it is guaranteed to have only 1
  # tempo.
  qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
  if input_sequence.tempos:
    qpm = input_sequence.tempos[0].qpm

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    # Only the requested slice of the input is used as the primer.
    input_section = generator_options.input_sections[0]
    primer_sequence = mm.extract_subsequence(input_sequence,
                                             input_section.start_time,
                                             input_section.end_time)
    input_start_step = self.seconds_to_steps(input_section.start_time, qpm)
  else:
    primer_sequence = input_sequence
    input_start_step = 0

  # 0 when the primer has no notes at all.
  last_end_time = (max(
      n.end_time for n in primer_sequence.notes)
                   if primer_sequence.notes else 0)
  if last_end_time > generate_section.start_time:
    raise mm.SequenceGeneratorException(
        'Got GenerateSection request for section that is before or equal to '
        'the end of the NoteSequence. This model can only extend sequences. '
        'Requested start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming sequence.
  quantized_primer_sequence = mm.quantize_note_sequence(
      primer_sequence, self.steps_per_quarter)

  extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
      quantized_primer_sequence, start_step=input_start_step)
  assert len(extracted_seqs) <= 1

  start_step = self.seconds_to_steps(generate_section.start_time, qpm)
  end_step = self.seconds_to_steps(generate_section.end_time, qpm)

  if extracted_seqs and extracted_seqs[0]:
    poly_seq = extracted_seqs[0]
  else:
    # If no track could be extracted, create an empty track that starts 1 step
    # before the request start_step. This will result in 1 step of silence
    # when the track is extended below.
    poly_seq = polyphony_lib.PolyphonicSequence(
        steps_per_quarter=(
            quantized_primer_sequence.quantization_info.steps_per_quarter),
        start_step=start_step)

  # Ensure that the track extends up to the step we want to start generating.
  poly_seq.set_length(start_step - poly_seq.start_step)
  # Trim trailing end events so new events can be appended during generation.
  poly_seq.trim_trailing_end_and_step_end_events()

  # Extract generation arguments from generator options.
  arg_types = {
      'temperature': lambda arg: arg.float_value,
      'beam_size': lambda arg: arg.int_value,
      'branch_factor': lambda arg: arg.int_value,
      'steps_per_iteration': lambda arg: arg.int_value
  }
  args = dict((name, value_fn(generator_options.args[name]))
              for name, value_fn in arg_types.items()
              if name in generator_options.args)

  total_steps = end_step - start_step

  while poly_seq.num_steps < total_steps:
    # Assume it takes ~5 rnn steps to generate one quantized step.
    # Can't know for sure until generation is finished because the number of
    # notes per quantized step is variable.
    steps_to_gen = total_steps - poly_seq.num_steps
    rnn_steps_to_gen = 5 * steps_to_gen
    tf.logging.info(
        'Need to generate %d more steps for this sequence, will try asking '
        'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
    poly_seq = self._model.generate_polyphonic_sequence(
        len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
  poly_seq.set_length(total_steps)

  generated_sequence = poly_seq.to_sequence(qpm=qpm)
  assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
  return generated_sequence