Code Example #1
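A unit test, apparently from Magenta's performance_lib test suite, showing that extract_performances honors the start_step argument: the single note spans steps 0 to 400 at 100 steps per second, so extraction starting at step 400 finds no events and the performance is discarded, while extraction from step 0 yields one performance.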
  def testExtractNonZeroStart(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
        self.note_sequence, steps_per_second=100)

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, start_step=400, min_events_discard=1)
    self.assertEqual(0, len(perfs))
    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, start_step=0, min_events_discard=1)
    self.assertEqual(1, len(perfs))
Code Example #2
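A transform method, apparently from a Magenta pipeline class: it forwards the pipeline's event bounds and velocity-bin count to extract_performances, records the extraction stats, and returns the extracted performances.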
  def transform(self, quantized_sequence):
    performances, stats = performance_lib.extract_performances(
        quantized_sequence,
        min_events_discard=self._min_events,
        max_events_truncate=self._max_events,
        num_velocity_bins=self._num_velocity_bins)
    self._set_stats(stats)
    return performances
Code Example #3
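A unit test showing that extract_performances skips sequences containing more than one MIDI program: after the first note's program is changed to 2, extraction returns no performances.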
  def testExtractPerformancesMultiProgram(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    self.note_sequence.notes[0].program = 2
    quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
        self.note_sequence, steps_per_second=100)

    perfs, _ = performance_lib.extract_performances(quantized_sequence)
    self.assertEqual(0, len(perfs))
Code Example #4
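A unit test covering the basic extraction path together with the min_events_discard and max_events_truncate arguments: the performance is kept when it meets the event minimum, discarded when min_events_discard exceeds its event count, and truncated to the requested length when max_events_truncate is smaller.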
  def testExtractPerformances(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence_absolute(
        self.note_sequence, steps_per_second=100)

    perfs, _ = performance_lib.extract_performances(quantized_sequence)
    self.assertEqual(1, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=1, max_events_truncate=10)
    self.assertEqual(1, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=8, max_events_truncate=10)
    self.assertEqual(0, len(perfs))

    perfs, _ = performance_lib.extract_performances(
        quantized_sequence, min_events_discard=1, max_events_truncate=3)
    self.assertEqual(1, len(perfs))
    self.assertEqual(3, len(perfs[0]))
Code Example #5
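A _generate method, apparently from Magenta's Performance RNN sequence generator: it quantizes the primer, uses extract_performances to pull out at most one performance starting at the primer's start step, then repeatedly asks the model for more RNN steps (assuming roughly 10 notes per second and 4 RNN steps per note) until the requested generate section is filled, or after a single pass when fill_generate_section is off.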
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, self.steps_per_second, quantize_cutoff=0.0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time > generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_primer_sequence = mm.quantize_note_sequence_absolute(
        primer_sequence, self.steps_per_second)

    extracted_perfs, _ = performance_lib.extract_performances(
        quantized_primer_sequence, start_step=input_start_step,
        num_velocity_bins=self.num_velocity_bins)
    assert len(extracted_perfs) <= 1

    generate_start_step = mm.quantize_to_step(
        generate_section.start_time, self.steps_per_second, quantize_cutoff=0.0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    generate_end_step = mm.quantize_to_step(
        generate_section.end_time, self.steps_per_second, quantize_cutoff=1.0)

    if extracted_perfs and extracted_perfs[0]:
      performance = extracted_perfs[0]
    else:
      # If no track could be extracted, create an empty track that starts at the
      # requested generate_start_step.
      performance = performance_lib.Performance(
          steps_per_second=(
              quantized_primer_sequence.quantization_info.steps_per_second),
          start_step=generate_start_step,
          num_velocity_bins=self.num_velocity_bins)

    # Ensure that the track extends up to the step we want to start generating.
    performance.set_length(generate_start_step - performance.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    total_steps = performance.num_steps + (
        generate_end_step - generate_start_step)

    if not performance:
      # Primer is empty; let's just start with silence.
      performance.set_length(min(performance_lib.MAX_SHIFT_STEPS, total_steps))

    while performance.num_steps < total_steps:
      # Assume there's around 10 notes per second and 4 RNN steps per note.
      # Can't know for sure until generation is finished because the number of
      # notes per quantized step is variable.
      steps_to_gen = total_steps - performance.num_steps
      rnn_steps_to_gen = 40 * int(math.ceil(
          float(steps_to_gen) / performance_lib.DEFAULT_STEPS_PER_SECOND))
      tf.logging.info(
          'Need to generate %d more steps for this sequence, will try asking '
          'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
      performance = self._model.generate_performance(
          len(performance) + rnn_steps_to_gen, performance, **args)

      if not self.fill_generate_section:
        # In the interest of speed just go through this loop once, which may not
        # entirely fill the generate section.
        break

    performance.set_length(total_steps)

    generated_sequence = performance.to_sequence(
        max_note_duration=self.max_note_duration)

    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
Code Example #6
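A longer _generate variant, apparently from the conditional Performance RNN generator: it extends the previous example with note-density, pitch-class-histogram, and disable-conditioning arguments, normalizing each histogram to sum to one and converting each conditioning value into a per-step function before generation.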
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, self.steps_per_second, quantize_cutoff=0.0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time > generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_primer_sequence = mm.quantize_note_sequence_absolute(
        primer_sequence, self.steps_per_second)

    extracted_perfs, _ = performance_lib.extract_performances(
        quantized_primer_sequence, start_step=input_start_step,
        num_velocity_bins=self.num_velocity_bins)
    assert len(extracted_perfs) <= 1

    generate_start_step = mm.quantize_to_step(
        generate_section.start_time, self.steps_per_second, quantize_cutoff=0.0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    generate_end_step = mm.quantize_to_step(
        generate_section.end_time, self.steps_per_second, quantize_cutoff=1.0)

    if extracted_perfs and extracted_perfs[0]:
      performance = extracted_perfs[0]
    else:
      # If no track could be extracted, create an empty track that starts at the
      # requested generate_start_step.
      performance = performance_lib.Performance(
          steps_per_second=(
              quantized_primer_sequence.quantization_info.steps_per_second),
          start_step=generate_start_step,
          num_velocity_bins=self.num_velocity_bins)

    # Ensure that the track extends up to the step we want to start generating.
    performance.set_length(generate_start_step - performance.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'note_density': lambda arg: ast.literal_eval(arg.string_value),
        'pitch_histogram': lambda arg: ast.literal_eval(arg.string_value),
        'disable_conditioning': lambda arg: ast.literal_eval(arg.string_value),
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    # Make sure note density is present when conditioning on it and not present
    # otherwise.
    if not self.note_density_conditioning and 'note_density' in args:
      tf.logging.warning(
          'Not conditioning on note density, ignoring requested density.')
      del args['note_density']
    if self.note_density_conditioning and 'note_density' not in args:
      tf.logging.warning(
          'Conditioning on note density but none requested, using default.')
      args['note_density'] = [DEFAULT_NOTE_DENSITY]

    # Make sure pitch class histogram is present when conditioning on it and not
    # present otherwise.
    if not self.pitch_histogram_conditioning and 'pitch_histogram' in args:
      tf.logging.warning(
          'Not conditioning on pitch histogram, ignoring requested histogram.')
      del args['pitch_histogram']
    if self.pitch_histogram_conditioning and 'pitch_histogram' not in args:
      tf.logging.warning(
          'Conditioning on pitch histogram but none requested, using default.')
      args['pitch_histogram'] = [DEFAULT_PITCH_HISTOGRAM]

    # Make sure disable conditioning flag is present when conditioning is
    # optional and not present otherwise.
    if not self.optional_conditioning and 'disable_conditioning' in args:
      tf.logging.warning(
          'No optional conditioning, ignoring disable conditioning flag.')
      del args['disable_conditioning']
    if self.optional_conditioning and 'disable_conditioning' not in args:
      args['disable_conditioning'] = [False]

    # If a single note density, pitch class histogram, or disable flag is
    # present, convert to list to simplify further processing.
    if (self.note_density_conditioning and
        not isinstance(args['note_density'], list)):
      args['note_density'] = [args['note_density']]
    if (self.pitch_histogram_conditioning and
        not isinstance(args['pitch_histogram'][0], list)):
      args['pitch_histogram'] = [args['pitch_histogram']]
    if (self.optional_conditioning and
        not isinstance(args['disable_conditioning'], list)):
      args['disable_conditioning'] = [args['disable_conditioning']]

    # Make sure each pitch class histogram sums to one.
    if self.pitch_histogram_conditioning:
      for i in range(len(args['pitch_histogram'])):
        total = sum(args['pitch_histogram'][i])
        if total > 0:
          args['pitch_histogram'][i] = [float(count) / total
                                        for count in args['pitch_histogram'][i]]
        else:
          tf.logging.warning('Pitch histogram is empty, using default.')
          args['pitch_histogram'][i] = DEFAULT_PITCH_HISTOGRAM

    total_steps = performance.num_steps + (
        generate_end_step - generate_start_step)

    # Set up functions that map generation step to note density, pitch
    # histogram, and disable conditioning flag.
    mean_note_density = DEFAULT_NOTE_DENSITY
    if self.note_density_conditioning:
      args['note_density_fn'] = partial(
          _step_to_note_density,
          num_steps=total_steps,
          note_densities=args['note_density'])
      mean_note_density = sum(args['note_density']) / len(args['note_density'])
      del args['note_density']
    if self.pitch_histogram_conditioning:
      args['pitch_histogram_fn'] = partial(
          _step_to_pitch_histogram,
          num_steps=total_steps,
          pitch_histograms=args['pitch_histogram'])
      del args['pitch_histogram']
    if self.optional_conditioning:
      args['disable_conditioning_fn'] = partial(
          _step_to_disable_conditioning,
          num_steps=total_steps,
          disable_conditioning_flags=args['disable_conditioning'])
      del args['disable_conditioning']

    if not performance:
      # Primer is empty; let's just start with silence.
      performance.set_length(min(performance_lib.MAX_SHIFT_STEPS, total_steps))

    while performance.num_steps < total_steps:
      # Assume the average specified (or default) note density and 4 RNN steps
      # per note. Can't know for sure until generation is finished because the
      # number of notes per quantized step is variable.
      note_density = max(1.0, mean_note_density)
      steps_to_gen = total_steps - performance.num_steps
      rnn_steps_to_gen = int(math.ceil(
          4.0 * note_density * steps_to_gen / self.steps_per_second))
      tf.logging.info(
          'Need to generate %d more steps for this sequence, will try asking '
          'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
      performance = self._model.generate_performance(
          len(performance) + rnn_steps_to_gen, performance, **args)

      if not self.fill_generate_section:
        # In the interest of speed just go through this loop once, which may not
        # entirely fill the generate section.
        break

    performance.set_length(total_steps)

    generated_sequence = performance.to_sequence(
        max_note_duration=self.max_note_duration)

    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
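For reference, here is a minimal end-to-end sketch of the pattern the examples above share: build a NoteSequence, quantize it against absolute time, and extract performances. The import paths below are an assumption (they follow the older Magenta package layout this code appears to come from and vary across versions); the calls themselves mirror the ones shown in the tests.

# A minimal usage sketch. Import paths are assumptions and may differ
# depending on the installed Magenta version.
from magenta.models.performance_rnn import performance_lib
from magenta.music import sequences_lib
from magenta.protobuf import music_pb2

sequence = music_pb2.NoteSequence()

# Add a single 4-second middle C (pitch 60) at velocity 100, matching the
# note used in the tests above.
note = sequence.notes.add()
note.pitch = 60
note.velocity = 100
note.start_time = 0.0
note.end_time = 4.0
sequence.total_time = 4.0

# Quantize against absolute time at 100 steps per second.
quantized = sequences_lib.quantize_note_sequence_absolute(
    sequence, steps_per_second=100)

# Extract performances; stats record why tracks were kept or dropped.
performances, stats = performance_lib.extract_performances(
    quantized, min_events_discard=1)
print('Extracted %d performance(s)' % len(performances))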