Code Example #1
def create_example(filename):
    """Processes an audio file into an Example proto."""
    wav_data = tf.gfile.Open(filename, 'rb').read()
    example_list = list(
        split_audio_and_label_data.process_record(
            wav_data=wav_data,
            ns=music_pb2.NoteSequence(),
            example_id=filename,
            min_length=0,
            max_length=-1,
            allow_empty_notesequence=True))
    assert len(example_list) == 1
    return example_list[0].SerializeToString()
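The returned bytes can be parsed back for inspection; a minimal round-trip sketch (the WAV path is hypothetical):

serialized = create_example('/tmp/example.wav')  # hypothetical path
example = tf.train.Example()
example.ParseFromString(serialized)  # recovers the serialized feature map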
Code Example #2
    def testStartCapture_Iterate_Period_Overrun(self):
        start_time = 1.0
        captor = self.midi_hub.start_capture(120,
                                             start_time,
                                             stop_signal=midi_hub.MidiSignal(
                                                 type='control_change',
                                                 control=1))

        for msg in self.capture_messages[:-1]:
            threading.Timer(0.1 * msg.time, self.port.callback,
                            args=[msg]).start()

        period = 0.26
        captured_seqs = []
        wall_start_time = time.time()
        for captured_seq in captor.iterate(period=period):
            time.sleep(0.5)
            captured_seqs.append(captured_seq)

        self.assertEqual(2, len(captured_seqs))

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        end_time = captured_seqs[0].total_time
        self.assertAlmostEqual(wall_start_time + period, end_time, delta=0.005)
        expected_seq.total_time = end_time
        testing_lib.add_track_to_sequence(expected_seq, 0,
                                          [Note(1, 64, 2, end_time)])
        self.assertProtoEquals(captured_seqs[0], expected_seq)

        expected_seq = music_pb2.NoteSequence()
        expected_seq.tempos.add(qpm=120)
        expected_seq.total_time = 6
        testing_lib.add_track_to_sequence(
            expected_seq, 0,
            [Note(1, 64, 2, 5),
             Note(2, 64, 3, 4),
             Note(3, 64, 4, 6)])
        self.assertProtoEquals(captured_seqs[1], expected_seq)
Code Example #3
    def to_sequence(self,
                    velocity=100,
                    instrument=0,
                    sequence_start_time=0.0,
                    bpm=120.0):
        """Converts the Melody to Sequence proto.

    Args:
      velocity: Midi velocity to give each note. Between 1 and 127 (inclusive).
      instrument: Midi instrument to give each note.
      sequence_start_time: A time in seconds (float) that the first note in the
        sequence will land on.
      bpm: Beats per minute (float).

    Returns:
      A NoteSequence proto encoding the given melody.
    """
        seconds_per_step = 60.0 / bpm * BEATS_PER_BAR / self.steps_per_bar

        sequence = music_pb2.NoteSequence()
        sequence.tempos.add().bpm = bpm
        sequence.ticks_per_beat = STANDARD_PPQ

        current_sequence_note = None
        for step, note in enumerate(self):
            if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH:
                # End any sustained notes.
                if current_sequence_note is not None:
                    current_sequence_note.end_time = (step * seconds_per_step +
                                                      sequence_start_time)

                # Add a note.
                current_sequence_note = sequence.notes.add()
                current_sequence_note.start_time = (step * seconds_per_step +
                                                    sequence_start_time)
                # Give the note an end time now just to be sure it gets closed.
                current_sequence_note.end_time = (
                    (step + 1) * seconds_per_step + sequence_start_time)
                current_sequence_note.pitch = note
                current_sequence_note.velocity = velocity
                current_sequence_note.instrument = instrument

            elif note == NOTE_OFF:
                # End any sustained notes.
                if current_sequence_note is not None:
                    current_sequence_note.end_time = (step * seconds_per_step +
                                                      sequence_start_time)
                    current_sequence_note = None

        return sequence
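A minimal usage sketch, assuming Melody accepts a list of step events the way Code Example #13 passes one to magenta.music.Melody (pitch values plus the NO_EVENT and NOTE_OFF sentinels used above):

melody = Melody([60, NO_EVENT, 64, NOTE_OFF])  # C4 held two steps, then E4
sequence = melody.to_sequence(velocity=80, bpm=140.0)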
Code Example #4
  def encode(self, s):
    """Transform a MusicXML filename into a list of score event index tuples.

    Args:
      s: Path to the MusicXML file.

    Returns:
      ids: List of score event index tuples.
    """
    if s:
      ns = magenta.music.musicxml_file_to_sequence_proto(s)
    else:
      ns = music_pb2.NoteSequence()
    return self.encode_note_sequence(ns)
Code Example #5
  def encode(self, s):
    """Transform a MIDI filename into a list of performance event indices.

    Args:
      s: Path to the MIDI file.

    Returns:
      ids: List of performance event indices.
    """
    if s:
      ns = magenta.music.midi_file_to_sequence_proto(s)
    else:
      ns = music_pb2.NoteSequence()
    return self.encode_note_sequence(ns)
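Code Examples #4 and #5 share one pattern: a falsy path falls back to an empty NoteSequence, so encode() always returns a well-formed list. A usage sketch, assuming encoder is an instance of the enclosing class and the MIDI path is hypothetical:

ids = encoder.encode('/tmp/primer.mid')  # performance event indices
empty_ids = encoder.encode('')           # encodes an empty NoteSequence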
Code Example #6
 def setUp(self):
     sequence = music_pb2.NoteSequence()
     sequence.tempos.add(qpm=60)
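     # Each tuple is (pitch, velocity, start_time, end_time).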
     testing_lib.add_track_to_sequence(sequence, 0, [(32, 100, 2, 4),
                                                     (33, 100, 6, 11),
                                                     (34, 100, 11, 13),
                                                     (35, 100, 17, 18)])
     testing_lib.add_track_to_sequence(sequence,
                                       1, [(57, 80, 4, 4.1),
                                           (58, 80, 12, 12.1)],
                                       is_drum=True)
     testing_lib.add_chords_to_sequence(sequence, [('N.C.', 0), ('C', 8),
                                                   ('Am', 16)])
     self.sequence = sequence
Code Example #7
File: midi_io_test.py Project: DavidPrimor/magenta
    def testEmptySequenceToPrettyMidi_DropEventsAfterLastNote(self):
        source_sequence = music_pb2.NoteSequence()

        # Translate without dropping.
        translated_midi = midi_io.sequence_proto_to_pretty_midi(
            source_sequence)
        self.assertEqual(1, len(translated_midi.instruments))
        self.assertEqual(0, len(translated_midi.instruments[0].notes))

        # Translate dropping anything after 30 seconds.
        translated_midi = midi_io.sequence_proto_to_pretty_midi(
            source_sequence, drop_events_n_seconds_after_last_note=30)
        self.assertEqual(1, len(translated_midi.instruments))
        self.assertEqual(0, len(translated_midi.instruments[0].notes))
Code Example #8
File: sequences_lib.py Project: hyerim1048/magenta
def concatenate_sequences(sequences, sequence_durations=None):
    """Concatenate a series of NoteSequences together.

  Individual sequences will be shifted using shift_sequence_times and then
  merged together using the protobuf MergeFrom method. This means that any
  global values (e.g., ticks_per_quarter) will be overwritten by each sequence
  and only the final value will be used. After this, redundant data will be
  removed with remove_redundant_data.

  Args:
    sequences: A list of sequences to concatenate.
    sequence_durations: An optional list of sequence durations to use. If not
      specified, the total_time value will be used. Specifying durations is
      useful if the sequences to be concatenated are effectively longer than
      their total_time (e.g., a sequence that ends with a rest).

  Returns:
    A new sequence that is the result of concatenating *sequences.

  Raises:
    ValueError: If the length of sequences and sequence_durations do not match
        or if a specified duration is less than the total_time of the sequence.
  """
    if sequence_durations and len(sequences) != len(sequence_durations):
        raise ValueError(
            'sequences and sequence_durations must be the same length.')
    current_total_time = 0
    cat_seq = music_pb2.NoteSequence()
    for i in range(len(sequences)):
        sequence = sequences[i]
        if sequence_durations and sequence_durations[i] < sequence.total_time:
            raise ValueError(
                'Specified sequence duration ({}) must not be less than the '
                'total_time of the sequence ({})'.format(
                    sequence_durations[i], sequence.total_time))
        if current_total_time > 0:
            cat_seq.MergeFrom(
                shift_sequence_times(sequence, current_total_time))
        else:
            cat_seq.MergeFrom(sequence)

        if sequence_durations:
            current_total_time += sequence_durations[i]
        else:
            current_total_time = cat_seq.total_time

    # Delete subsequence_info because we've joined several subsequences.
    cat_seq.ClearField('subsequence_info')

    return remove_redundant_data(cat_seq)
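A minimal usage sketch, assuming two populated NoteSequence protos seq_a and seq_b:

# Plain concatenation: seq_b's events are shifted by seq_a.total_time.
combined = concatenate_sequences([seq_a, seq_b])
# Treat seq_a as 8 seconds long (e.g., it ends with a rest) before appending.
padded = concatenate_sequences(
    [seq_a, seq_b], sequence_durations=[8.0, seq_b.total_time])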
Code Example #9
  def testToNoteSequence(self):
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=4, max_tensors_per_notesequence=1)
    _, output_tensors = converter.to_tensors(
        filter_instrument(self.sequence, 0))
    sequences = converter.to_notesequences(output_tensors)

    self.assertEqual(1, len(sequences))
    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(32, 80, 1.0, 2.0), (33, 80, 3.0, 5.5), (34, 80, 5.5, 6.5)])
    self.assertProtoEquals(expected_sequence, sequences[0])
Code Example #10
def create_example(filename):
    """Processes an audio file into an Example proto."""
    wav_data = tf.gfile.Open(filename, 'rb').read()
    example_list = list(
        audio_label_data_utils.process_record(
            wav_data=wav_data,
            ns=music_pb2.NoteSequence(),
            # decode to handle filenames with extended characters.
            example_id=six.ensure_text(filename, 'utf-8'),
            min_length=0,
            max_length=-1,
            allow_empty_notesequence=True))
    assert len(example_list) == 1
    return example_list[0].SerializeToString()
Code Example #11
    def testPerformanceRnnPipeline(self):
        note_sequence = music_pb2.NoteSequence()
        magenta.music.testing_lib.add_track_to_sequence(
            note_sequence, 0, [(36, 100, 0.00, 2.0), (40, 55, 2.1, 5.0),
                               (44, 80, 3.6, 5.0), (41, 45, 5.1, 8.0),
                               (64, 100, 6.6, 10.0), (55, 120, 8.1, 11.0),
                               (39, 110, 9.6, 9.7), (53, 99, 11.1, 14.1),
                               (51, 40, 12.6, 13.0), (55, 100, 14.1, 15.0),
                               (54, 90, 15.6, 17.0), (60, 100, 17.1, 18.0)])

        pipeline_inst = performance_rnn_pipeline.get_pipeline(
            min_events=32, max_events=512, eval_ratio=0, config=self.config)
        result = pipeline_inst.transform(note_sequence)
        self.assertTrue(len(result['training_performances']))
Code Example #12
    def testAddChordsToSequence(self):
        note_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
        note_sequence.tempos.add(qpm=60.0)
        testing_lib.add_chords_to_sequence(note_sequence,
                                           [('N.C.', 0), ('C', 2), ('G7', 6)])
        note_sequence.total_time = 8.0

        expected_sequence = copy.deepcopy(note_sequence)
        del note_sequence.text_annotations[:]

        chords = [NO_CHORD, 'C', 'C', 'G7']
        chord_times = [0.0, 2.0, 4.0, 6.0]
        chords_lib.add_chords_to_sequence(note_sequence, chords, chord_times)

        self.assertEqual(expected_sequence, note_sequence)
Code Example #13
def run_with_flags(generator):
    FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
    primer_midi = None
    if FLAGS.primer_midi:
        primer_midi = os.path.expanduser(FLAGS.primer_midi)
    if not tf.gfile.Exists(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)
    primer_sequence = None
    qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
    if FLAGS.primer_melody:
        primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
        primer_sequence = primer_melody.to_sequence(qpm=qpm)
    elif primer_midi:
        primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
        if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
            qpm = primer_sequence.tempos[0].qpm
    else:
        tf.logging.warning('No priming sequence specified. Defaulting to '
                           'a single middle C.')
        primer_melody = magenta.music.Melody([60])
        primer_sequence = primer_melody.to_sequence(qpm=qpm)

    # Derive the total number of seconds to generate based on the QPM of the
    # priming sequence and the num_steps flag.
    seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
    total_seconds = FLAGS.num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at
    # the end of the priming sequence and continuing until the sequence is
    # num_steps long.
    generator_options = generator_pb2.GeneratorOptions()
    if primer_sequence:
        input_sequence = primer_sequence
        # Set the start time to begin on the next step after the last note.
        last_end_time = (max(n.end_time for n in primer_sequence.notes)
                         if primer_sequence.notes else 0)
        generate_section = generator_options.generate_sections.add(
            start_time=last_end_time + seconds_per_step,
            end_time=total_seconds)
        if generate_section.start_time >= generate_section.end_time:
            tf.logging.fatal(
                'Priming sequence is longer than the total number of steps '
                'requested: Priming sequence length: %s, Generation length '
                'requested: %s',
                generate_section.start_time, total_seconds)
            return
    else:
        input_sequence = music_pb2.NoteSequence()
        input_sequence.tempos.add().qpm = qpm
        generate_section = generator_options.generate_sections.add(
            start_time=0, end_time=total_seconds)

    generator_options.args['temperature'].float_value = FLAGS.temperature
    generator_options.args['beam_size'].int_value = FLAGS.beam_size
    generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
    generator_options.args[
        'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
    tf.logging.debug('input_sequence: %s', input_sequence)
    tf.logging.debug('generator_options: %s', generator_options)

    # Make the generate request num_outputs times and save each output as a
    # MIDI file.
    date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
    digits = len(str(FLAGS.num_outputs))
    for i in range(FLAGS.num_outputs):
        generated_sequence = generator.generate(input_sequence,
                                                generator_options)
        midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
        midi_path = os.path.join(FLAGS.output_dir, midi_filename)
        magenta.music.sequence_proto_to_midi_file(generated_sequence,
                                                  midi_path)

    tf.logging.info('Wrote %d MIDI files to %s',
                    FLAGS.num_outputs, FLAGS.output_dir)
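run_with_flags relies on flags defined elsewhere in the script; a sketch of the definitions it assumes, using the TF1 flags API (defaults here are illustrative, not the script's actual values):

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('output_dir', '/tmp/generated', 'Output directory.')
tf.app.flags.DEFINE_string('primer_midi', '', 'Path to a priming MIDI file.')
tf.app.flags.DEFINE_string('primer_melody', '',
                           'String representation of a Python list of pitches.')
tf.app.flags.DEFINE_float('qpm', None, 'Quarter notes per minute.')
tf.app.flags.DEFINE_integer('num_steps', 128, 'Total steps to generate.')
tf.app.flags.DEFINE_integer('num_outputs', 10, 'Number of MIDI files to write.')
tf.app.flags.DEFINE_float('temperature', 1.0, 'Sampling randomness.')
tf.app.flags.DEFINE_integer('beam_size', 1, 'Beam width for beam search.')
tf.app.flags.DEFINE_integer('branch_factor', 1, 'Beam search branch factor.')
tf.app.flags.DEFINE_integer('steps_per_iteration', 1,
                            'Steps generated per model iteration.')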
Code Example #14
File: sequences_lib.py Project: xiangyuwei/magenta
def stretch_note_sequence(note_sequence, stretch_factor):
    """Apply a constant temporal stretch to a NoteSequence proto.

  Args:
    note_sequence: The NoteSequence to stretch.
    stretch_factor: How much to stretch the NoteSequence. Values greater than
        one increase the length of the NoteSequence (making it "slower"). Values
        less than one decrease the length of the NoteSequence (making it
        "faster").

  Returns:
    A stretched copy of the original NoteSequence.

  Raises:
    QuantizationStatusException: If the `note_sequence` is quantized. Only
        unquantized NoteSequences can be stretched.
  """
    if is_quantized_sequence(note_sequence):
        raise QuantizationStatusException(
            'Can only stretch unquantized NoteSequence.')

    stretched_sequence = music_pb2.NoteSequence()
    stretched_sequence.CopyFrom(note_sequence)

    if stretch_factor == 1.0:
        return stretched_sequence

    # Stretch all notes.
    for note in stretched_sequence.notes:
        note.start_time *= stretch_factor
        note.end_time *= stretch_factor
    stretched_sequence.total_time *= stretch_factor

    # Stretch all other event times.
    events = itertools.chain(stretched_sequence.time_signatures,
                             stretched_sequence.key_signatures,
                             stretched_sequence.tempos,
                             stretched_sequence.pitch_bends,
                             stretched_sequence.control_changes,
                             stretched_sequence.text_annotations)
    for event in events:
        event.time *= stretch_factor

    # Stretch tempos.
    for tempo in stretched_sequence.tempos:
        tempo.qpm /= stretch_factor

    return stretched_sequence
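A usage sketch, assuming ns is an unquantized NoteSequence proto:

slower = stretch_note_sequence(ns, 2.0)   # all times double, qpm halves
faster = stretch_note_sequence(ns, 0.5)   # all times halve, qpm doubles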
Code Example #15
File: abc_parser.py Project: ryan-b2000/stardust
    def __init__(self, tune_lines):
        self._ns = music_pb2.NoteSequence()
        # Standard ABC fields.
        self._ns.source_info.source_type = (
            music_pb2.NoteSequence.SourceInfo.SCORE_BASED)
        self._ns.source_info.encoding_type = (
            music_pb2.NoteSequence.SourceInfo.ABC)
        self._ns.source_info.parser = (
            music_pb2.NoteSequence.SourceInfo.MAGENTA_ABC)
        self._ns.ticks_per_quarter = constants.STANDARD_PPQ

        self._current_time = 0
        self._accidentals = ABCTune._sig_to_accidentals(0)
        self._bar_accidentals = {}
        self._current_unit_note_length = None
        self._current_expected_repeats = None

        # Default dynamic should be !mf! as per:
        # http://abcnotation.com/wiki/abc:standard:v2.1#decorations
        self._current_velocity = ABCTune.DECORATION_TO_VELOCITY['!mf!']

        self._in_header = True
        self._header_tempo_unit = None
        self._header_tempo_rate = None
        for line in tune_lines:
            line = re.sub('%.*$', '', line)  # Strip comments.
            line = line.strip()  # Strip whitespace.
            if not line:
                continue

            # If the line begins with a letter and a colon, it's an information
            # field. Extract it.
            info_field_match = ABCTune.INFORMATION_FIELD_PATTERN.match(line)
            if info_field_match:
                self._parse_information_field(info_field_match.group(1),
                                              info_field_match.group(2))
            else:
                if self._in_header:
                    self._set_values_from_header()
                    self._in_header = False
                self._parse_music_code(line)
        if self._in_header:
            self._set_values_from_header()

        self._finalize()

        if self._ns.notes:
            self._ns.total_time = self._ns.notes[-1].end_time
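ABCTune is normally driven through the module's public entry point rather than constructed directly; a sketch assuming Magenta's abc_parser.parse_abc_tunebook, which returns a dict of tunes keyed by reference number plus a list of parse exceptions:

tunebook = 'X:1\nT:Example\nM:4/4\nL:1/8\nK:C\nCDEF GABc|\n'
tunes, exceptions = abc_parser.parse_abc_tunebook(tunebook)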
Code Example #16
  def testCaptureSequence_StopSignal(self):
    start_time = 1.0

    threading.Timer(0.1, self.send_capture_messages).start()

    captured_seq = self.midi_hub.capture_sequence(
        120, start_time,
        stop_signal=midi_hub.MidiSignal(type='control_change', control=1))

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = 6.0
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, 6)])
    self.assertProtoEquals(captured_seq, expected_seq)
Code Example #17
  def testNonEmptySequenceWithNoNotesToPrettyMidi_DropEventsAfterLastNote(self):
    source_sequence = music_pb2.NoteSequence()
    source_sequence.tempos.add(time=0, qpm=120)
    source_sequence.tempos.add(time=10, qpm=160)
    source_sequence.tempos.add(time=40, qpm=240)

    # Translate without dropping.
    translated_midi = midi_io.sequence_proto_to_pretty_midi(
        source_sequence)
    self.CheckPrettyMidiAndSequence(translated_midi, source_sequence)

    # Translate dropping anything after 30 seconds.
    translated_midi = midi_io.sequence_proto_to_pretty_midi(
        source_sequence, drop_events_n_seconds_after_last_note=30)
    del source_sequence.tempos[-1]
    self.CheckPrettyMidiAndSequence(translated_midi, source_sequence)
Code Example #18
  def testInferChordsForSequenceWithBeats(self):
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 1.1), (64, 100, 0.0, 1.1), (67, 100, 0.0, 1.1),   # C
         (62, 100, 1.1, 1.9), (65, 100, 1.1, 1.9), (69, 100, 1.1, 1.9),   # Dm
         (60, 100, 1.9, 3.0), (65, 100, 1.9, 3.0), (69, 100, 1.9, 3.0),   # F
         (59, 100, 3.0, 4.5), (62, 100, 3.0, 4.5), (67, 100, 3.0, 4.5)])  # G
    testing_lib.add_beats_to_sequence(sequence, [0.0, 1.1, 1.9, 1.9, 3.0])
    chord_inference.infer_chords_for_sequence(sequence)

    expected_chords = [('C', 0.0), ('Dm', 1.1), ('F', 1.9), ('G', 3.0)]
    chords = [(ta.text, ta.time) for ta in sequence.text_annotations
              if ta.annotation_type == CHORD_SYMBOL]

    self.assertEqual(expected_chords, chords)
Code Example #19
File: midi_hub.py Project: Tiasa/CreativeComposer
 def __init__(self, qpm, start_time=0, stop_time=None, stop_signal=None):
     # A lock for synchronization.
     self._lock = threading.RLock()
     self._receive_queue = Queue.Queue()
     self._captured_sequence = music_pb2.NoteSequence()
     self._captured_sequence.tempos.add(qpm=qpm)
     self._start_time = start_time
     self._stop_time = stop_time
     self._stop_regex = re.compile(str(stop_signal))
     # A set of active MidiSignals being used by iterators.
     self._iter_signals = []
     # An event that is set when `stop` has been called.
     self._stop_signal = threading.Event()
     # Active callback threads keyed by unique thread name.
     self._callbacks = {}
     super(MidiCaptor, self).__init__()
Code Example #20
  def testSimpleSequenceToPrettyMidi_DefaultTicksAndTempo(self):
    source_midi = pretty_midi.PrettyMIDI(self.midi_simple_filename)
    stripped_sequence_proto = midi_io.midi_to_sequence_proto(source_midi)
    del stripped_sequence_proto.tempos[:]
    stripped_sequence_proto.ClearField('ticks_per_quarter')

    expected_sequence_proto = music_pb2.NoteSequence()
    expected_sequence_proto.CopyFrom(stripped_sequence_proto)
    expected_sequence_proto.tempos.add(
        qpm=constants.DEFAULT_QUARTERS_PER_MINUTE)
    expected_sequence_proto.ticks_per_quarter = constants.STANDARD_PPQ

    translated_midi = midi_io.sequence_proto_to_pretty_midi(
        stripped_sequence_proto)

    self.CheckPrettyMidiAndSequence(translated_midi, expected_sequence_proto)
Code Example #21
  def testNoteSequenceRecordWriterAndIterator(self):
    sequences = []
    for i in range(4):
      sequence = music_pb2.NoteSequence()
      sequence.id = str(i)
      sequence.notes.add().pitch = i
      sequences.append(sequence)

    with tempfile.NamedTemporaryFile(prefix='NoteSequenceIoTest') as temp_file:
      with note_sequence_io.NoteSequenceRecordWriter(temp_file.name) as writer:
        for sequence in sequences:
          writer.write(sequence)

      for i, sequence in enumerate(
          note_sequence_io.note_sequence_record_iterator(temp_file.name)):
        self.assertEqual(sequence, sequences[i])
Code Example #22
def run_conversion(encoder,
                   sequences_file,
                   train_output,
                   eval_output='',
                   eval_ratio=0.0):
    """Loop that converts NoteSequence protos to SequenceExample protos.

  Args:
    encoder: String name of encoder function from encoders.py to use.
    sequences_file: String path pointing to TFRecord file of NoteSequence
        protos.
    train_output: String path to TFRecord file that training samples will be
        saved to.
    eval_output: If set, string path to TFRecord file that evaluation samples
        will be saved to. Omit this argument to not produce an eval set.
    eval_ratio: Fraction of input that will be saved to eval set. A random
        partition is chosen, so the actual train/eval ratio will vary.
  """
    encoder_func = getattr(encoders, encoder)

    reader = tf.python_io.tf_record_iterator(sequences_file)
    train_writer = tf.python_io.TFRecordWriter(train_output)
    eval_writer = (tf.python_io.TFRecordWriter(eval_output)
                   if eval_output else None)

    input_count = 0
    train_output_count = 0
    eval_output_count = 0
    for buf in reader:
        sequence_data = music_pb2.NoteSequence()
        sequence_data.ParseFromString(buf)
        extracted_melodies = melodies_lib.extract_melodies(sequence_data)
        for melody in extracted_melodies:
            sequence_example, _ = encoder_func(melody)
            serialized = sequence_example.SerializeToString()
            if eval_writer and random.random() < eval_ratio:
                eval_writer.write(serialized)
                eval_output_count += 1
            else:
                train_writer.write(serialized)
                train_output_count += 1
        input_count += 1

    logging.info('Found %d sequences', input_count)
    logging.info('Extracted %d melodies for training', train_output_count)
    if eval_writer:
        logging.info('Extracted %d melodies for evaluation', eval_output_count)
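A usage sketch for run_conversion; the encoder name and paths are illustrative, not values the module guarantees:

run_conversion(
    encoder='basic_one_hot_encoder',  # hypothetical name from encoders.py
    sequences_file='/tmp/notesequences.tfrecord',
    train_output='/tmp/train.tfrecord',
    eval_output='/tmp/eval.tfrecord',
    eval_ratio=0.1)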
Code Example #23
def performance_rnn(record_file):
    start_time = 0.0
    timestep = 0.5
    end_time = 0.5
    melody_sequence = music_pb2.NoteSequence()
    with open(record_file, 'r') as record:
        for line in record:
            melody_sequence.notes.add(pitch=pitch_dict[line.strip()],
                                      start_time=start_time,
                                      end_time=end_time, velocity=80)
            start_time += timestep
            end_time += timestep
    melody_sequence.total_time = end_time
    melody_sequence.tempos.add(qpm=60)

    input_sequence = melody_sequence
    num_steps = 8192  # change this for shorter or longer sequences
    temperature = 1.0  # the higher the temperature the more random the sequence.

    bundle = mm.sequence_generator_bundle.read_bundle_file(
        '/home/ubuntu/team15/bundle/performance_with_dynamics.mag')
    generator_map = performance_sequence_generator.get_generator_map()
    generator = generator_map['performance_with_dynamics'](checkpoint=None,
                                                           bundle=bundle)
    generator.initialize()

    # Derive the total number of seconds to generate.
    seconds_per_step = 1.0 / generator.steps_per_second
    generate_end_time = num_steps * seconds_per_step

    # Specify start/stop time for generation based on starting generation at the
    # end of the priming sequence and continuing until the sequence is num_steps
    # long.
    generator_options = generator_pb2.GeneratorOptions()
    # Set the start time to begin when the last note ends.
    generate_section = generator_options.generate_sections.add(
        start_time=input_sequence.total_time, end_time=generate_end_time)

    # Higher temperature is more random; 1.0 is the default.
    generator_options.args['temperature'].float_value = temperature

    sequence = generator.generate(input_sequence, generator_options)

    new_file_name = record_file.split('/')[-1].split(
        '.')[0] + '_performance_rnn.mid'
    mm.sequence_proto_to_midi_file(
        sequence, '/home/ubuntu/team15/midi/performance_rnn/' + new_file_name)
    return new_file_name
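The function above looks note tokens up in a module-level pitch_dict that the snippet never defines; a hypothetical sketch of what it assumes:

# Hypothetical mapping from record-file tokens to MIDI pitch numbers.
pitch_dict = {'C4': 60, 'D4': 62, 'E4': 64, 'F4': 65, 'G4': 67, 'A4': 69}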
Code Example #24
    def setUp(self):
        sequence = music_pb2.NoteSequence()
        sequence.tempos.add(qpm=60)
        testing_lib.add_track_to_sequence(sequence, 0, [(32, 100, 2, 4),
                                                        (33, 1, 6, 11),
                                                        (34, 1, 11, 13),
                                                        (35, 1, 17, 19)])
        testing_lib.add_track_to_sequence(sequence, 1, [(35, 127, 2, 4),
                                                        (36, 50, 6, 8),
                                                        (71, 100, 33, 37),
                                                        (73, 100, 34, 37),
                                                        (33, 1, 50, 55),
                                                        (34, 1, 55, 56)])
        self.sequence = sequence

        # Subtract min pitch (21).
        expected_unsliced_events = [
            (NO_EVENT, NO_EVENT, 11, NO_EVENT, NOTE_OFF, NO_EVENT, 12,
             NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT, 13, NO_EVENT, NOTE_OFF,
             NO_EVENT, NO_EVENT),
            (NO_EVENT, 14, NO_EVENT, NOTE_OFF),
            (NO_EVENT, NO_EVENT, 14, NO_EVENT, NOTE_OFF, NO_EVENT, 15,
             NO_EVENT),
            (NO_EVENT, 50, 52, NO_EVENT, NO_EVENT, NOTE_OFF, NO_EVENT,
             NO_EVENT),
            (NO_EVENT, NO_EVENT, 12, NO_EVENT, NO_EVENT, NO_EVENT, NO_EVENT,
             13),
        ]
        self.expected_unsliced_labels = [
            np.array(es) + 2 for es in expected_unsliced_events
        ]

        expected_sliced_events = [(NO_EVENT, NO_EVENT, 11, NO_EVENT, NOTE_OFF,
                                   NO_EVENT, 12, NO_EVENT),
                                  (NO_EVENT, NO_EVENT, 12, NO_EVENT, NO_EVENT,
                                   NO_EVENT, NO_EVENT, 13),
                                  (NO_EVENT, NO_EVENT, NO_EVENT, 13, NO_EVENT,
                                   NOTE_OFF, NO_EVENT, NO_EVENT),
                                  (NO_EVENT, NO_EVENT, 14, NO_EVENT, NOTE_OFF,
                                   NO_EVENT, 15, NO_EVENT),
                                  (NO_EVENT, 50, 52, NO_EVENT, NO_EVENT,
                                   NOTE_OFF, NO_EVENT, NO_EVENT)]
        self.expected_sliced_labels = [
            np.array(es) + 2 for es in expected_sliced_events
        ]

        self.converter_class = data.OneHotMelodyConverter
Code Example #25
  def testCaptureSequence_StopTime(self):
    start_time = 1.0
    stop_time = time.time() + 1.0

    self.capture_messages[-1].time += time.time()
    threading.Timer(0.1, self.send_capture_messages).start()

    captured_seq = self.midi_hub.capture_sequence(
        120, start_time, stop_time=stop_time)

    expected_seq = music_pb2.NoteSequence()
    expected_seq.tempos.add(qpm=120)
    expected_seq.total_time = stop_time
    testing_lib.add_track_to_sequence(
        expected_seq, 0,
        [Note(1, 64, 2, 5), Note(2, 64, 3, 4), Note(3, 64, 4, stop_time)])
    self.assertProtoEquals(captured_seq, expected_seq)
Code Example #26
def write_to_file(file):
    fp = open("pitch_mond2.txt", "w+")
    ft = open("tempo_mond2.txt", "w+")
    fp.write('pitch,start_time,end_time,time_diff,note_len\n')
    # fp = open("velocity_mond2.txt","w+")
    # fp.write('pitch,start_time,end_time,velocity\n')
    sequence = music_pb2.NoteSequence()
    for i, sequence in enumerate(
            note_sequence_io.note_sequence_record_iterator(file)):
        for tempo in sequence.tempos:
            ft.write('time:{}\n'.format(tempo.time))
            ft.write('qpm:{}\n'.format(tempo.qpm))
        for note in sequence.notes:
            fp.write('{},{},{},{},{}/{}\n'.format(
                note.pitch, note.start_time, note.end_time,
                note.end_time - note.start_time, note.numerator,
                note.denominator))
    fp.close()
    ft.close()
Code Example #27
    def testToNoteSequence(self):
        converter = data.DrumsConverter(steps_per_quarter=1,
                                        slice_bars=2,
                                        max_tensors_per_notesequence=1)
        tensors = converter.to_tensors(filter_instrument(self.sequence, 1))
        sequences = converter.to_notesequences(tensors.outputs)

        self.assertEqual(1, len(sequences))
        expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
        expected_sequence.tempos.add(qpm=120)
        testing_lib.add_track_to_sequence(expected_sequence,
                                          9, [(38, 80, 0.5, 1.0),
                                              (48, 80, 2.0, 2.5),
                                              (49, 80, 2.0, 2.5),
                                              (51, 80, 3.5, 4.0)],
                                          is_drum=True)
        self.assertProtoEquals(expected_sequence, sequences[0])
Code Example #28
    def testSequenceToPianoroll(self):
        sequence = music_pb2.NoteSequence(total_time=1.21)
        testing_lib.add_track_to_sequence(sequence, 0, [(1, 100, 0.11, 1.01),
                                                        (2, 55, 0.22, 0.50),
                                                        (3, 100, 0.3, 0.8),
                                                        (2, 45, 1.0, 1.21)])

        expected_pianoroll = [[0, 0], [1, 0], [1, 1], [1, 1], [1, 1], [1, 0],
                              [1, 0], [1, 0], [1, 0], [1, 0], [1, 1], [0, 1],
                              [0, 1]]

        output, _, _, _ = data.sequence_to_pianoroll(sequence,
                                                     frames_per_second=10,
                                                     min_pitch=1,
                                                     max_pitch=2)

        np.testing.assert_allclose(expected_pianoroll, output)
Code Example #29
def inference(filename):
    # Read the audio (.wav) file.
    wav_file = open(filename, mode='rb')
    wav_data = wav_file.read()
    wav_file.close()

    print('User uploaded file "{name}" with length {length} bytes'.format(
        name=filename, length=len(wav_data)))

    # Split into chunks and build Example protos.
    to_process = []
    example_list = list(
        audio_label_data_utils.process_record(
            wav_data=wav_data,
            ns=music_pb2.NoteSequence(),
            example_id=filename,
            min_length=0,
            max_length=-1,
            allow_empty_notesequence=True))

    # Serialize.
    to_process.append(example_list[0].SerializeToString())

    # Run the session.
    sess.run(iterator.initializer, {examples: to_process})

    # Run prediction.
    prediction_list = list(estimator.predict(input_fn, yield_single_examples=False))
    assert len(prediction_list) == 1

    # Extract the prediction outputs.
    frame_predictions = prediction_list[0]['frame_predictions'][0]
    onset_predictions = prediction_list[0]['onset_predictions'][0]
    velocity_values = prediction_list[0]['velocity_values'][0]

    # Build a MIDI sequence from the prediction outputs.
    sequence_prediction = sequences_lib.pianoroll_to_note_sequence(
        frame_predictions,
        frames_per_second=data.hparams_frames_per_second(hparams),
        min_duration_ms=0,
        min_midi_pitch=constants.MIN_MIDI_PITCH,
        onset_predictions=onset_predictions,
        velocity_values=velocity_values)

    basename = os.path.split(os.path.splitext(filename)[0])[1] + '.mid'
    output_filename = os.path.join(env.MIDI_DIRECTORY, basename)

    # Write the MIDI sequence to a file.
    midi_filename = output_filename
    midi_io.sequence_proto_to_midi_file(sequence_prediction, midi_filename)

    return basename
Code Example #30
  def testInferChordsForSequence(self):
    sequence = music_pb2.NoteSequence()
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 100, 0.0, 1.0), (64, 100, 0.0, 1.0), (67, 100, 0.0, 1.0),   # C
         (62, 100, 1.0, 2.0), (65, 100, 1.0, 2.0), (69, 100, 1.0, 2.0),   # Dm
         (60, 100, 2.0, 3.0), (65, 100, 2.0, 3.0), (69, 100, 2.0, 3.0),   # F
         (59, 100, 3.0, 4.0), (62, 100, 3.0, 4.0), (67, 100, 3.0, 4.0)])  # G
    quantized_sequence = sequences_lib.quantize_note_sequence(
        sequence, steps_per_quarter=4)
    chord_inference.infer_chords_for_sequence(
        quantized_sequence, chords_per_bar=2)

    expected_chords = [('C', 0.0), ('Dm', 1.0), ('F', 2.0), ('G', 3.0)]
    chords = [(ta.text, ta.time) for ta in quantized_sequence.text_annotations]

    self.assertEqual(expected_chords, chords)