Example #1
import numpy as np
import pretty_midi as pm


def to_piano_roll(pm_container, fs=8):
    roll = np.copy(pm_container.get_piano_roll(fs=fs).T)

    # transform note velocities into 1s
    roll = (roll > 0).astype(float)

    # remove empty beginning
    for i, col in enumerate(roll):
        if col.sum() != 0:
            break
    roll = roll[i:]

    if roll.sum() == 0:
        raise ValueError("Roll is empty")

    return roll

if __name__ == "__main__":
    pm_container = pm.PrettyMIDI("data/Pop_Music_Midi/Around The World - Verse.midi")
    del pm_container.instruments[1]
    roll = to_piano_roll(pm_container)
    np.save("test.npy", roll)
Example #2
def sequence_proto_to_pretty_midi(sequence):
    """Convert tensorflow.magenta.NoteSequence proto to a PrettyMIDI.

  Time is stored in the NoteSequence in absolute values (seconds) as opposed to
  relative values (MIDI ticks). When the NoteSequence is translated back to
  PrettyMIDI the absolute time is retained. The tempo map is also recreated.

  Args:
    sequence: A tensorflow.magenta.NoteSequence proto.

  Returns:
    A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
  """

    kwargs = {}
    if sequence.tempos and sequence.tempos[0].time == 0:
        kwargs['initial_tempo'] = sequence.tempos[0].bpm
    pm = pretty_midi.PrettyMIDI(resolution=sequence.ticks_per_beat, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempo. The first tempo change was done in PrettyMIDI constructor.
    # TODO(@douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    if len(sequence.tempos) > 1:
        for seq_tempo in sequence.tempos[1:]:
            tick_scale = 60.0 / (pm.resolution * seq_tempo.bpm)
            tick = pm.time_to_tick(seq_tempo.time)
            # pylint: disable=protected-access
            pm._tick_scales.append((tick, tick_scale))
            pm._update_tick_to_time(0)
            # pylint: enable=protected-access

    # Populate instrument events by first gathering notes and other event types
    # in lists, then writing them sorted to the PrettyMIDI object.
    instrument_events = defaultdict(lambda: defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument,
                           seq_note.program)]['notes'].append(
                               pretty_midi.Note(seq_note.velocity,
                                                seq_note.pitch,
                                                seq_note.start_time,
                                                seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        instrument_events[(seq_bend.instrument,
                           seq_bend.program)]['bends'].append(
                               pretty_midi.PitchBend(seq_bend.bend,
                                                     seq_bend.time))
    for seq_cc in sequence.control_changes:
        instrument_events[(seq_cc.instrument,
                           seq_cc.program)]['controls'].append(
                               pretty_midi.ControlChange(
                                   seq_cc.control_number, seq_cc.control_value,
                                   seq_cc.time))

    for (instr_id, prog_id) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id,
                                                is_drum=(instr_id == 9))
            pm.instruments.append(instrument)
        instrument.program = prog_id
        instrument.notes = instrument_events[(instr_id, prog_id)]['notes']
        instrument.pitch_bends = instrument_events[(instr_id,
                                                    prog_id)]['bends']
        instrument.control_changes = instrument_events[(instr_id,
                                                        prog_id)]['controls']

    return pm
Example #3
def build_dataset(data_dir, model):

    np.random.seed(1)

    calls = []
    responses = []

    for dirName, subdirList, fileList in walk(data_dir):
        cur_cr = {}
        for fname in fileList:
            if fname.endswith('.mid') and not fname[0] == '.':
                try:
                    num = int(fname.replace('.mid', '').replace(' ', '_').split('_')[-1])
                except ValueError:
                    print(f'File name does not end with an integer: {fname}')
                    continue

                full_path = join(dirName, fname)
                try:
                    midi = pm.PrettyMIDI(full_path)
                    assert len(midi.instruments[0].notes) > 3
                    cur_cr[num] = model.encoder.encode(midi, instrument_index=0)
                    #print("encode", cur_cr[num])
                except (AssertionError, IOError, KeyError):
                    print(f'Could not add midi file {join(dirName, fname)}.')

        for k in cur_cr.keys():
            if k % 2 == 1:
                try:
                    cur_call = cur_cr[k]
                    cur_response = cur_cr[k + 1]
                    responses.append(cur_response)
                    calls.append(cur_call)
                    #print("calls", calls)
                except KeyError:
                    print(f'Could not add index {k} from {dirName} because '
                                    f'it does not have a valid call/response.')

    print(f'building dataset from {data_dir}')

    call_len = calls[0].shape[0]
    print("len", call_len)
    for call, response in zip(calls, responses):
        assert call.shape[0] == call_len
        assert response.shape[0] == call_len

    num_examples = len(calls)
    print(num_examples)
    validation_ratio = 0  # use all MIDI files for training
    num_training_examples = int(num_examples*(1.0-validation_ratio))
    indices = np.arange(num_examples, dtype=np.int32)
    #np.random.shuffle(indices)

    dataset = {
        'calls': np.swapaxes(np.stack(calls), 0, 1),
        'responses': np.swapaxes(np.stack(responses), 0, 1),
        'training_indices': indices[:num_training_examples],
        'validation_indices': indices[num_training_examples:]
    }

    return dataset
Example #4
                        default=N_NOTES,
                        type=int,
                        action='store',
                        help='Number of notes in each CQT')
    parser.add_argument('--penalty',
                        default=None,
                        type=float,
                        action='store',
                        help='DTW non-diagonal move penalty.  '
                        'By default, uses the mean of the distance matrix.')

    parameters = vars(parser.parse_args(sys.argv[1:]))
    print("Loading {} ...".format(parameters['audio_file']))
    audio_data, _ = librosa.load(parameters['audio_file'], sr=parameters['fs'])
    print("Loading {} ...".format(parameters['midi_file']))
    midi_object = pretty_midi.PrettyMIDI(parameters['midi_file'])
    print("Aligning {} to {} ...".format(parameters['audio_file'],
                                         parameters['midi_file']))
    align(midi_object, audio_data, parameters['fs'], parameters['hop'],
          parameters['note_start'], parameters['n_notes'],
          parameters['penalty'])
    print("Writing {} ...".format(parameters['output_file']))
    midi_object.write(parameters['output_file'])
    if parameters['output_audio']:
        print("Writing {} ...".format(parameters['output_audio']))
        # Re-synthesize the aligned MIDI
        midi_audio_aligned = midi_object.fluidsynth(fs=parameters['fs'])
        # Adjust to the same size as audio
        if midi_audio_aligned.shape[0] > audio_data.shape[0]:
            midi_audio_aligned = midi_audio_aligned[:audio_data.shape[0]]
        else:
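            # Hedged completion: the original snippet is truncated here.
            # Zero-pad the synthesized audio to the original audio's length
            # (the exact elided logic is an assumption; numpy is assumed
            # to be imported as np in the truncated header of this script).
            pad = audio_data.shape[0] - midi_audio_aligned.shape[0]
            midi_audio_aligned = np.pad(midi_audio_aligned, (0, pad),
                                        'constant')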
Example #5
        # Save
        filename = f"file{file_id}-ins{i}-notes{len(midi.instruments[0].notes)}-pitch{len(midi.instruments[0].pitch_bends)}-controls{len(midi.instruments[0].control_changes)}"
        print(f"\t\tSaving instrument: {filename}")
        midi.write(
            f"{output_dir}/{filename}-ori_len{int(last_end)}-new_len{int(last_end-shift)}.mid"
        )
    except Exception as e:
        print(f"Exception while cleaning instrument: {str(e)}")


# for k, file in enumerate(midi_files[:100]):
for k, file in enumerate(midi_files):
    print(f"Progress: {k}/{len(midi_files)} - Midi file: {file}")
    file_id = file[file.find("\\") + 1:-4]

    pm = pretty_midi.PrettyMIDI(file)
    pm.remove_invalid_notes()

    if pm.instruments[0].is_drum:
        print("Track is drum! Skipping")
        continue

    # CLEAN: Separate instruments and adjust timing
    for i, instrument in enumerate(pm.instruments):
        if len(instrument.notes) > 1000 and not instrument.is_drum:
            # Save new instrument
            save_cleaned_instrument(instrument)

t = 2
Example #6
    def get_time(self):
        return time.time() - self.start

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, _1, _2, _3):
        end = time.time() - self.start
        print(self.fmt.format(end))


if __name__ == "__main__":
    # Test
    resolution = 24
    pm = pretty_midi.PrettyMIDI(initial_tempo=120)
    pm.time_signature_changes.append(TimeSignature(4, 4, 0.0))
    pm.time_signature_changes.append(TimeSignature(3, 4, (60 / 120) * 4))
    pm.time_signature_changes.append(TimeSignature(4, 4, (60 / 120) * 7))
    print([(f"{ts.numerator}/{ts.denominator}", ts.time)
           for ts in pm.time_signature_changes])

    print("head of bar[1]:", steps_to_bars(pm, resolution * 4))
    print("one step before head of bar[1]:",
          steps_to_bars(pm, resolution * 4 - 1))
    print("head of bar[2]:", steps_to_bars(pm, resolution * 7))
    print("one step before head of bar[2]:",
          steps_to_bars(pm, resolution * 7 - 1))
    print("head of bar[3]:", steps_to_bars(pm, resolution * 11))
    print("one step before head of bar[3]:",
          steps_to_bars(pm, resolution * 11 - 1))
Example #7
def note_sequence_to_pretty_midi(sequence,
                                 drop_events_n_seconds_after_last_note=None):
    """Convert NoteSequence to a PrettyMIDI.

  Time is stored in the NoteSequence in absolute values (seconds) as opposed to
  relative values (MIDI ticks). When the NoteSequence is translated back to
  PrettyMIDI the absolute time is retained. The tempo map is also recreated.

  Args:
    sequence: A NoteSequence.
    drop_events_n_seconds_after_last_note: Events (e.g., time signature changes)
        that occur this many seconds after the last note will be dropped. If
        None, then no events will be dropped.

  Returns:
    A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
  """
    ticks_per_quarter = sequence.ticks_per_quarter or constants.STANDARD_PPQ

    max_event_time = None
    if drop_events_n_seconds_after_last_note is not None:
        max_event_time = (max([n.end_time for n in sequence.notes] or [0]) +
                          drop_events_n_seconds_after_last_note)

    # Try to find a tempo at time zero. The list is not guaranteed to be in order.
    initial_seq_tempo = None
    for seq_tempo in sequence.tempos:
        if seq_tempo.time == 0:
            initial_seq_tempo = seq_tempo
            break

    kwargs = {}
    if initial_seq_tempo:
        kwargs['initial_tempo'] = initial_seq_tempo.qpm
    else:
        kwargs['initial_tempo'] = constants.DEFAULT_QUARTERS_PER_MINUTE

    pm = pretty_midi.PrettyMIDI(resolution=ticks_per_quarter, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        if max_event_time and seq_ts.time > max_event_time:
            continue
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        if max_event_time and seq_key.time > max_event_time:
            continue
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempos.
    # TODO(douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    for seq_tempo in sequence.tempos:
        # Skip if this tempo was added in the PrettyMIDI constructor.
        if seq_tempo == initial_seq_tempo:
            continue
        if max_event_time and seq_tempo.time > max_event_time:
            continue
        tick_scale = 60.0 / (pm.resolution * seq_tempo.qpm)
        tick = pm.time_to_tick(seq_tempo.time)
        # pylint: disable=protected-access
        pm._tick_scales.append((tick, tick_scale))
        pm._update_tick_to_time(0)
        # pylint: enable=protected-access

    # Populate instrument names by first creating a map from instrument index
    # to name, then looking up names in the instrument-event loop below.
    inst_infos = {}
    for inst_info in sequence.instrument_infos:
        inst_infos[inst_info.instrument] = inst_info.name

    # Populate instrument events by first gathering notes and other event types
    # in lists, then writing them sorted to the PrettyMIDI object.
    instrument_events = collections.defaultdict(
        lambda: collections.defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument, seq_note.program,
                           seq_note.is_drum)]['notes'].append(
                               pretty_midi.Note(seq_note.velocity,
                                                seq_note.pitch,
                                                seq_note.start_time,
                                                seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        if max_event_time and seq_bend.time > max_event_time:
            continue
        instrument_events[(seq_bend.instrument, seq_bend.program,
                           seq_bend.is_drum)]['bends'].append(
                               pretty_midi.PitchBend(seq_bend.bend,
                                                     seq_bend.time))
    for seq_cc in sequence.control_changes:
        if max_event_time and seq_cc.time > max_event_time:
            continue
        instrument_events[(seq_cc.instrument, seq_cc.program,
                           seq_cc.is_drum)]['controls'].append(
                               pretty_midi.ControlChange(
                                   seq_cc.control_number, seq_cc.control_value,
                                   seq_cc.time))
    from magenta.music.chords_lib import CHORD_SYMBOL  # imported once, not per iteration
    for ta in sequence.text_annotations:
        if ta.annotation_type == CHORD_SYMBOL and ta.text != constants.NO_CHORD:
            pm.lyrics.append(pretty_midi.Lyric(ta.text, ta.time))
        # timing_track.append(mido.MetaMessage(
        #   'end_of_track', time=timing_track[-1].time + 1))

    for (instr_id, prog_id, is_drum) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id, is_drum)
            pm.instruments.append(instrument)
        else:
            instrument.is_drum = is_drum
        # propagate instrument name to the midi file
        instrument.program = prog_id
        if instr_id in inst_infos:
            instrument.name = inst_infos[instr_id]
        instrument.notes = instrument_events[(instr_id, prog_id,
                                              is_drum)]['notes']
        instrument.pitch_bends = instrument_events[(instr_id, prog_id,
                                                    is_drum)]['bends']
        instrument.control_changes = instrument_events[(instr_id, prog_id,
                                                        is_drum)]['controls']

    return pm
Example #8
def m2c_generator(max_num_sample):
    '''
        m2c Generator 
        Input  : a testing sample index 
        Output : Chord Label (n, 16)
                 Monophonic Melody Label (n, 2)
                 BPM float 
        Average Elapsed Time for one sample : 0.16 sec
    '''
    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cpu_device = torch.device('cpu')

    # Load Data
    chord_dic = pd.read_pickle(CONFIG_ALL['data']['chord_dic'])

    # prepare features
    all_files = find_files(CONFIG_ALL['data']['test_dir'], '*.mid')
    input_dic = []
    for i_file in all_files:
        _ = midi_feature(i_file, sampling_fac=2)
        _ = np.reshape(_, (1, _.shape[0], _.shape[1]))
        input_dic.append({'midi': i_file, 'm_embed': _})
    print('Total Number of files : ', len(input_dic))

    # training
    model = BiRNN(CONFIG_ALL['model']['input_size'],
                  CONFIG_ALL['model']['lstm_hidden_size'],
                  CONFIG_ALL['model']['fc_hidden_size'],
                  CONFIG_ALL['model']['num_layers'],
                  CONFIG_ALL['model']['num_classes_cf'],
                  CONFIG_ALL['model']['num_classes_c'], device).to(device)

    # Load Model
    path = os.path.join(CONFIG_ALL['model']['log_dir'],
                        CONFIG_ALL['model']['exp_name'], 'models/',
                        CONFIG_ALL['model']['eval_model'])
    model.load_state_dict(torch.load(path))

    # Test the model
    with torch.no_grad():
        while True:
            test_idx = yield

            if test_idx >= max_num_sample or test_idx < 0:
                print "Invalid sample index"
                continue
            m_embedding = input_dic[test_idx]['m_embed']
            out_cf, out_c = model(
                torch.tensor(m_embedding, dtype=torch.float).to(device))

            out_c = out_c.data.cpu().numpy()

            _, pred_cf = torch.max(out_cf.data, 1)
            pred_cf = pred_cf.data.cpu().numpy()

            i_out_tn1 = -1
            i_out_tn2 = -1
            i_out_tn3 = -1
            i_out_t = -1

            predicted = []
            c_threshold = 0.825
            f_threshold = 0.35
            #ochord_threshold = 1.0

            for idx, i_out in enumerate(out_c):
                # Seventh chord
                #T_chord_label = [0, 1, 2, 3, 4, 5, 102, 103, 104]
                #D_chord_label = [77, 78, 79, 55, 56, 57]
                #R_chord_label = [132]

                # Triad Chord
                T_chord_label = [0, 1, 37]
                D_chord_label = [20, 28]
                R_chord_label = [48]

                O_chord_label = [
                    i for i in range(0, 48)
                    if i not in T_chord_label and i not in D_chord_label
                    and i not in R_chord_label
                ]

                # Beam-search-style check to avoid repeating the same chord
                if pred_cf[idx] == 0:
                    L = np.argsort(
                        -np.asarray([i_out[i] for i in T_chord_label]))
                    if i_out_tn1 == T_chord_label[
                            L[0]] and i_out_tn2 == T_chord_label[L[0]]:
                        i_out_t = T_chord_label[L[1]]
                    else:
                        i_out_t = T_chord_label[L[0]]

                elif pred_cf[idx] == 1:
                    i_out_t = D_chord_label[np.argmax(
                        [i_out[i] for i in D_chord_label])]

                elif pred_cf[idx] == 3:
                    L = np.argsort(
                        -np.asarray([i_out[i] for i in O_chord_label]))
                    if i_out_tn1 == O_chord_label[
                            L[0]] and i_out_tn2 == O_chord_label[L[0]]:
                        i_out_t = O_chord_label[L[1]]
                    else:
                        i_out_t = O_chord_label[L[0]]

                else:
                    i_out_t = 48

                predicted.append(i_out_t)
                i_out_tn2 = i_out_tn1
                i_out_tn1 = i_out_t
                i_out_last = i_out

            # Write file to midi
            midi_original = pretty_midi.PrettyMIDI(input_dic[test_idx]['midi'])
            midi_chord = pro_chordlabel_to_midi(
                predicted,
                chord_dic,
                inv_beat_resolution=CONFIG_ALL['data']['chord_resolution'],
                constant_tempo=midi_original.get_tempo_changes()[1])
            midi_chord.instruments[0].name = "Predicted_w_func"
            midi_original.instruments.append(midi_chord.instruments[0])

            out_path = os.path.join('eval_test/', str(test_idx) + '.mid')
            ensure_dir(out_path)
            midi_original.write(out_path)
            print "Write Files to : ", out_path

            out_mc = midi_to_list(midi_original, predicted)

            yield {
                'melody': out_mc['melody'],
                'chord': out_mc['chord'],
                'BPM': float(midi_original.get_tempo_changes()[1])
            }
Example #9
def interpolation(args, model, dataset, fs=25, program=0):
    x_a, x_b = dataset[random.randint(
        0,
        len(dataset) - 1)], dataset[random.randint(0,
                                                   len(dataset) - 1)]
    x_a, x_b = x_a.to(args.device), x_b.to(args.device)
    # Encode samples to the latent space
    z_a, z_b = model.encode(x_a.unsqueeze(0)), model.encode(x_b.unsqueeze(0))
    # Run through alpha values
    interp = []
    alpha_values = np.linspace(0, 1, args.n_steps)
    for alpha in alpha_values:
        z_interp = (1 - alpha) * z_a[0] + alpha * z_b[0]
        interp.append(model.decode(z_interp))
    # Draw interpolation step by step
    i = 0
    stack_interp = []
    for step in interp:
        if args.num_classes > 1:
            step = torch.argmax(step[0], dim=0)
        stack_interp.append(step)
        # plt.matshow(step.cpu().detach(), alpha=1)
        # plt.title("Interpolation " + str(i))
        # plt.savefig(args.figures_path + "interpolation" + str(i) + ".png")
        # plt.close()
        i += 1
    stack_interp = torch.cat(stack_interp, dim=1)
    # Draw stacked interpolation
    plt.figure()
    plt.matshow(stack_interp.cpu(), alpha=1)
    plt.title("Interpolation")
    plt.savefig(args.figures_path + "interpolation.png")
    plt.close()
    # Generate MIDI from interpolation
    pm = pretty_midi.PrettyMIDI()
    notes, frames = stack_interp.shape
    instrument = pretty_midi.Instrument(program=program)
    # Pad a column of zeros on each side to capture initial and final note events
    piano_roll = np.pad(stack_interp.cpu().detach(), [(0, 0), (1, 1)],
                        'constant')
    # Use changes in velocities to find note on/note off events
    velocity_changes = np.nonzero(np.diff(piano_roll).T)
    # Keep track on velocities and note on times
    prev_velocities = np.zeros(notes, dtype=int)
    note_on_time = np.zeros(notes)
    for time, note in zip(*velocity_changes):
        # Use time + 1 because of the padding above
        velocity = piano_roll[note, time + 1]
        time = time / fs
        if velocity > 0:
            if prev_velocities[note] == 0:
                note_on_time[note] = time
                prev_velocities[note] = 75
        else:
            pm_note = pretty_midi.Note(velocity=prev_velocities[note],
                                       pitch=note + args.min_pitch,
                                       start=note_on_time[note],
                                       end=time)
            instrument.notes.append(pm_note)
            prev_velocities[note] = 0
    pm.instruments.append(instrument)
    # Write out the MIDI data
    pm.write(args.midi_results_path + "interpolation.mid")
Example #10
def infer_midi(interval, agg_f0, t_unit=0.02):
    """Inference the given interval and aggregated F0 to MIDI file.

    Parameters
    ----------
    interval: list[tuple[float, float]]
        The return value of ``infer_interval`` function. List of onset/offset pairs in seconds.
    agg_f0: list[dict]
        Aggregated F0 information. Each element in the list should contain
        three fields: *start_time*, *end_time*, and *frequency*. Times should
        be in seconds and pitch in Hz.
    t_unit: float
        Time unit of each frame.

    Returns
    -------
    midi: pretty_midi.PrettyMIDI
        The inferred MIDI object.
    """
    fs = round(1 / t_unit)
    max_secs = max(record["end_time"] for record in agg_f0)
    total_frames = round(max_secs) * fs + 10
    flat_f0 = np.zeros(total_frames)
    for record in agg_f0:
        start_idx = int(round(record["start_time"] * fs))
        end_idx = int(round(record["end_time"] * fs))
        flat_f0[start_idx:end_idx] = record["frequency"]

    notes = []
    drum_notes = []
    skip_num = 0
    for onset, offset in interval:
        start_idx = int(round(onset * fs))
        end_idx = int(round(offset * fs))
        freqs = flat_f0[start_idx:end_idx]
        avg_hz = _conclude_freq(freqs)
        if avg_hz < 1e-6:
            skip_num += 1
            note = pretty_midi.Note(velocity=80,
                                    pitch=77,
                                    start=onset,
                                    end=offset)
            drum_notes.append(note)
            continue

        note_num = int(round(pretty_midi.hz_to_note_number(avg_hz)))
        if not (0 <= note_num <= 127):
            logger.warning(
                "Caught invalid note number: %d (should be in range 0~127). Skipping.",
                note_num)
            skip_num += 1
            continue
        note = pretty_midi.Note(velocity=80,
                                pitch=note_num,
                                start=onset,
                                end=offset)
        notes.append(note)

    if skip_num > 0:
        logger.warning(
            "A total of %d notes are skipped due to lack of corressponding pitch information.",
            skip_num)

    inst = pretty_midi.Instrument(program=0)
    inst.notes += notes
    drum_inst = pretty_midi.Instrument(program=1,
                                       is_drum=True,
                                       name="Missing Notes")
    drum_inst.notes += drum_notes
    midi = pretty_midi.PrettyMIDI()
    midi.instruments.append(inst)
    midi.instruments.append(drum_inst)
    return midi
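
# Hypothetical usage sketch (not in the original): a single 440 Hz note from
# 0.5 s to 1.0 s is inferred and written out; _conclude_freq and logger come
# from the surrounding project.
if __name__ == "__main__":
    example_interval = [(0.5, 1.0)]
    example_agg_f0 = [{"start_time": 0.5, "end_time": 1.0, "frequency": 440.0}]
    inferred = infer_midi(example_interval, example_agg_f0)
    inferred.write("inferred.mid")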
Example #11
    def __init__(self,
                 root_path,
                 sequence_length=50,
                 fs=16,
                 year=-1,
                 binarize=True,
                 save_pickle=False):
        year = str(year) if not isinstance(year, str) else year
        self.sequence_length = sequence_length
        self.binarize = binarize
        self.save_pickle = save_pickle

        # Load pickle if it exists and return
        pickled_file = root_path + "/pickle/year_" + year + ".pkl"
        if Path(pickled_file).is_file():
            print('Found pickle dataset at {}. Start loading...'.format(
                pickled_file))
            with open(pickled_file, 'rb') as f:
                pickle_content = pickle.load(f)
                self.midi_data = pickle_content[0]
                self.trackID = len(self.midi_data.keys())
                self.dataset_length = pickle_content[1]
                print("Loaded pickled dataset. Size = {}, Path: {}".format(
                    self.dataset_length, pickled_file))

            return

        #TEST: we need to filter the dataset (potentially) and only include 4/4
        # Create dataset
        self.midi_files = []
        for (dirpath, dirnames, filenames) in walk(root_path):
            ff = [
                dirpath + "/" + file for file in filenames if ".midi" in file
            ]
            if year == '-1' or year in dirpath:  # year was cast to str above
                self.midi_files.extend(ff)

        self.trackID = 0
        self.midi_data = {}
        print('Start loading dataset..')
        # tqdm() only prints a progress bar; it does not alter the data
        for idx, file in enumerate(tqdm(self.midi_files)):
            piano_midi = pretty_midi.PrettyMIDI(file)

            if len(piano_midi.time_signature_changes
                   ) != 1 or piano_midi.time_signature_changes[
                       0].numerator != 4 or piano_midi.time_signature_changes[
                           0].denominator != 4:
                continue  # if the time signature of the music is not 4/4 we skip this music

            # get the key of the music (by estimating the dominant semitone)
            total_velocity = sum(sum(piano_midi.get_chroma()))
            semitones = [
                sum(semitone) / total_velocity
                for semitone in piano_midi.get_chroma()
            ]
            midi_key = np.argmax(semitones)

            # Shift all notes down by midi_key semitones if major, midi_key + 3 semitones if minor
            transpose_key = midi_key if semitones[
                (midi_key + 4) % 12] > semitones[(midi_key + 3) %
                                                 12] else midi_key + 3

            # Shift all notes down by transpose_key semitones
            for instrument in piano_midi.instruments:
                for note in instrument.notes:
                    note.pitch -= transpose_key if note.pitch - transpose_key >= 0 else transpose_key - 12

            # this is the required sampling frequency to get 16 x 16th notes in a bar (1 bar = 4 beats)
            fs = (piano_midi.estimate_tempo() * 16.0) / (4.0 * 60.0)

            piano_roll = piano_midi.get_piano_roll(fs=fs)[21:109, :]

            # Binarize if set
            if self.binarize:
                piano_roll = np.clip(piano_roll, 0, 1)

            self.midi_data[self.trackID] = piano_roll
            self.trackID += 1

        print('Loaded dataset. Number of tracks = {}'.format(self.trackID))

        # Pickle dataset
        if self.save_pickle:
            if not path.exists(root_path + "/pickle"):
                makedirs(root_path + "/pickle")
            with open(pickled_file, 'wb') as f:
                pickle.dump((self.midi_data, len(self)), f)
                print('Saved dataset into pickle file at {}'.format(
                    pickled_file))
Example #12
import glob, os
import pretty_midi

out_midis, out_names = ([], [])

os.chdir("../midi/piano_midi")
for file in glob.glob("*.mid"):
    pm = pretty_midi.PrettyMIDI(file)
    filename = file.split('.mid')[0]
    print("Working on %s..." % filename)

    piano_midi = pretty_midi.PrettyMIDI(
    )  # Create the new monophonic midi file
    piano_program = pretty_midi.instrument_name_to_program(
        'Acoustic Grand Piano')
    piano = pretty_midi.Instrument(
        program=piano_program)  # Create a piano instrument

    pitch, onset, offset = (-1, -1, -1)
    pm.instruments[0].notes.sort(key=lambda x: x.start, reverse=False)
    for note in pm.instruments[
            0].notes:  # Append the notes to the piano instrument
        if (note.start == onset):  # If two notes start at the same time
            if (note.pitch > pitch):
                piano.notes[len(piano.notes) -
                            1] = note  # Select the note with the higher pitch
            else:
                continue
        else:
            if (len(piano.notes) > 0
                    and note.start < offset):  # If the previous note remains,
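                # Hedged completion: the original snippet is truncated here.
                # Shorten the previous note so the line stays monophonic (an
                # assumed reconstruction of the elided branch).
                piano.notes[-1].end = note.start
            piano.notes.append(note)
            pitch, onset, offset = (note.pitch, note.start, note.end)

    piano_midi.instruments.append(piano)
    out_midis.append(piano_midi)
    out_names.append(filename)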
Example #13
import pretty_midi
# Load MIDI file into PrettyMIDI object
midi_data = pretty_midi.PrettyMIDI('original_metheny.mid')
# Print an empirical estimate of its global tempo
print(midi_data.estimate_tempo())
# Compute the relative amount of each semitone across the entire song,
# a proxy for key
total_velocity = sum(sum(midi_data.get_chroma()))
print([sum(semitone) / total_velocity for semitone in midi_data.get_chroma()])
# Shift all notes up by 5 semitones
for instrument in midi_data.instruments:
    # Don't want to shift drum notes
    if not instrument.is_drum:
        for note in instrument.notes:
            note.pitch += 5
# Synthesize the resulting MIDI data using sine waves
audio_data = midi_data.synthesize()
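
# A possible follow-up, not in the original snippet: write the synthesized
# float array to disk for listening (scipy is an assumed dependency; 44100 Hz
# is synthesize()'s default sample rate).
from scipy.io import wavfile
wavfile.write('shifted.wav', 44100, audio_data)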
Example #14
def note_to_index(midi_file, save_path):
    mid = pm.PrettyMIDI(str(midi_file))
    pianoroll = mid.get_piano_roll(fs=config.fs)
    index_roll = [[note_i for note_i, value in enumerate(pianoroll[:, time_i])
                   if value != 0] for time_i in range(pianoroll.shape[1])]
    pickle.dump(index_roll, open(str(save_path), 'wb'))
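
# Hypothetical usage sketch (not in the original): the file names are
# placeholders, and config.fs comes from the surrounding project.
if __name__ == '__main__':
    note_to_index('song.mid', 'song_indices.pkl')
    with open('song_indices.pkl', 'rb') as f:
        index_roll = pickle.load(f)  # index_roll[t] lists active pitches at frame t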
Example #15
import matplotlib.pyplot as plt
import numpy as np
import pretty_midi
import os
filepath = "..\\midi\\"  #添加路径
filename = os.listdir(filepath)
plt.rcParams['font.sans-serif'] = ['KaiTi']  # default font (supports CJK labels)
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly in saved figures
for file in filename:
    filter_size = 256
    standard_deviation = 128
    file_name = filepath + file
    pm = pretty_midi.PrettyMIDI(file_name)
    piano_roll = pm.get_piano_roll(fs=10)
    Tx = len(piano_roll[0])
    x = np.zeros(shape=(128, Tx))
    x[:, :] = piano_roll
    x = np.transpose(x)
    S = np.zeros((Tx, Tx))
    for i in range(Tx):
        S[i] = np.sqrt(np.sum(np.square(x - x[i]), axis=1))  # Euclidean distance
    plt.figure()
    plt.imshow(S, cmap=plt.get_cmap('hot'), aspect='auto')
    plt.title(file)
    plt.savefig('..\\自相似矩阵图\\' + file + '.png')
    plt.close()
    print(file, "self-similarity matrix image saved")
Example #16
def prep_pedal_onset():
    """
    Get 500 ms excerpts with/without a pedal onset at 200 ms,
    where the pedal onset is obtained from the MIDI file.
    """
    print('Start creating pedal-onset-dataset...')
    filename_segs = []
    filepaths = []
    ys = []
    categories = []
    for folder in FOLDERS:
        print('{}..'.format(folder))
        txt_path = os.path.join(DIR_PEDAL_METADATA, '{}.txt'.format(folder))
        filenames = np.genfromtxt(txt_path, dtype=None)

        pfolder_path = os.path.join(DIR_PEDAL_ONSET, folder, 'pedal-onset/')
        npfolder_path = os.path.join(DIR_PEDAL_ONSET, folder,
                                     'non-pedal-onset/')
        if not os.path.exists(pfolder_path):
            os.makedirs(pfolder_path)
        if not os.path.exists(npfolder_path):
            os.makedirs(npfolder_path)

        for filename in filenames:
            print('  {}..'.format(filename))
            midi_path = os.path.join(DIR_RENDERED, '{}.mid'.format(filename))
            paudio_path = os.path.join(DIR_RENDERED,
                                       '{}-p.wav'.format(filename))
            npaudio_path = os.path.join(DIR_RENDERED,
                                        '{}-np.wav'.format(filename))
            paudio, sr = librosa.load(paudio_path, sr=SR)
            npaudio, sr = librosa.load(npaudio_path, sr=SR)

            # get ground truth pedal onset time from midi
            pm = pretty_midi.PrettyMIDI(midi_path)
            pedal_v = []
            pedal_t = []
            for control_change in pm.instruments[0].control_changes:
                if control_change.number == 64:
                    pedal_v.append(control_change.value)
                    pedal_t.append(control_change.time)

            pedal_onset = []
            for i, v in enumerate(pedal_v):
                if i > 0 and v >= 64 and pedal_v[i - 1] < 64:
                    pedal_onset.append(pedal_t[i])

            pedal_onset_sp = librosa.time_to_samples(pedal_onset, sr=SR)

            for seg_idx, sp in enumerate(pedal_onset_sp):
                start_sp = int(sp - TRIM_SECOND_BEFORE * SR)
                end_sp = int(sp + TRIM_SECOND_AFTER * SR)
                newfilename = filename.replace('/', '-')

                if start_sp > 0 and end_sp < len(npaudio):
                    pout_name = '{}-p_{}.wav'.format(newfilename, seg_idx)
                    pout_path = os.path.join(pfolder_path, pout_name)
                    librosa.output.write_wav(pout_path,
                                             paudio[start_sp:end_sp], SR)
                    filename_segs.append(os.path.splitext(pout_name)[0])
                    filepaths.append(
                        os.path.join(folder, 'pedal-onset/', pout_name))
                    ys.append(1)
                    categories.append(folder)

                    npout_name = '{}-np_{}.wav'.format(newfilename, seg_idx)
                    npout_path = os.path.join(npfolder_path, npout_name)
                    librosa.output.write_wav(npout_path,
                                             npaudio[start_sp:end_sp], SR)
                    filename_segs.append(os.path.splitext(npout_name)[0])
                    filepaths.append(
                        os.path.join(folder, 'non-pedal-onset/', npout_name))
                    ys.append(0)
                    categories.append(folder)

    write_to_csv(zip(*[filename_segs, filepaths, ys, categories]),
                 ['filename', 'filepath', 'label', 'category'],
                 'pedal-onset_vd.csv')
    print('pedal-onset_vd.csv is saved!')
Example #17
def sequence_proto_to_pretty_midi(sequence,
                                  drop_events_n_seconds_after_last_note=None):
    """Convert tensorflow.magenta.NoteSequence proto to a PrettyMIDI.

  Time is stored in the NoteSequence in absolute values (seconds) as opposed to
  relative values (MIDI ticks). When the NoteSequence is translated back to
  PrettyMIDI the absolute time is retained. The tempo map is also recreated.

  Args:
    sequence: A tensorflow.magenta.NoteSequence proto.
    drop_events_n_seconds_after_last_note: Events (e.g., time signature changes)
        that occur this many seconds after the last note will be dropped. If
        None, then no events will be dropped.

  Returns:
    A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
  """

    ticks_per_quarter = (sequence.ticks_per_quarter
                         if sequence.ticks_per_quarter else
                         constants.STANDARD_PPQ)

    max_event_time = None
    if drop_events_n_seconds_after_last_note is not None:
        max_event_time = (max([n.end_time for n in sequence.notes] or [0]) +
                          drop_events_n_seconds_after_last_note)

    # Try to find a tempo at time zero. The list is not guaranteed to be in order.
    initial_seq_tempo = None
    for seq_tempo in sequence.tempos:
        if seq_tempo.time == 0:
            initial_seq_tempo = seq_tempo
            break

    kwargs = {}
    kwargs['initial_tempo'] = (initial_seq_tempo.qpm if initial_seq_tempo else
                               constants.DEFAULT_QUARTERS_PER_MINUTE)
    pm = pretty_midi.PrettyMIDI(resolution=ticks_per_quarter, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        if max_event_time and seq_ts.time > max_event_time:
            continue
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        if max_event_time and seq_key.time > max_event_time:
            continue
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempos.
    # TODO(douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    for seq_tempo in sequence.tempos:
        # Skip if this tempo was added in the PrettyMIDI constructor.
        if seq_tempo == initial_seq_tempo:
            continue
        if max_event_time and seq_tempo.time > max_event_time:
            continue
        tick_scale = 60.0 / (pm.resolution * seq_tempo.qpm)
        tick = pm.time_to_tick(seq_tempo.time)
        # pylint: disable=protected-access
        pm._tick_scales.append((tick, tick_scale))
        pm._update_tick_to_time(0)
        # pylint: enable=protected-access

    # Populate instrument events by first gathering notes and other event types
    # in lists, then writing them sorted to the PrettyMIDI object.
    instrument_events = defaultdict(lambda: defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument, seq_note.program,
                           seq_note.is_drum)]['notes'].append(
                               pretty_midi.Note(seq_note.velocity,
                                                seq_note.pitch,
                                                seq_note.start_time,
                                                seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        if max_event_time and seq_bend.time > max_event_time:
            continue
        instrument_events[(seq_bend.instrument, seq_bend.program,
                           seq_bend.is_drum)]['bends'].append(
                               pretty_midi.PitchBend(seq_bend.bend,
                                                     seq_bend.time))
    for seq_cc in sequence.control_changes:
        if max_event_time and seq_cc.time > max_event_time:
            continue
        instrument_events[(seq_cc.instrument, seq_cc.program,
                           seq_cc.is_drum)]['controls'].append(
                               pretty_midi.ControlChange(
                                   seq_cc.control_number, seq_cc.control_value,
                                   seq_cc.time))

    for (instr_id, prog_id, is_drum) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id, is_drum)
            pm.instruments.append(instrument)
        instrument.program = prog_id
        instrument.notes = instrument_events[(instr_id, prog_id,
                                              is_drum)]['notes']
        instrument.pitch_bends = instrument_events[(instr_id, prog_id,
                                                    is_drum)]['bends']
        instrument.control_changes = instrument_events[(instr_id, prog_id,
                                                        is_drum)]['controls']

    return pm
Example #18
def prep_pedal_segment():
    """
    Get variable-length excerpts with/without pedal effect,
    where the length is decided by the MIDI file.
    """
    print('Start creating pedal-segment-dataset...')
    filename_segs = []
    filepaths = []
    ys = []
    categories = []
    min_sp = int(MIN_SRC * SR)
    max_sp = int(MAX_SRC * SR)
    for folder in FOLDERS:
        print('{}..'.format(folder))
        txt_path = os.path.join(DIR_PEDAL_METADATA, '{}.txt'.format(folder))
        filenames = np.genfromtxt(txt_path, dtype=None)

        pfolder_path = os.path.join(DIR_PEDAL_SEGMENT, folder,
                                    'pedal-segment/')
        npfolder_path = os.path.join(DIR_PEDAL_SEGMENT, folder,
                                     'non-pedal-segment/')
        if not os.path.exists(pfolder_path):
            os.makedirs(pfolder_path)
        if not os.path.exists(npfolder_path):
            os.makedirs(npfolder_path)

        for filename in filenames:
            print('  {}..'.format(filename))
            # get pedal segment from midi
            midi_path = os.path.join(PATH_DATASET, '{}.mid'.format(filename))
            pm = pretty_midi.PrettyMIDI(midi_path)
            pedal_v = []
            pedal_t = []
            for control_change in pm.instruments[0].control_changes:
                if control_change.number == 64:
                    pedal_v.append(control_change.value)
                    pedal_t.append(control_change.time)

            pedal_onset = []
            pedal_offset = []
            for i, v in enumerate(pedal_v):
                if i > 0 and v >= 64 and pedal_v[i - 1] < 64:
                    pedal_onset.append(pedal_t[i])
                elif i > 0 and v < 64 and pedal_v[i - 1] >= 64:
                    pedal_offset.append(pedal_t[i])

            pedal_offset = [t for t in pedal_offset if t > pedal_onset[0]]
            seg_idxs = np.min([len(pedal_onset), len(pedal_offset)])
            pedal_offset = pedal_offset[:seg_idxs]
            pedal_onset = pedal_onset[:seg_idxs]
            for seg_idx, offset in enumerate(pedal_offset):
                if offset != pedal_offset[-1] and offset > pedal_onset[
                        seg_idx] and offset < pedal_onset[seg_idx + 1]:
                    correct_pedal_data = True
                elif offset == pedal_offset[
                        -1] and offset > pedal_onset[seg_idx]:
                    correct_pedal_data = True
                else:
                    correct_pedal_data = False

            if correct_pedal_data:
                pedal_onset_sp = librosa.time_to_samples(pedal_onset, sr=SR)
                pedal_offset_sp = librosa.time_to_samples(pedal_offset, sr=SR)
                paudio_path = os.path.join(DIR_RENDERED,
                                           '{}-p.wav'.format(filename))
                npaudio_path = os.path.join(DIR_RENDERED,
                                            '{}-np.wav'.format(filename))
                paudio, sr = librosa.load(paudio_path, sr=SR)
                npaudio, sr = librosa.load(npaudio_path, sr=SR)
                for seg_idx, start_sp in enumerate(pedal_onset_sp):
                    end_sp = pedal_offset_sp[seg_idx]
                    len_sp = end_sp - start_sp
                    if len_sp > max_sp:
                        end_sp = start_sp + max_sp

                    if len_sp >= min_sp and end_sp < len(npaudio):
                        newfilename = filename.replace('/', '-')
                        pout_name = '{}-p_{}.wav'.format(newfilename, seg_idx)
                        pout_path = os.path.join(pfolder_path, pout_name)
                        librosa.output.write_wav(pout_path,
                                                 paudio[start_sp:end_sp], SR)
                        filename_segs.append(os.path.splitext(pout_name)[0])
                        filepaths.append(
                            os.path.join(folder, 'pedal-segment/', pout_name))
                        ys.append(1)
                        categories.append(folder)

                        npout_name = '{}-np_{}.wav'.format(
                            newfilename, seg_idx)
                        npout_path = os.path.join(npfolder_path, npout_name)
                        librosa.output.write_wav(npout_path,
                                                 npaudio[start_sp:end_sp], SR)
                        filename_segs.append(os.path.splitext(npout_name)[0])
                        filepaths.append(
                            os.path.join(folder, 'non-pedal-segment/',
                                         npout_name))
                        ys.append(0)
                        categories.append(folder)

    write_to_csv(zip(*[filename_segs, filepaths, ys, categories]),
                 ['filename', 'filepath', 'label', 'category'],
                 'pedal-segment_vd.csv')
    print('pedal-segment_vd.csv is saved!')
Example #19
# By AladMocu (with linkhl09 help :v)
import pretty_midi as midi
import sys

songname = sys.argv[1]

song = midi.PrettyMIDI(songname)
print(song.instruments)
print(song.get_beats())

li = 0
for instrument in song.instruments:
    li += 1
    transpose = ""
    # Don't want to shift drum notes
    transpose += "#{}\n".format(instrument)
    if not instrument.is_drum:
        mc = [["1", 0], ["2", 0], ["3", 0], ["4", 0], ["5", 0], ["6", 0]]
        for note in instrument.notes:
            a = midi.note_number_to_name(note.pitch)
            a = a.replace('#', '')
            t = int(a[-1:])
            mc[t - 1][1] += 1
        print(instrument.name, " : ", mc)
        t = []
        maxv = 0
        for n in range(4):
            if mc[n][1] + mc[n + 1][1] + mc[n + 2][1] > maxv:
                maxv = mc[n][1] + mc[n + 1][1] + mc[n + 2][1]
                t = mc[n:n + 3]
        mc = list(map(lambda x: str(x[0]), t))
Example #20
def interpolation(args,
                  model,
                  dataset,
                  x_a=None,
                  x_b=None,
                  output='output/',
                  fs=25,
                  program=0):
    if (x_a is None):
        x_a, x_b = dataset[random.randint(
            0,
            len(dataset) - 1)], dataset[random.randint(0,
                                                       len(dataset) - 1)]
        x_a, x_b = x_a.to(args.device), x_b.to(args.device)
    # Encode samples to the latent space
    z_a, z_b = model.encode(x_a.unsqueeze(0)), model.encode(x_b.unsqueeze(0))
    # Run through alpha values
    interp = []
    alpha_values = np.linspace(0, 1, args.n_steps)
    for alpha in alpha_values:
        z_interp = (1 - alpha) * z_a[0] + alpha * z_b[0]
        interp.append(model.decode(z_interp))
    # Draw interpolation step by step
    i = 0
    stack_interp = []
    for step in interp:
        if args.num_classes > 1:
            step = torch.argmax(step[0], dim=0)
        stack_interp.append(step)
        # plt.matshow(step.cpu().detach(), alpha=1)
        # plt.title("Interpolation " + str(i))
        # plt.savefig(args.figures_path + "interpolation" + str(i) + ".png")
        # plt.close()
        i += 1
    stack_interp = torch.cat(stack_interp, dim=1)
    # Draw stacked interpolation
    plt.figure()
    plt.matshow(stack_interp.cpu(), alpha=1)
    plt.title("Interpolation")
    plt.savefig(output + "_interpolation.png")
    plt.close()
    # Generate MIDI from interpolation
    pm = pretty_midi.PrettyMIDI()
    notes, frames = stack_interp.shape
    instrument = pretty_midi.Instrument(program=program)
    # Pad a column of zeros on each side to capture initial and final note events
    piano_roll = np.pad(stack_interp.cpu().detach(), [(0, 0), (1, 1)],
                        'constant')
    # Use changes in velocities to find note on/note off events
    velocity_changes = np.nonzero(np.diff(piano_roll).T)
    # Keep track on velocities and note on times
    prev_velocities = np.zeros(notes, dtype=int)
    note_on_time = np.zeros(notes)
    # Do prettier representation
    fig = plt.figure(figsize=(18, 4), dpi=80)
    ax = plt.subplot(1, 1, 1)
    min_pitch = np.inf
    max_pitch = 0
    cmap = plt.get_cmap('inferno', args.n_steps + 3)
    for time, note in zip(*velocity_changes):
        # Use time + 1 because of the padding above
        velocity = piano_roll[note, time + 1]
        time = time / fs
        if velocity > 0:
            if prev_velocities[note] == 0:
                note_on_time[note] = time
                prev_velocities[note] = 75
        else:
            pm_note = pretty_midi.Note(velocity=prev_velocities[note],
                                       pitch=note + args.min_pitch,
                                       start=note_on_time[note],
                                       end=time)
            instrument.notes.append(pm_note)
            prev_velocities[note] = 0
            rect = patches.Rectangle(
                (note_on_time[note] * fs, note + args.min_pitch - 0.5),
                (time - note_on_time[note]) * fs,
                1,
                linewidth=1.5,
                edgecolor='k',
                facecolor=cmap(int(note_on_time[note] * fs / 64)),
                alpha=0.8)
            min_pitch = min(min_pitch, note + args.min_pitch)
            max_pitch = max(max_pitch, note + args.min_pitch)
            ax.add_patch(rect)
    ax.set_ylim([min_pitch - 5, max_pitch + 5])
    ax.set_xticks(np.arange(64, 64 * args.n_steps, 64))
    ax.set_xticklabels(np.arange(1, args.n_steps, 1))
    ax.set_xlim([0, time * fs])
    ax.set_xlabel('Interpolated measures')
    ax.set_ylabel('Pitch')
    ax.grid()
    plt.tight_layout()
    plt.savefig(output + "_interpolation.pdf")
    pm.instruments.append(instrument)
    # Write out the MIDI data
    pm.write(output + "_interpolation.mid")
Example #21
def midi_to_note_sequence(midi_data):
    """Convert MIDI file contents to a NoteSequence.

  Converts a MIDI file encoded as a string into a NoteSequence. Decoding errors
  are very common when working with large sets of MIDI files, so be sure to
  handle MIDIConversionError exceptions.

  Args:
    midi_data: A string containing the contents of a MIDI file or populated
        pretty_midi.PrettyMIDI object.

  Returns:
    A NoteSequence.

  Raises:
    MIDIConversionError: An improper MIDI mode was supplied.
  """
    # In practice many MIDI files cannot be decoded with pretty_midi. Catch all
    # errors here and try to log a meaningful message. So many different
    # exceptions are raised in pretty_midi.PrettyMidi that it is cumbersome to
    # catch them all only for the purpose of error logging.
    # pylint: disable=bare-except
    if isinstance(midi_data, pretty_midi.PrettyMIDI):
        midi = midi_data
    else:
        try:
            midi = pretty_midi.PrettyMIDI(six.BytesIO(midi_data))
        except:
            raise MIDIConversionError('Midi decoding error %s: %s' %
                                      (sys.exc_info()[0], sys.exc_info()[1]))
    # pylint: enable=bare-except

    sequence = music_pb2.NoteSequence()

    # Populate header.
    sequence.ticks_per_quarter = midi.resolution
    sequence.source_info.parser = music_pb2.NoteSequence.SourceInfo.PRETTY_MIDI
    sequence.source_info.encoding_type = (
        music_pb2.NoteSequence.SourceInfo.MIDI)

    # Populate time signatures.
    for midi_time in midi.time_signature_changes:
        time_signature = sequence.time_signatures.add()
        time_signature.time = midi_time.time
        time_signature.numerator = midi_time.numerator
        try:
            # Denominator can be too large for int32.
            time_signature.denominator = midi_time.denominator
        except ValueError:
            raise MIDIConversionError('Invalid time signature denominator %d' %
                                      midi_time.denominator)

    # Populate key signatures.
    for midi_key in midi.key_signature_changes:
        key_signature = sequence.key_signatures.add()
        key_signature.time = midi_key.time
        key_signature.key = midi_key.key_number % 12
        midi_mode = midi_key.key_number // 12
        if midi_mode == 0:
            key_signature.mode = key_signature.MAJOR
        elif midi_mode == 1:
            key_signature.mode = key_signature.MINOR
        else:
            raise MIDIConversionError('Invalid midi_mode %i' % midi_mode)

    # Populate tempo changes.
    tempo_times, tempo_qpms = midi.get_tempo_changes()
    for time_in_seconds, tempo_in_qpm in zip(tempo_times, tempo_qpms):
        tempo = sequence.tempos.add()
        tempo.time = time_in_seconds
        tempo.qpm = tempo_in_qpm

    for time in midi.get_beats():
        annotation = sequence.text_annotations.add()
        annotation.time = time
        annotation.annotation_type = music_pb2.NoteSequence.TextAnnotation.BEAT

    # Populate notes by gathering them all from the midi's instruments.
    # Also set the sequence.total_time as the max end time in the notes.
    midi_notes = []
    midi_pitch_bends = []
    midi_control_changes = []
    for num_instrument, midi_instrument in enumerate(midi.instruments):
        # Populate instrument name from the midi's instruments
        if midi_instrument.name:
            instrument_info = sequence.instrument_infos.add()
            instrument_info.name = midi_instrument.name
            instrument_info.instrument = num_instrument
        for midi_note in midi_instrument.notes:
            if not sequence.total_time or midi_note.end > sequence.total_time:
                sequence.total_time = midi_note.end
            midi_notes.append((midi_instrument.program, num_instrument,
                               midi_instrument.is_drum, midi_note))
        for midi_pitch_bend in midi_instrument.pitch_bends:
            midi_pitch_bends.append((midi_instrument.program, num_instrument,
                                     midi_instrument.is_drum, midi_pitch_bend))
        for midi_control_change in midi_instrument.control_changes:
            midi_control_changes.append(
                (midi_instrument.program, num_instrument,
                 midi_instrument.is_drum, midi_control_change))

    for program, instrument, is_drum, midi_note in midi_notes:
        note = sequence.notes.add()
        note.instrument = instrument
        note.program = program
        note.start_time = midi_note.start
        note.end_time = midi_note.end
        note.pitch = midi_note.pitch
        note.velocity = midi_note.velocity
        note.is_drum = is_drum

    for program, instrument, is_drum, midi_pitch_bend in midi_pitch_bends:
        pitch_bend = sequence.pitch_bends.add()
        pitch_bend.instrument = instrument
        pitch_bend.program = program
        pitch_bend.time = midi_pitch_bend.time
        pitch_bend.bend = midi_pitch_bend.pitch
        pitch_bend.is_drum = is_drum

    for program, instrument, is_drum, midi_control_change in midi_control_changes:
        control_change = sequence.control_changes.add()
        control_change.instrument = instrument
        control_change.program = program
        control_change.time = midi_control_change.time
        control_change.control_number = midi_control_change.number
        control_change.control_value = midi_control_change.value
        control_change.is_drum = is_drum

    # TODO(douglaseck): Estimate note type (e.g. quarter note) and populate
    # note.numerator and note.denominator.

    return sequence
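As the docstring warns, decoding failures are common on large MIDI corpora, so callers should wrap the conversion. A minimal usage sketch, assuming midi_to_note_sequence and MIDIConversionError are in scope (the file path is illustrative):

with open('data/example.mid', 'rb') as f:
    midi_bytes = f.read()

try:
    sequence = midi_to_note_sequence(midi_bytes)
    print('Parsed %d notes, total_time=%.2f s'
          % (len(sequence.notes), sequence.total_time))
except MIDIConversionError as err:
    print('Skipping undecodable file: %s' % err)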
Exemple #22
0
def tx1_to_midi(tx1):
  import pretty_midi
  import tempfile

  tx1 = tx1.strip().splitlines()
  nsamps = sum([int(x.split('_')[1]) for x in tx1 if x[:2] == 'WT'])

  # Create MIDI instruments
  p1_prog = pretty_midi.instrument_name_to_program('Lead 1 (square)')
  p2_prog = pretty_midi.instrument_name_to_program('Lead 2 (sawtooth)')
  tr_prog = pretty_midi.instrument_name_to_program('Synth Bass 1')
  no_prog = pretty_midi.instrument_name_to_program('Breath Noise')
  p1 = pretty_midi.Instrument(program=p1_prog, name='p1', is_drum=False)
  p2 = pretty_midi.Instrument(program=p2_prog, name='p2', is_drum=False)
  tr = pretty_midi.Instrument(program=tr_prog, name='tr', is_drum=False)
  no = pretty_midi.Instrument(program=no_prog, name='no', is_drum=True)

  name_to_ins = {'P1': p1, 'P2': p2, 'TR': tr, 'NO': no}
  name_to_pitch = {'P1': None, 'P2': None, 'TR': None, 'NO': None}
  name_to_start = {'P1': None, 'P2': None, 'TR': None, 'NO': None}
  name_to_max_velocity = {'P1': 15, 'P2': 15, 'TR': 1, 'NO': 15}

  samp = 0
  for event in tx1:
    if event[:2] == 'WT':
      samp += int(event[3:])
    else:
      tokens = event.split('_')
      name = tokens[0]
      ins = name_to_ins[tokens[0]]

      old_pitch = name_to_pitch[name]
      if tokens[1] == 'NOTEON':
        if old_pitch is not None:
          ins.notes.append(pretty_midi.Note(
              velocity=name_to_max_velocity[name],
              pitch=old_pitch,
              start=name_to_start[name] / 44100.,
              end=samp / 44100.))
        name_to_pitch[name] = int(tokens[2])
        name_to_start[name] = samp
      else:
        if old_pitch is not None:
          ins.notes.append(pretty_midi.Note(
              velocity=name_to_max_velocity[name],
              pitch=name_to_pitch[name],
              start=name_to_start[name] / 44100.,
              end=samp / 44100.))

        name_to_pitch[name] = None
        name_to_start[name] = None

  # Deactivating this for generated files
  #for name, pitch in name_to_pitch.items():
  #  assert pitch is None

  # Create MIDI and add instruments
  midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
  midi.instruments.extend([p1, p2, tr, no])

  # Create indicator for end of song
  eos = pretty_midi.TimeSignature(1, 1, nsamps / 44100.)
  midi.time_signature_changes.append(eos)

  # Round-trip through a named temp file so the raw MIDI bytes can be
  # returned alongside the PrettyMIDI object (relies on POSIX file semantics).
  with tempfile.NamedTemporaryFile('rb') as mf:
    midi.write(mf.name)
    midi_str = mf.read()

  return midi, midi_str
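The TX1 encoding decoded above represents NES-style music as newline-separated tokens: WT_<n> advances time by n audio samples at 44.1 kHz, <CH>_NOTEON_<pitch> starts a note on one of the four channels (P1, P2, TR, NO), and <CH>_NOTEOFF ends it. A minimal sketch, assuming the function above and tempfile are in scope:

tx1_events = '\n'.join([
    'P1_NOTEON_60',  # middle C on the first pulse channel
    'WT_22050',      # wait 0.5 s (22050 samples at 44.1 kHz)
    'P1_NOTEOFF',
    'WT_22050',
])
midi, midi_str = tx1_to_midi(tx1_events)
midi.write('tx1_test.mid')  # midi_str holds the same data as raw bytes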
Exemple #23
0
def synth_midi(midi_path, output_path, sampling_rate=44100, sf2_path=SOUNDFONT_PATH):
    """Synthesize MIDI into wav audio."""
    midi = pretty_midi.PrettyMIDI(midi_path)
    # Requires the fluidsynth library and the pyfluidsynth bindings.
    raw_wav = midi.fluidsynth(fs=sampling_rate, sf2_path=sf2_path)
    # `wave` is presumably scipy.io.wavfile imported under this name.
    wave.write(output_path, sampling_rate, raw_wav)
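A hedged usage sketch for the helper above. It assumes wave refers to scipy.io.wavfile, that pyfluidsynth and the fluidsynth library are installed, and that a General MIDI soundfont exists at the (illustrative) path below:

from scipy.io import wavfile as wave

SOUNDFONT_PATH = '/usr/share/sounds/sf2/FluidR3_GM.sf2'  # illustrative path

synth_midi('bin/test.mid', 'bin/test.wav', sampling_rate=44100,
           sf2_path=SOUNDFONT_PATH)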
Exemple #24
0
def CheckMidiToSequence(self, filename):
    """Test the translation from PrettyMIDI to Sequence proto."""
    source_midi = pretty_midi.PrettyMIDI(filename)
    sequence_proto = midi_io.midi_to_sequence_proto(source_midi)
    self.CheckPrettyMidiAndSequence(source_midi, sequence_proto)
Exemple #25
0
def main():
    parser = argparse.ArgumentParser(description="Script to generate MIDI tracks by sampling from a trained model.")

    parser.add_argument("--model", type=str, 
            help="Key in saved_models/model.yaml, helps look up model arguments and path to saved checkpoint.")
    parser.add_argument("--sample_length", type=int, default=512,
            help="number of events to generate")
    parser.add_argument("--temps", nargs="+", type=float, 
            default=[1.0],
            help="space-separated list of temperatures to use when sampling")
    parser.add_argument("--n_trials", type=int, default=3,
            help="number of MIDI samples to generate per experiment")
    parser.add_argument("--live_input", action='store_true', default = False,
            help="if true, take in a seed from a MIDI input controller")

    parser.add_argument("--play_live", action='store_true', default=False,
            help="play sample(s) at end of script if true")
    parser.add_argument("--keep_ghosts", action='store_true', default=True)
    parser.add_argument("--stuck_note_duration", type=int, default=1)

    args=parser.parse_args()

    model = args.model
    '''
    try:
        model_dict = yaml.safe_load(open('saved_models/model.yaml'))[model_key]
    except:
        raise GeneratorError(f"could not find yaml information for key {model_key}")
    '''
    # model_path = model_dict["path"]
    # model_args = model_dict["args"]

    # Change the value here to the model you want to run
    model_path = 'saved_models/' + model

    try:
        state = torch.load(model_path)
    except RuntimeError:
        state = torch.load(model_path, map_location="cpu")
    
    n_velocity_events = 32
    n_time_shift_events = 125

    decoder = SequenceEncoder(n_time_shift_events, n_velocity_events,
           min_events=0)

    if args.live_input:
        pretty_midis = []
        m = 'twinkle.midi'
        with open(m, "rb") as f:
                try:
                    midi_str = six.BytesIO(f.read())
                    pretty_midis.append(pretty_midi.PrettyMIDI(midi_str))
                    #print("Successfully parsed {}".format(m))
                except:
                    print("Could not parse {}".format(m))
        pipeline = PreprocessingPipeline(input_dir="data")
        note_sequence = pipeline.get_note_sequences(pretty_midis)
        note_sequence = [vectorize(ns) for ns in note_sequence]
        prime_sequence = decoder.encode_sequences(note_sequence)
        prime_sequence = prime_sequence[1:6]


    else:
        prime_sequence = []

    # model = MusicTransformer(**model_args)
    model = MusicTransformer(256 + 125 + 32, 1024,
            d_model=64, n_heads=8, d_feedforward=256,
            depth=4, positional_encoding=True, relative_pos=True)

    model.load_state_dict(state, strict=False)

    temps = args.temps

    trial_key = str(uuid.uuid4())[:6]
    n_trials = args.n_trials

    keep_ghosts = args.keep_ghosts
    stuck_note_duration = None if args.stuck_note_duration == 0 else args.stuck_note_duration

    for temp in temps:
        print(f"sampling temp={temp}")
        note_sequence = []
        for i in range(n_trials):
            print("generating sequence")
            output_sequence = sample(model, prime_sequence=prime_sequence,
                sample_length=args.sample_length, temperature=temp)
            note_sequence = decoder.decode_sequence(output_sequence,
                verbose=True, stuck_note_duration=stuck_note_duration,
                keep_ghosts=keep_ghosts)

            output_dir = f"output/midis/{trial_key}/"
            file_name = f"sample{i+1}_{temp}"
            write_midi(note_sequence, output_dir, file_name)
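The sample function used above is project-specific, but the temperature parameter it takes implements standard temperature scaling: the model's logits are divided by the temperature before the softmax, so values below 1.0 sharpen the distribution and values above 1.0 flatten it. A self-contained NumPy sketch of that single step (names are illustrative, not the project's API):

import numpy as np

def sample_event(logits, temperature=1.0):
    # Divide logits by the temperature, then apply a numerically stable softmax.
    scaled = np.asarray(logits, dtype=np.float64) / temperature
    probs = np.exp(scaled - scaled.max())
    probs /= probs.sum()
    # Draw one event index from the resulting distribution.
    return np.random.choice(len(probs), p=probs)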
Exemple #26
0
def CheckSequenceToPrettyMidi(self, filename):
    """Test the translation from Sequence proto to PrettyMIDI."""
    source_midi = pretty_midi.PrettyMIDI(filename)
    sequence_proto = midi_io.midi_to_sequence_proto(source_midi)
    translated_midi = midi_io.sequence_proto_to_pretty_midi(sequence_proto)
    self.CheckPrettyMidiAndSequence(translated_midi, sequence_proto)
Exemple #27
0
def midi_to_sequence_proto(midi_data, continue_on_exception=False):
    """Convert MIDI file contents to a tensorflow.magenta.NoteSequence proto.

  Converts a MIDI file encoded as a string into a
  tensorflow.magenta.NoteSequence proto. Decoding errors are very common when
  working with large sets of MIDI files. To support batch processing the
  argument continue_on_exception (when True) will catch all exceptions from the
  decoding library pretty_midi, log an error, and return None.

  Args:
    midi_data: A string containing the contents of a MIDI file or populated
        pretty_midi.PrettyMIDI object.
    continue_on_exception: A boolean that when true causes all exceptions from
        the decoder to be caught and ignored. Instead, an error is logged and
        None is returned.

  Returns:
    A tensorflow.magenta.NoteSequence proto or None if midi_data could not be
    decoded and if continue_on_exception is True.

  Raises:
    MIDIConversionError: If the MIDI data could not be decoded (and
        continue_on_exception is False), or if an invalid key signature mode
        was encountered.
  """

    # In practice many MIDI files cannot be decoded with pretty_midi. Catch all
    # errors here and try to log a meaningful message. So many different
    # exceptions are raised in pretty_midi.PrettyMIDI that it is cumbersome to
    # catch them all only for the purpose of error logging.
    # pylint: disable=bare-except
    if isinstance(midi_data, pretty_midi.PrettyMIDI):
        midi = midi_data
    else:
        try:
            midi = pretty_midi.PrettyMIDI(six.BytesIO(midi_data))
        except:
            if continue_on_exception:
                tf.logging.error('Midi decoding error %s: %s',
                                 sys.exc_info()[0],
                                 sys.exc_info()[1])
                return None
            else:
                raise MIDIConversionError('Midi decoding error %s: %s' %
                                          (sys.exc_info()[0],
                                           sys.exc_info()[1]))
    # pylint: enable=bare-except

    sequence = music_pb2.NoteSequence()

    # Populate header.
    sequence.ticks_per_beat = midi.resolution

    # Populate time signatures.
    for midi_time in midi.time_signature_changes:
        time_signature = sequence.time_signatures.add()
        time_signature.time = midi_time.time
        time_signature.numerator = midi_time.numerator
        time_signature.denominator = midi_time.denominator

    # Populate key signatures.
    for midi_key in midi.key_signature_changes:
        key_signature = sequence.key_signatures.add()
        key_signature.time = midi_key.time
        key_signature.key = midi_key.key_number % 12
        midi_mode = midi_key.key_number // 12
        if midi_mode == 0:
            key_signature.mode = key_signature.MAJOR
        elif midi_mode == 1:
            key_signature.mode = key_signature.MINOR
        else:
            raise MIDIConversionError('Invalid midi_mode %i' % midi_mode)

    # Populate tempo changes.
    tempo_times, tempo_bpms = midi.get_tempo_changes()
    for time_in_seconds, tempo_in_bpm in zip(tempo_times, tempo_bpms):
        tempo = sequence.tempos.add()
        tempo.time = time_in_seconds
        tempo.bpm = tempo_in_bpm

    # Populate notes by first gathering them all from the midi's instruments,
    # then appending each as a new sequence.note. We also set the
    # sequence.total_time to the max end time found in the notes.
    # TODO(@douglaseck): Eliminate some of this boilerplate code.
    midi_notes = []
    midi_pitch_bends = []
    midi_control_changes = []
    for num_instrument, midi_instrument in enumerate(midi.instruments):
        for midi_note in midi_instrument.notes:
            if not sequence.total_time or midi_note.end > sequence.total_time:
                sequence.total_time = midi_note.end
            midi_notes.append(
                (midi_instrument.program, num_instrument, midi_note))
        for midi_pitch_bend in midi_instrument.pitch_bends:
            midi_pitch_bends.append(
                (midi_instrument.program, num_instrument, midi_pitch_bend))
        for midi_control_change in midi_instrument.control_changes:
            midi_control_changes.append(
                (midi_instrument.program, num_instrument, midi_control_change))

    for program, instrument, midi_note in midi_notes:
        note = sequence.notes.add()
        note.instrument = instrument
        note.program = program
        note.start_time = midi_note.start
        note.end_time = midi_note.end
        note.pitch = midi_note.pitch
        note.velocity = midi_note.velocity

    for program, instrument, midi_pitch_bend in midi_pitch_bends:
        pitch_bend = sequence.pitch_bends.add()
        pitch_bend.instrument = instrument
        pitch_bend.program = program
        pitch_bend.time = midi_pitch_bend.time
        pitch_bend.bend = midi_pitch_bend.pitch

    for program, instrument, midi_control_change in midi_control_changes:
        control_change = sequence.control_changes.add()
        control_change.instrument = instrument
        control_change.program = program
        control_change.time = midi_control_change.time
        control_change.control_number = midi_control_change.number
        control_change.control_value = midi_control_change.value

    # TODO(@douglaseck): Estimate note type (e.g. quarter note) and populate
    # note.numerator and note.denominator.

    return sequence
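For corpus-scale conversion, continue_on_exception=True turns decode failures into logged errors and a None return, which makes batch sweeps straightforward. A sketch, assuming this function is in scope (the glob pattern is illustrative):

import glob

sequences = []
for path in glob.glob('data/midis/*.mid'):
    with open(path, 'rb') as f:
        seq = midi_to_sequence_proto(f.read(), continue_on_exception=True)
    if seq is not None:
        sequences.append(seq)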
Exemple #28
0
import pretty_midi

extracted_drum = pretty_midi.PrettyMIDI()
generated_drum = pretty_midi.Instrument(program=1)
generated_drum.is_drum = False

for i in range(10):
    new_note = pretty_midi.Note(velocity=110,
                                pitch=35,
                                start=i * 2,
                                end=i * 2 + .3)
    generated_drum.notes.append(new_note)
extracted_drum.instruments.append(generated_drum)
extracted_drum.write('pitch_test.mid')

# 25 ~ 85
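Note the design choice above: is_drum is deliberately left False so the generated notes play on a melodic channel for pitch testing. A one-line variant (hypothetical, reusing the same objects) would route them to the General MIDI percussion channel instead, where pitch 35 is the Acoustic Bass Drum:

generated_drum.is_drum = True  # pretty_midi writes these notes to channel 10
extracted_drum.write('drum_test.mid')  # illustrative file name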
Exemple #29
0
def load_midi_melody(self, midi=None):
    if midi is None:
        midi = pretty_midi.PrettyMIDI(FLAGS.prime_midi_melody_fpath)
    return self.decoder.encode_midi_melody_to_pianoroll(midi)
Exemple #30
0
def decode_midi(idx_array, file_path=None):
    event_sequence = [Event.from_int(idx) for idx in idx_array]
    # print(event_sequence)
    snote_seq = _event_seq2snote_seq(event_sequence)
    note_seq = _merge_note(snote_seq)
    note_seq.sort(key=lambda x: x.start)

    mid = pretty_midi.PrettyMIDI()
    # If you want to change the instrument, see
    # https://www.midi.org/specifications/item/gm-level-1-sound-set
    instrument = pretty_midi.Instrument(1, False, "Developed By Yang-Kichang")
    instrument.notes = note_seq

    mid.instruments.append(instrument)
    if file_path is not None:
        mid.write(file_path)
    return mid


if __name__ == '__main__':
    encoded = encode_midi('bin/ADIG04.mid')
    print(encoded)
    decoded = decode_midi(encoded, file_path='bin/test.mid')

    ins = pretty_midi.PrettyMIDI('bin/ADIG04.mid')
    print(ins)
    print(ins.instruments[0])
    for i in ins.instruments:
        print(i.control_changes)
        print(i.notes)