Example #1
File: musing.py  Project: lfzark/musing
    def midi_to_note(filename=DEFAULT_MIDI_FILE, is_name=False, instrument_id=False):
        '''
        Load a MIDI file and return its notes as a list (note names when is_name is True)
        '''
        note_list = []
        try:
            midi_data = pretty_midi.PrettyMIDI(filename)
        except IOError as ioe:
            raise ValueError('PrettyMIDI - IOError: %s' % (ioe))

        if instrument_id:
            for note in midi_data.instruments[instrument_id].notes:
                if is_name:
                    note_list.append(
                        pretty_midi.note_number_to_name(note.pitch))
                else:
                    note_list.append(note)

        else:
            for instrument in midi_data.instruments:
                if not instrument.is_drum:
                    for note in instrument.notes:
                        if is_name:
                            note_list.append(
                                pretty_midi.note_number_to_name(note.pitch))
                        else:
                            note_list.append(note)
        return note_list
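A minimal usage sketch for midi_to_note ('song.mid' is a placeholder path; it assumes pretty_midi is installed and the function above is in scope):

# Hypothetical call: list the first ten note names of a MIDI file.
names = midi_to_note(filename='song.mid', is_name=True)
print(names[:10])

# With is_name left False, the raw pretty_midi.Note objects are returned instead.
notes = midi_to_note(filename='song.mid')
print(notes[0].pitch, notes[0].start, notes[0].end)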
Example #2
def find_midi_note_index(midi_notes, start, end, pitch, ornament=False):
    """
    Find the corresponding MIDI note index for one XML note

    Parameters
    -----------
    midi_notes : list of midi note object
    start: midi start time in match_list
    end : midi end time in match_list
    pitch : midi pitch in match_list (in string)
    ornament : whether it's ornament

    Returns
    -----------
    dictionary of midi index and pitch, or -1 if no matching note is found
    """
    pitch = check_pitch(pitch)
    if not ornament:
        for i, note in enumerate(midi_notes):
            if (abs(note.start - start) < 0.001) and (abs(note.end - end) < 0.001) and (note.pitch == pretty_midi.note_name_to_number(pitch)):
                return {'idx': i, 'pitch': pretty_midi.note_number_to_name(note.pitch)}
    else:
        for i, note in enumerate(midi_notes):
            if (abs(note.start - start) < 0.001) and (abs(note.end - end) < 0.001) and (abs(note.pitch - pretty_midi.note_name_to_number(pitch)) <= 2):
                return {'idx': i, 'pitch': pretty_midi.note_number_to_name(note.pitch)}
    return -1
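A small usage sketch (assumes pretty_midi is imported and that check_pitch, shown later in this listing, is in scope; the notes below are made up):

midi_notes = [
    pretty_midi.Note(velocity=80, pitch=60, start=0.0, end=0.5),  # C4
    pretty_midi.Note(velocity=80, pitch=64, start=0.5, end=1.0),  # E4
]
match = find_midi_note_index(midi_notes, start=0.5, end=1.0, pitch='E4')
print(match)  # {'idx': 1, 'pitch': 'E4'}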
Example #3
def chopster(dframe):
    # Figure out range of frame (0-128)
    df_max = dframe.max(axis=0)

    dframe.drop(labels=[
        pretty_midi.note_number_to_name(n) for n in range(m_midi_end, 128)
    ],
                axis=1,
                inplace=True)
    dframe.drop(labels=[
        pretty_midi.note_number_to_name(n) for n in range(0, m_midi_start)
    ],
                axis=1,
                inplace=True)
    return dframe
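A hypothetical, self-contained sketch of how chopster might be exercised. m_midi_start and m_midi_end are module-level constants that the snippet above assumes but does not define, so they are set here purely for illustration:

import pandas as pd
import pretty_midi

m_midi_start, m_midi_end = 48, 108  # assumed bounds: C3 .. B7
columns = [pretty_midi.note_number_to_name(n) for n in range(0, 128)]
df = pd.DataFrame([[0] * 128], columns=columns)
print(chopster(df).shape)  # (1, 60): only the columns for notes 48-107 remain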
Example #4
 def convert_midi_num_to_note_name(self, midi_num):
     # midi note number & note name
     # https://newt.phys.unsw.edu.au/jw/notes.html
     if midi_num < 21 or midi_num > 108:
         return ""
     else:
         return pretty_midi.note_number_to_name(int(midi_num))
Example #5
def accompaniment_generator(outputpath, base_key, tempo, measure, n_passages):
    """
    Parameters
    outputpath: str, output file's path
    base_key: int, MIDI note number (must be between 40 and 60)
    tempo: int, tempo in beats per minute
    measure: int, beats per passage; 3 or 4 recommended
    n_passages: int, number of passages (length of the piece)
    """
    assert base_key <= 60 and base_key >= 40, 'base_key is out of range'

    passage_time = measure / (tempo / 60)    # duration of one passage in seconds

    # create a PrettyMIDI object
    piano_chord = pm.PrettyMIDI(initial_tempo=tempo)
    # create an Instrument instance for an orchestral harp
    piano_program = pm.instrument_name_to_program('Orchestral Harp')
    piano = pm.Instrument(program=piano_program)

    INIT_STATE = 0
    cur_state = INIT_STATE
    chord_progression = []

    for i in range(0, n_passages):
        cur_state = get_next_state(STATE_PROB[cur_state])
        chord = [note + base_key for note in chords[cur_state]]

        put_rondo(piano, chord, i*passage_time, passage_time, 3)
        chord_progression.append([pm.note_number_to_name(note) for note in chord])

    piano_chord.instruments.append(piano)
    piano_chord.write(outputpath)
    return chord_progression
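The helpers get_next_state, put_rondo, STATE_PROB and chords are project-specific and not reproduced here, so the sketch below only shows the pretty_midi plumbing the function relies on (writing one chord and naming its notes); 'chord_sketch.mid' is a placeholder path:

import pretty_midi as pm

song = pm.PrettyMIDI(initial_tempo=120)
harp = pm.Instrument(program=pm.instrument_name_to_program('Orchestral Harp'))
for offset in (0, 4, 7):  # a major triad above the base key (here C3 = 48)
    harp.notes.append(pm.Note(velocity=90, pitch=48 + offset, start=0.0, end=2.0))
song.instruments.append(harp)
song.write('chord_sketch.mid')
print([pm.note_number_to_name(48 + o) for o in (0, 4, 7)])  # ['C3', 'E3', 'G3']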
Example #6
    def __init__(self, csv_file, transform, midi_start=48, midi_end=108):
        """
        Args:
            csv_file (string): Path to the csv file with piano rolls per song.
            transform (callable): Transform to be applied on a sample, is expected to implement "get_sections".
            midi_start (int): The first midi note in the dataset
            midi_end (int): The end of the midi note range (exclusive)
        """

        dtypes = {'piano_roll_name': 'str', 'timestep': 'uint32'}
        column_names = [
            pretty_midi.note_number_to_name(n)
            for n in range(midi_start, midi_end)
        ]
        for column in column_names:
            dtypes[column] = 'uint8'

        self.piano_rolls = pd.read_csv(
            csv_file,
            sep=',',
            index_col=['piano_roll_name', 'timestep'],
            dtype=dtypes)
        self.transform = transform

        self.init_dataset()
Example #7
    def parse_track(self, track_num, is_solo=True):
        """
        Parses the given track and returns a list of note objects
        * Solo part will use Pitch enum for pitch
        * Accompaniment parts will use midi number for pitch.
        * Long notes > 4 beats will get split into 4 beats + the rest of the note.
        :param track_num:
        :param is_solo:
        :return:
        """
        notes = []
        track = self.midi_file.tracks[track_num]
        for i in range(1, len(track.events) - 1):
            event = track.events[i]
            delta_time = track.events[i + 1]

            if delta_time.time is not None and delta_time.time > 1:
                if event.type != self.NOTE_ON or event.velocity == 0:
                    pitch = Pitch.REST
                else:
                    if is_solo:
                        pitch = Note.note_name_to_pitch_enum(
                            pretty_midi.note_number_to_name(event.pitch))
                    else:
                        pitch = event.pitch

                num_beats = round(
                    delta_time.time / self.midi_file.ticksPerQuarterNote, 2)
                while num_beats > 4:  # while num beats is greater than beats left in bar.
                    notes.append(Note(pitch, Duration(4).value))
                    num_beats -= 4

                if num_beats > 0:
                    notes.append(Note(pitch, Duration(num_beats).value))
        return notes
Example #8
def chord_notes(chord_name, octaves=[4]):
    roots = chord_root(chord_name, octaves)
    notes = chord_mode_offsets(chord_name)
    n = []
    for r in roots:
        n += [pm.note_number_to_name(r + note) for note in notes]
    return n
Example #9
def get_midi_data(userNoteName, start, end):
    print('Time for', userNoteName, 'got:', time.time())
    if len(userNoteName) == 3:
        userNoteName = userNoteName[0] + '#' + userNoteName[2]
    primer = pretty_midi.PrettyMIDI()
    instrument = pretty_midi.Instrument(
        program=pretty_midi.instrument_name_to_program('Cello'))
    noteNumber = pretty_midi.note_name_to_number(userNoteName)
    try:
        note = pretty_midi.Note(velocity=100,
                                pitch=noteNumber,
                                start=int(start),
                                end=int(start) + int(end))
    except ValueError:
        return []
    instrument.notes.append(note)
    primer.instruments.append(instrument)
    output = generate_midi(primer, total_seconds=2)  # Takes about 4-6 seconds
    aiNotes = []
    try:
        note = output.instruments[0].notes[0]
        notePitch, noteStart, noteEnd = pretty_midi.note_number_to_name(
            note.pitch), note.start, note.end
        if len(notePitch) == 3:
            notePitch = notePitch[0] + 's' + notePitch[2]
        aiNotes.append((notePitch, noteStart, noteEnd))
        print('AI notes are', aiNotes)
        return aiNotes
    except IndexError:
        return []
Example #10
def compare_piano_rolls(piano_roll1,
                        piano_roll2,
                        crop=None,
                        title="",
                        time_grid=False,
                        show=False):
    if crop:
        labels = list(range(crop[0], crop[1]))
    else:
        labels = list(range(0, 128))
    labels = [pm.note_number_to_name(x) for x in labels]

    fig, [ax1, ax2] = plt.subplots(2, 1)
    ax1.imshow(piano_roll1, aspect='auto', origin='lower')
    ax1.set_yticks([x + 0.5 for x in list(range(len(labels)))])
    ax1.set_yticklabels(labels, fontsize=5)
    if time_grid:
        ax1.set_xticks([x + 0.5 for x in list(range(piano_roll1.shape[1]))])
        ax1.grid(True, axis='both', color='black')
    else:
        ax1.grid(True, axis='y', color='black')
    plt.title(title)

    ax2.imshow(piano_roll2, aspect='auto', origin='lower')
    ax2.set_yticks([x + 0.5 for x in list(range(len(labels)))])
    ax2.set_yticklabels(labels, fontsize=5)
    if time_grid:
        ax2.set_xticks([x + 0.5 for x in list(range(piano_roll2.shape[1]))])
        ax2.grid(True, axis='both', color='black')
    else:
        ax2.grid(True, axis='y', color='black')
    if show:
        plt.show()
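A hypothetical call with two random 12-row excerpts (assumes the numpy, matplotlib and pretty_midi imports used by the function are already in place; the data is random and only meant to exercise the plot):

import numpy as np

roll_a = np.random.randint(0, 2, size=(12, 32))
roll_b = np.random.randint(0, 2, size=(12, 32))
# crop=(60, 72) labels the 12 rows as C4..B4
compare_piano_rolls(roll_a, roll_b, crop=(60, 72), title='random rolls', show=True)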
Example #11
def print_data(data):

    print('*' * 33)
    print('melody for the data is:')

    notes_per_line = 16
    number_of_ts = len(data[0])

    t = 0
    while t < number_of_ts - 1:
        for inst in range(INSTRUMENTS):
            for i in range(notes_per_line):
                onset = False
                for pitch in range(128):
                    if not onset and t < number_of_ts and data[inst, t, pitch]:
                        note_name = pretty_midi.note_number_to_name(pitch)
                        sys.stdout.write('{} '.format(note_name))
                        if len(note_name) == 2:
                            sys.stdout.write(' ')
                        onset = True
                if t < number_of_ts and data[inst, t, NUMBER_FEATURES - 3]:
                    sys.stdout.write('--  ')
                if t < number_of_ts and data[inst, t, NUMBER_FEATURES - 2]:
                    sys.stdout.write('00  ')
                t += 1
            if inst != INSTRUMENTS - 1:
                t -= notes_per_line
            print()
        print()
Example #12
 def __init__(self, in_note_on, in_clip_cursor):
     self.is_playing = True
     self.note = in_note_on.note
     self.note_pretty = pretty_midi.note_number_to_name(self.note)
     self.velocity = in_note_on.velocity
     self.channel = in_note_on.channel
     self._ticks_start = in_clip_cursor
     pass
Example #13
def create_midi_to_str(predicted_pitch, CREATED_FILE_NAME, OUTPUT_FOLDER):
    new_name = CREATED_FILE_NAME

    with open(OUTPUT_FOLDER + new_name + ".txt", "w+") as new_file:
        for i in range(len(predicted_pitch)):
            for pitch in predicted_pitch[i].split("-"):
                note = pretty_midi.note_number_to_name(int(pitch))
                new_file.write(note + " ")
Example #14
def encode_dummies(instrument, sampling_freq):
    """ Gonna cheat a little bit by transposing the instrument piano roll.
        However, that leaves us with a lot of blank space.
        Coercing to type uint8, since the max velocity is 127, and there are also only 128 notes. uint8 goes to 255.
        Saves memory
    """
    note_columns = [pretty_midi.note_number_to_name(n) for n in range(0, 128)]
    pr = instrument.get_piano_roll(fs=sampling_freq).astype('uint8').T
    return pd.DataFrame(pr, columns=note_columns)
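A minimal sketch of encode_dummies on a single-note instrument built in memory (assumes pandas and pretty_midi are imported as above):

inst = pretty_midi.Instrument(program=0)
inst.notes.append(pretty_midi.Note(velocity=100, pitch=60, start=0.0, end=1.0))
frame = encode_dummies(inst, sampling_freq=4)  # 4 frames per second
print(frame.shape)         # roughly (4, 128): one row per frame, one column per pitch
print(frame['C4'].max())   # 100, the note's velocity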
Example #15
def convert(val):
    nbr = pretty_midi.hz_to_note_number(val)
    frac = nbr % 1
    base_note = int(nbr)
    print(frac)
    print(base_note)
    notename = pretty_midi.note_number_to_name(base_note)
    if frac > 0.25 and frac < 0.75:
        if len(notename) == 2:
            symb = '+'

        if len(notename) == 3:
            notename = pretty_midi.note_number_to_name(base_note + 1)
            symb = 'd'
        notename = notename[0] + symb + notename[1]
    elif frac >= 0.75:
        notename = pretty_midi.note_number_to_name(base_note + 1)
    return notename
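Two hypothetical calls: 440 Hz maps cleanly onto A4, while 453 Hz falls roughly a quarter tone above it (the function also prints its intermediate values):

print(convert(440.0))  # 'A4'
print(convert(453.0))  # a quarter-tone spelling such as 'A+4'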
Example #16
def check_pitch(pitch):
    """
    check string pitch format and fix it

    Parameters
    -----------
    pitch : midi string pitch

    Returns
    -----------
    pitch : midi string pitch
    """
    if len(pitch) == 4:
        base_pitch_num = pretty_midi.note_name_to_number(pitch[0] + pitch[-1])
        if pitch[1:3] == 'bb':
            pitch = pretty_midi.note_number_to_name(base_pitch_num - 2)
        if pitch[1:3] == '##':
            pitch = pretty_midi.note_number_to_name(base_pitch_num + 2)
    return pitch
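A few illustrative calls (only pretty_midi is required):

print(check_pitch('Dbb4'))  # 'C4'  (D4 = 62, minus two semitones)
print(check_pitch('F##3'))  # 'G3'  (F3 = 53, plus two semitones)
print(check_pitch('C#4'))   # 'C#4' (names shorter than four characters pass through)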
Example #17
def interval_exam(note_cnt=2):
    notes = list()
    std_number = pretty_midi.note_name_to_number('A4')
    for i in range(note_cnt):
        note = pretty_midi.note_number_to_name(std_number + randint(-12, 12))
        while note in notes:
            note = pretty_midi.note_number_to_name(std_number + randint(-12, 12))
        notes.append(note)
    play_exam(notes)

    ans = [define.note_name_display[x] for x in notes]
    ans = ' '.join(ans)
    user_ans = input()
    if ans == user_ans:
        print('Correct!')
        return True
    else:
        print('Wrong! Correct answer is: ' + ans)
        return False
Example #18
 def __init__(self, midi_start=48, midi_end=108):
     """
     Args:
         midi_start (int): The first midi note in the dataset
         midi_end (int): The end of the midi note range (exclusive)
     """
     self.dtypes = {'piano_roll_name': 'object', 'timestep': 'uint32'}
     self.column_names = [pretty_midi.note_number_to_name(n) for n in range(midi_start, midi_end)]
     for column in self.column_names:
         self.dtypes[column] = 'uint8'
Example #19
def number_to_note(number):
    """
    Extract note name from note number

    :param number: index of note
    :return: note name or "r" for rest
    """
    if number == 128:
        return 'r'
    else:
        return pretty_midi.note_number_to_name(number)
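Two illustrative calls:

print(number_to_note(60))   # 'C4'
print(number_to_note(128))  # 'r' (the rest marker)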
Example #20
def getJSON(filename):
    #songs=['../dataset/'+filename for filename in os.listdir('../dataset')]
    mid = MidiFile(filename)
    data = []

    for track in mid.tracks:
        abs_time = 0
        for msg in track:
            abs_time += msg.time

            if msg.type == 'note_on':
                note = pretty_midi.note_number_to_name(msg.note)
                data.append({'velocity': msg.velocity, 'note': note, 'time': abs_time})
            elif msg.type == 'note_off':
                note = pretty_midi.note_number_to_name(msg.note)
                data.append({'velocity': 0, 'note': note, 'time': abs_time})

    return data
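A hypothetical call (assumes mido's MidiFile is imported as MidiFile, as the snippet expects, and that 'melody.mid' exists):

events = getJSON('melody.mid')
for ev in events[:5]:
    print(ev['time'], ev['note'], ev['velocity'])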
Example #21
def process_midi(midi_file):
    print("processing " +  midi_file)
    csv_file = midi_file.replace(".mid", ".csv")
    # Load MIDI file into PrettyMIDI object
    midi_data = pretty_midi.PrettyMIDI(midi_file)
    midi_list = []
    with open(csv_file, mode='w') as output_file:
        csv_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csv_writer.writerow(["pitch", "start", "end", "duration", "velocity"])
        for instrument in midi_data.instruments:
            for note in instrument.notes:
                midi_list.append(Note(note.pitch,note.start,note.end,note.duration,note.velocity))
        midi_list.sort(key=attrgetter('start'))
        for note in midi_list:
            csv_writer.writerow([pretty_midi.note_number_to_name(note.pitch), "{:0>3.4f}".format(note.start), "{:0>3.4f}".format(note.end), "{:3.4f}".format(note.duration), note.velocity])
Example #22
    def update_inputs(instrument, technique):
        techs = list(orchestra[instrument].keys())
        techs = [{"label": i, "value": i} for i in techs]

        if technique in list(orchestra[instrument].keys()):
            tech = technique
        else:
            tech = list(orchestra[instrument].keys())[0]

        dyns = list(orchestra[instrument][tech].keys())
        dyns = [{"label": i, "value": i} for i in dyns]
        notes = list(orchestra[instrument][tech]['p'].keys())
        notes.sort()
        notes = [{
            "label": pretty_midi.note_number_to_name(int(i)),
            "value": i
        } for i in notes]
        return [techs, notes, dyns]
Example #23
def get_features(path):

    file = pretty_midi.PrettyMIDI(path)
    print(file)

    tempo = file.get_beats()  # note: get_beats() returns an array of beat times, not a tempo value
    n = pretty_midi.note_number_to_name(96)
    roll = file.get_piano_roll(100)

    features = [tempo]
    #print(features)
    print(n)
    print(len(roll))
    print(roll[0])
    return features  #normalize_features([tempo, num_sig_changes, resolution, ts_1, ts_2])


#get_features(path)
Example #24
def making_tone_from_prob_and_prevtone(prev, tone_length, length, chord):
	diff = caliculate_tone_difference(length, tone_length)
	new_tone = "0"
	if prev == "0":
		new_tone = chord[0]
		new_tone = pretty_midi.note_name_to_number(new_tone)
		prev = new_tone
	else:
		prev = pretty_midi.note_name_to_number(prev)
		# 50% chance the pitch moves up or down
		# np.random.randint(0, 2) returns 0 or 1, giving a fair coin flip
		if np.random.randint(0, 2) == 1:
			new_tone = prev+diff
		else:
			new_tone = prev-diff
	# tone_length 2: 50% chance of a chord (inner) tone, 3: 75%, 4: 100%
	# if tone_length < np.random.randint(1, 4):
	# 	new_tone = change_to_inner_tone(chord ,new_tone)
	new_tone = pretty_midi.note_number_to_name(new_tone)
	return new_tone
Example #25
def compare_piano_rolls(piano_roll1, piano_roll2, crop, title=""):
    if crop:
        labels = list(range(crop[0], crop[1] + 1))
    else:
        labels = list(range(0, 128))
    labels = [pm.note_number_to_name(x) for x in labels]

    plt.figure()

    plt.subplot(211)
    plt.imshow(piano_roll1, aspect='auto', origin='lower')
    plt.yticks([x + 0.5 for x in list(range(len(labels)))], labels, fontsize=5)
    ax = plt.gca()
    ax.grid(True, axis='y', color='black')
    plt.title(title)
    plt.subplot(212)
    plt.imshow(piano_roll2, aspect='auto', origin='lower')
    plt.yticks([x + 0.5 for x in list(range(len(labels)))], labels, fontsize=5)
    ax = plt.gca()
    ax.grid(True, axis='y', color='black')
Example #26
 def _show_result_by_pretty_midi(pitches, pred_string, pred_position,
                                 pred_finger):
     print(
         "pitch".ljust(9),
         "".join([
             pretty_midi.note_number_to_name(number).rjust(4)
             for number in pitches
         ]),
     )
     print(
         "string".ljust(9),
         "".join([s.rjust(4) for s in pred_string]),
     )
     print(
         "position".ljust(9),
         "".join([p.rjust(4) for p in pred_position]),
     )
     print(
         "finger".ljust(9),
         "".join([f.rjust(4) for f in pred_finger]),
     )
Example #27
    def update_inputs(instrument, technique):
        techsl = list(orchestra[instrument].keys())
        techs = [{"label": i, "value": i} for i in techsl]
        score_techs = dbc.DropdownMenu(
            [
                dbc.DropdownMenuItem(
                    i, n_clicks=0, id=ddid('techs_l{}'.format(id), i))
                for i in techsl
            ]  #.insert(0, dbc.DropdownMenuItem("Woodwinds", header=True),)
            ,
            label=technique,
            caret=False,
            id='tech_sel{}'.format(id),
            color='#dde',
            toggle_style={
                'color': 'black',
                'border': 'none',
                'fontSize': 30
            },
            bs_size='sm',
            style={
                'bottom': -25,
                'textAlign': 'center',
                'paddingLeft': 15
            })

        if technique in list(orchestra[instrument].keys()):
            tech = technique
        else:
            tech = list(orchestra[instrument].keys())[0]
        dyns = list(orchestra[instrument][tech].keys())
        dyns = [{"label": i, "value": i} for i in dyns]
        notes = list(orchestra[instrument][tech]['p'].keys())
        notes.sort()
        notes = [{
            "label": pretty_midi.note_number_to_name(int(i)),
            "value": i
        } for i in notes]
        return [techs, notes, dyns, score_techs]  #score_techs
Example #28
def get_note_for_freq(f):
    return pretty_midi.note_number_to_name(pretty_midi.hz_to_note_number(f))
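Two illustrative calls (440 Hz is concert A):

print(get_note_for_freq(440.0))   # 'A4'
print(get_note_for_freq(261.63))  # 'C4' (middle C)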
Example #29
def PreprocessMIDIPiano(midi_file: str, save_file: str, append_csv=False):
    '''
    Preprocess a MIDI music file into a data matrix for training.
    :param midi_file: path to the MIDI file
    :param save_file: path of the CSV file to write to
    :return: None; the encoded piano roll is appended to save_file
    '''

    if not append_csv:
        saved_columns = [
            pretty_midi.note_number_to_name(n)
            for n in range(m_midi_start, m_midi_end)
        ]
        piano_rolls = pd.DataFrame(columns=['piano_roll_name', 'timestep'] +
                                   saved_columns)
        piano_rolls = piano_rolls.set_index(['piano_roll_name', 'timestep'])
        piano_rolls.to_csv(save_file, sep=',', encoding='utf-8')

    #Identify the music key: Major or Minor
    #semi_shift = transposer(midi_file)
    #print("shift")
    #Read midi file
    pm = pretty_midi.PrettyMIDI(midi_file)

    #Get the sampling frequency for the data matrix (piano roll) at sixteenth-note resolution
    sampling_freq = 1 / (pm.get_beats()[1] / 4)

    # Only deal with the MIDI pieces that have one instrument as acoustic grand piano
    #print(len(pm.instruments) == 1)
    if len(pm.instruments) > 1:
        raise Exception(
            "Sorry, only MIDI pieces with a single instrument are supported")
    instrument = pm.instruments[0]
    #print(pretty_midi.program_to_instrument_name(int(instrument.program)))
    #assert int(instrument.program) == 0

    #if the music is in a major key, shift its scale to C major
    #if the music is in a minor key, shift its scale to A minor
    #for note in instrument.notes:
    #    note.pitch += semi_shift

    #encode midi to data frame
    df = encode_dummies(instrument, sampling_freq).fillna(value=0)

    #keep only the notes from C3 to B7
    df = chopster(df)

    #trim the empty frames at the beginning of the piece
    df = trim_blanks(df)

    #Ignore the velocity: mark sounding cells as one and silence as zero
    df = minister(df)

    #organize data frame and save
    df.reset_index(inplace=True, drop=True)
    df['timestep'] = df.index
    df['piano_roll_name'] = midi_file
    df = df.set_index(['piano_roll_name', 'timestep'])
    #print(df.head())
    df.to_csv(save_file, sep=',', mode='a', encoding='utf-8', header=False)
Example #30
def score_graphs_from_graph_data(graph_data):
    notenames = []
    for i in range(128):
        notenames.append(pretty_midi.note_number_to_name(i))

    fig_layout = {
        'title': 'Score',
        'plot_bgcolor': 'black',
        'paper_bgcolor': 'black',
        'font': {
            'color': 'white'
        },
        'xaxis': {
            'title': 'Bar',
            # 'rangeslider': {'visible': True},
            # 'rangeselector': {'visible': True},
        },
        'yaxis': {
            'tickmode': 'array',
            'tickvals': np.arange(128),
            'ticktext': notenames,
            'range': [36, 96],
            'nticks': 10,
            'title': 'note'
        },
        'dragmode': 'pan',
        # 'showscale': False,
        # 'coloraxis_showscale': False
    }
    fig_config = {
        'displayModeBar': False,
        'scrollZoom': False,
        # 'modeBarButtons':{'zoom3d':True}
    }

    trace_template = {
        "type": "heatmap",
        "zmin": 0,
        "zmax": 1,
        'showlegend': True,
        'showscale': False,
        # 'opacity': 0.5,
    }
    ##DO GRAPHS
    scale = 2  #calculated 1/scale values, i.e. 2 = 1/2 = 0.5
    score_pianoroll = sc.Pianoroll(
        id='pianoroll_graph',
        stave_list=graph_data['stave_list'],
        bar_offset=graph_data['bar_offset'][0],
        width=len(graph_data['ticks_for_bar_start']) * 200 * scale,
        height=(len(graph_data['instrument']) * 70 / scale) + 100,
        scale=1 / scale,
        stave_spacing=70)
    score_pianoroll = html.Div(
        children=score_pianoroll,
        style={
            'backgroundColor': '#eed',
            'width':
            (len(graph_data['ticks_for_bar_start']) * 200 + 100) / scale,
            'height': (len(graph_data['instrument']) * 70 / scale) + 100
        })
    score_pianoroll = html.Div(
        id='pianoroll_container',
        children=[
            html.Div(
                'Score with the target at the top. The redness of the target means less audibility',
                style={
                    'backgroundColor': '#eed',
                    'color': 'black',
                    'fontSize': 30,
                    'textAlign': 'center'
                }), score_pianoroll
        ],
        style={
            'backgroundColor': '#eed',
            'width': '100%',
            'overflowX': 'auto'
        })
    score_pianoroll = html.Div([
        dbc.Button('download score as PNG',
                   id='pianoroll_png',
                   block=True,
                   className='downloadpng',
                   size='sm'), score_pianoroll
    ])

    fig_layout['xaxis']['tickmode'] = 'array'
    fig_layout['xaxis']['tickvals'] = graph_data['ticks_for_bar_start']
    fig_layout['xaxis']['ticktext'] = np.arange(len(graph_data['downbeats'][graph_data['bar_offset'][0]:graph_data['bar_offset'][1]])) + \
                                      graph_data['bar_offset'][0] + 1  # Do the math to get the right text

    #Set 3d camera direct above:
    camera = dict(eye=dict(x=1 * 0.5, y=0., z=2.5 *
                           0.5)  #Lower coefficient = bigger zoom
                  )
    layout3d = {
        'plot_bgcolor': 'black',
        'paper_bgcolor': 'black',
        'font': {
            'color': 'white'
        },  #'width': '1000', 'height': '500',
        'scene': {
            "aspectratio": {
                "x": 1,
                "y": 4,
                "z": 0.5
            },
            'camera': camera,
        },

        #Layout template for 2d graphs
    }
    layout2d = {
        'height': '300',
        'plot_bgcolor': 'black',
        'paper_bgcolor': 'black',
        'font': {
            'color': 'white'
        },
    }

    tickvals_enhanced = []
    k = 1
    for i in range(graph_data['score_length']):
        tickvals_enhanced.append(k)
        if i + 1 in graph_data['ticks_for_bar_start']:
            k += 1

    #Do measure numbering for 2d graphs
    layout2d['xaxis'] = dict()
    layout2d['xaxis']['tickmode'] = 'array'
    layout2d['xaxis']['tickvals'] = graph_data['ticks_for_bar_start']
    layout2d['xaxis']['ticktext'] = np.arange(
        len(graph_data['downbeats']
            [graph_data['bar_offset'][0]:graph_data['bar_offset'][1]])
    ) + graph_data['bar_offset'][0] + 1  # Do the math to get the right text

    #Do measure numbering for 3d graph
    layout3d['scene']['yaxis'] = dict()
    layout3d['scene']['yaxis']['title'] = 'Bar number'
    layout3d['scene']['yaxis']['tickmode'] = 'array'
    layout3d['scene']['yaxis']['tickvals'] = np.arange(
        graph_data['score_length'])  #ticks_for_bar_start
    layout3d['scene']['yaxis'][
        'ticktext'] = tickvals_enhanced  #np.arange(len(midi_data.get_downbeats()[bar_offset[0]:bar_offset[1]])) + bar_offset[0] + 1  # Do the math to get the right text
    layout3d['scene']['yaxis']['showgrid'] = True
    layout3d['scene']['xaxis'] = dict()
    layout3d['scene']['xaxis']['title'] = 'Critical band'
    layout3d['scene']['xaxis']['tickvals'] = np.arange(107)
    layout3d['scene']['xaxis']['ticktext'] = np.flip(constants.threshold[:, 0])
    layout3d['scene']['xaxis']['title'] = 'Masking threshold in dB'
    #layout3d['scene']['xaxis']['fixedrange'] = True

    zoom_enable = dbc.FormGroup([
        dbc.Checkbox(checked=False, id='zoom_enable'),
        dbc.Label(' enable mouse scroll wheel zoom', html_for='zoom_enable')
    ],
                                style={'textAlign': 'center'})
    graph3d = dcc.Graph(
        id='3d_graph',
        figure={
            'data': [
                go.Surface(
                    z=graph_data['orchestration_masking_curves'],
                    opacity=1,
                    reversescale=True,
                    colorscale='Spectral',
                    showscale=False,
                    name='Orchestration',
                    hovertemplate=
                    'Bar number: %{y}, Critical band: %{x}, Masking threshold %{z} dB'
                ),
                go.Surface(
                    z=graph_data['target_peaks_over_masking'],
                    opacity=1,
                    colorscale='blugrn',
                    showscale=False,
                    name='Target',
                    hovertemplate=
                    'Bar number: %{y}, Critical band: %{x}, Excitation %{z} dB'
                )
            ],
            'layout':
            layout3d
        },
        config=fig_config)
    graph3d = html.Div([zoom_enable, graph3d])
    x_axis = np.arange(graph_data['score_length'])

    def set_id(number):
        return {'type': 'a_graph', 'index': number}

    layoutA = layout2d.copy()
    layoutA['title'] = 'Target spectral peaks masked, in %'
    masking_threshold_graph = dcc.Graph(
        id=set_id(1),
        figure={
            'data': [
                go.Scatter(
                    x=x_axis,
                    y=graph_data['target_masking_percent_array'],
                    fill='tozeroy',
                    name='Target audibility',
                    line={'color': 'olive'},
                    hovertemplate=
                    '%{y} percent of target peaks masked by orchestration')
            ],
            'layout':
            layoutA
        },
        config=fig_config)
    masking_threshold_graph = html.Div([
        html.Div(
            'Click anywhere in the graph to show orchestration at current point',
            style={'textAlign': 'center'}), masking_threshold_graph
    ])

    orchestration_var_coeffs = np.array(graph_data['orchestration_var_coeffs'])
    orchestration_var_coeffs[
        orchestration_var_coeffs > 5] = 5  #Delete anomalies in var_coeff
    layoutB = layout2d.copy()
    layoutB[
        'title'] = 'Orchestration variation coefficient (homogeneity of the orchestration)'
    variation_coefficient_graph = dcc.Graph(
        id=set_id(2),
        figure={
            'data': [
                go.Scatter(x=x_axis,
                           y=orchestration_var_coeffs,
                           line={'color': 'sienna'},
                           fill='tozeroy',
                           name='Variation coefficient',
                           hovertemplate='Homogeneity coefficient: %{y}')
            ],
            'layout':
            layoutB
        },
        config=fig_config)
    variation_coefficient_graph = html.Div([
        html.Div(
            'Click anywhere in the graph to show orchestration at current point',
            style={'textAlign': 'center'}), variation_coefficient_graph
    ])
    layoutC = layout2d.copy()
    layoutC['title'] = 'Orchestration and target centroid comparison'
    centroid_graph = dcc.Graph(
        id=set_id(3),
        figure={
            'data': [
                go.Scatter(x=x_axis,
                           y=graph_data['orchestration_centroids'],
                           name='Orchestration',
                           hovertemplate='Centroid: %{y}Hz'),
                go.Scatter(x=x_axis,
                           y=graph_data['target_centroids'],
                           name='Target',
                           hovertemplate='Centroid: %{y}Hz')
            ],
            'layout':
            layoutC
        },
        config=fig_config)
    centroid_graph = html.Div([
        html.Div(
            'Click anywhere in the graph to show orchestration at current point',
            style={'textAlign': 'center'}), centroid_graph
    ])

    layoutD = layout2d.copy()
    layoutD['title'] = 'Orchestration and target color distance'
    distance_graph = dcc.Graph(
        id=set_id(4),
        figure={
            'data': [
                go.Scatter(
                    x=x_axis,
                    y=graph_data['mfcc_distance_vector'],
                    fill='tozeroy',
                    line={'color': 'moccasin'},
                    name='Color distance',
                    hovertemplate='Target distance from orchestration: %{y}')
            ],
            'layout':
            layoutD
        },
        config=fig_config)
    distance_graph = html.Div([
        html.Div(
            'Click anywhere in the graph to show orchestration at current point',
            style={'textAlign': 'center'}), distance_graph
    ])

    #masking3d=do3dgraph(midi_data, tgt, orchestration_pianoroll)

    #Disable unnecessary graph for now
    graph = dcc.Graph(
        id='midi_graph',
        figure={
            'data': graph_data['all_traces'],
            'layout': fig_layout
        },
        config=fig_config,
    )

    analyzed_material = graph_data['analyzed_material']

    return [
        score_pianoroll, masking_threshold_graph, variation_coefficient_graph,
        centroid_graph, distance_graph, graph3d, graph, analyzed_material
    ]
Example #31
dir = '2'

for dir_name in os.listdir(dir):
    note_list = []  # notes collected for this sub-directory
    for filename in os.listdir(dir + os.sep + dir_name):
        file_path = dir + os.sep + dir_name + os.sep + filename
        with open(file_path, 'rt') as f:
            try:
                pm = pretty_midi.PrettyMIDI(file_path)
                note_list.extend(pm.instruments[0].notes)
            except:
                continue

    note_duration = []
    note_pitch = []
    for _ in note_list:
        if _.end > _.start:
            note_duration.append(round((_.end - _.start)*8))
            note_pitch.append(pretty_midi.note_number_to_name(_.pitch)[:-1])

    pitch_dist = leaders(note_pitch)
    duration_dist = leaders(note_duration)
    print('\n', dir_name)
    for _, __ in enumerate(pitch_dist[3:] + pitch_dist[:3]):
        print("({}, {})".format(_, __[1]), end='')

    print('\n')
    for _ in duration_dist:
        if float(_[0]) <= 32:
            print("({},{})".format(_[0], _[1]), end='')
Example #32
File: midix.py  Project: samhealer/magpie
#!/usr/local/bin/python
import sys
import pretty_midi
midi_data = pretty_midi.PrettyMIDI('drums.mid')


pr = midi_data.instruments[0].get_piano_roll().tolist()
pr_len = len(pr[0])
new_pr = []
print(pr_len)
for i in range(0,pr_len):
	tmp_pr = []
	for j in range(0, 128):  # all 128 MIDI pitches (0-127)
		if pr[j][i] > 0:
			if midi_data.instruments[0].is_drum:
				tmp_pr.append(pretty_midi.note_number_to_drum_name(j))
			else:
				tmp_pr.append(pretty_midi.note_number_to_name(j))
	new_pr.append(tmp_pr)
print(new_pr)

# Synthesize the resulting MIDI data using sine waves
#audio_data = midi_data.synthesize()

print(pretty_midi.note_name_to_number("C3"))