Example No. 1
0
def nptensor_to_midi(x_data, out_filename):
    """Decode a one-hot note tensor into a single-track MIDI file.

    Each row of ``x_data`` is one time step. A 1 at flat index ``k``
    (with ``k < num_dims - 1``) encodes a note event as
    ``k = duration_slot * 720 + velocity_slot * 80 + pitch_offset``;
    a 1 in the final dimension marks a rest step with no new notes.
    (Encoding inferred from the index arithmetic below — confirm
    against the matching encoder.)

    Parameters:
        x_data: 2-D array-like of 0/1 values, shape (steps, num_dims).
        out_filename: path of the .mid file to write.

    Side effects: writes ``out_filename`` and prints 'good' on completion.
    """
    midi = MidiOutFile(out_filename)

    # Only num_dims is needed here; the rest of the net config is irrelevant.
    config = nn_config.get_neural_net_configuration()
    num_dims = config['num_dims']

    # Mandatory MIDI file framing.
    midi.header(format=0, nTracks=1, division=10)
    midi.start_of_track()
    midi.tempo(500000)  # 500000 us per quarter note = 120 BPM

    last_event_time = 0          # delta ticks since the previous emitted event
    now_playing = []             # entries: [start_step, note_number, duration_steps]
    time_table = [2, 5, 10, 15, 20, 30, 40]  # duration slot -> length in steps

    for i in range(len(x_data)):
        active_indices = np.argwhere(x_data[i] == 1)

        # Final dimension set means "rest": emit no new note_on this step.
        if x_data[i][num_dims - 1] != 1:
            for one_note in active_indices:
                duration_slot = int(one_note / 720)
                velocity = (int(one_note % 720 / 80) + 1) * 10
                pitch = int(one_note) % 720 % 80 + 21  # 21 = MIDI A0
                midi.update_time(last_event_time)
                midi.note_on(channel=0, note=pitch, velocity=velocity)
                last_event_time = 0
                now_playing.append([i, pitch, time_table[duration_slot]])

        # Turn off notes whose duration has elapsed and drop them from the
        # active list. (The original `del now_note` only unbound the loop
        # variable, so finished notes were re-scanned forever.)
        still_playing = []
        for start, pitch, duration in now_playing:
            if i - start >= duration:
                midi.update_time(last_event_time)
                midi.note_off(channel=0, note=pitch)
                last_event_time = 0
            else:
                still_playing.append([start, pitch, duration])
        now_playing = still_playing

        last_event_time += 1  # one division tick per tensor step

    # Mandatory MIDI file closing events.
    midi.update_time(0)
    midi.end_of_track()
    midi.eof()

    print('good')
Example No. 2
0
# File paths for the receive/send round-trip pipeline (mp3 -> wav -> mid).
recv_file = './test/' + 'veryveryvery.mp3'
recv_file_tmp = './test/' + 'received_music_tmp.mp3'
recv_file_wav = './test/' + 'received_music_wav.wav'
recv_file_mid = './test/' + 'received_music_mid.mid'
send_file = './test/' + 'sending_music.mp3'
send_file_wav = './test/' + 'sending_music_wav.wav'
send_file_mid = './test/' + 'sending_music_mid.mid'
x_file = './test/'

# Clip boundaries in seconds: extract [start_time, end_time) from the input.
# finish_time is presumably used further down — not visible in this chunk.
start_time = 00
end_time = 10
finish_time = 30

# Figure out how many frequencies we have in the data
# freq_space_dims = X_train.shape[2]
config = nn_config.get_neural_net_configuration()
model_basename = config['model_basename']
hidden_dims = config['hidden_dimension_size']
num_dims = config['num_dims']
# Creates a lstm network
model = network_utils.create_lstm_network(num_frequency_dimensions=num_dims,
                                          num_hidden_dimensions=hidden_dims)



#2phase
#lame  mp3 -> wav

# Sample rate label in kHz, e.g. "44.1" (built but not used in the visible lines).
sample_freq_str = "{0:.1f}".format(float(44100) / 1000.0)
#        cmd = 'ffmpeg -i {0} -ar 44100 -ac 1 -c:a libmp3lame {1}'.format(recv_file, recv_file_tmp)
# ffmpeg: seek to start_time (-ss), take end_time - start_time seconds (-t),
# downmix to mono (-ac 1); executed somewhere past this chunk.
cmd = 'ffmpeg -i {0} -ss {1} -t {2} -ac 1 {3}'.format(recv_file, start_time, end_time - start_time, recv_file_tmp)