Example #1
 def __init__(self, keyboard, display, looper):
     self.pending = []  #list of notes scheduled for playing in the future
     #format of each note: (note, velocity, ticks_ms)
     self.k = keyboard
     self.d = display
     self.l = looper
     self.l.p = self
     self.midi = midi.Midi()
     self.metronome = metronome.Metronome(self.midi)
     self.scale = [60, 62, 64, 65, 67, 69, 71, 72]
     self.chord_names = ["C", "D", "E", "F", "G", "A", "B", "C"]
     self.melody_keys_transpose = bytearray(8)  #for keeping track of which key was played
     self.instr = 22
     self.set_instr(self.instr)
     self.volume = 64
     self.set_volume(self.volume)
     #TODO: move the drum names to instr_names.py
     self.drums = [("Snare", 38), ("Bass drum", 36), ("Low tom", 45),
                   ("High tom", 50), ("Crash cymbal", 49),
                   ("Ride cymbal", 51), ("Open hi-hat", 46),
                   ("Closed hi-hat", 42)]
     self.chord = []  #Currently playing chord
     self.strum_chord = []  #same chord, but with enough notes to cover all strumming keys
     self.strum_mute_old = 0
     self.strum_keys_old = 0
Example #2
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--config_json',
                        help='location of json configuration',
                        default='config.json')
    options = vars(parser.parse_args())
    config_json = read_config(options['config_json'])

    #url = 'https://api.openweathermap.org/data/2.5/weather?zip={}&appid={}'.format(options['zip'], options['owmkey'])
    #r = requests.get(url)
    ticks_per_quarter = config_json['midi']['ticks_per_quarter']
    midi_output = midi.Midi(ticks_per_quarter)
    trk_chk = midi.Midi.TrackChunk(ticks_per_quarter)

    # result_sequence is a list whose single element is the list of measures produced by the algorithm
    result_sequence = genetic.run(config_json)

    for measure in result_sequence[0]:
        print("Result measure: {}".format(measure))
        for note in measure.notes:
            note_length_ticks = int(ticks_per_quarter * note.note_len * 4)
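            # presumably a note-on (velocity 96) at delta 0, followed by the matching note-off after note_length_ticks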
            trk_chk.add_event(1, 0, note.midi_num, 96, 0)
            trk_chk.add_event(0, 0, note.midi_num, 0, note_length_ticks)
    midi_output.chunks.append(trk_chk)
    midi_output.write_to_file(config_json['midi']['output'])
Example #3
 def init_piano(self):
     keymap_filename = 'pianoeletronico.kmp'
     notes_manager = NotesManager()
     notes_manager.load_file(util.app_file_path(os.path.join('keymaps', keymap_filename)))
     self.midi = midi.Midi()
     self.midi_output = midi.Output(self.midi.get_default_output_id(), 0)
     self.piano = Piano(notes_manager, self.midi_output)
     self.piano.set_instrument(0, 0)
Example #4
    def __init__(self, keyboard, display, looper):
        self.pending = []  #list of notes scheduled for playing in the future
        #format of each note: (note, velocity, ticks_ms)
        self.k = keyboard
        self.d = display
        self.l = looper
        self.midi = midi.Midi()
        self.metronome = metronome.Metronome(self.midi)
        self.transpose = 0
        self.melody_keys_transpose = bytearray(8)  #for keeping track of which key was played
        self.instr = 22
        self.set_instr(self.instr)
        self.volume = 64
        self.set_volume(self.volume)
        self.chord = []  #Currently playing chord
        self.strumming = False  #Enable strumming?
        self.strum_chord = []  #same chord, but with enough notes to cover all strumming keys
        self.strum_mute_old = False
        self.strum_keys_old = 0  #bitmap
        self.strum_keys_all = 0  #bitmap of all active notes
        self.default_velocity = 64

        #For continuous expression control
        self.playing_chord_key = None  #Number of the key being pressed, from 0 to 7
        self.playing_notes = 0
        self.expr1_old = self.default_velocity
        #self.expr1_time     = 0
        self.expr_bend_old = 0
        #self.expr_bend_time = 0
        self.bend_baseline = 0
        self.chord_shape_name = ""
        self.chord_sharp = 0  #-1 for flat, +1 for sharp
        self.chord_sharp_old = 0  #same; this value corresponds to the last one displayed
        self.chord_disp_timestamp = 0

        #For melody mode
        self.melody = False
        self.melody_last_key = None
        self.melody_last_key_time = 0

        for n in range(0, 15):
            self.midi.set_controller(n, 7, 127)  #Set channel volumes to max
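Example #5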
def main():
    g = get_note_pitch_generator()
    track_r = midi.TrackConfig('Rythm Track')
    track_l = midi.TrackConfig('Lead Track')
    m = midi.Midi([track_r, track_l])
    time = 0
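    # chord notes go on the default track (presumably the rhythm track); shorter lead notes are layered on track 1 until each chord's duration is filled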
    while time < 1000:
        duration, chord_desc = g.next()
        pitches = get_chord(chord_desc)
        lead_time = 0
        lg = get_lead_generator(pitches)
        while True:
            lead_duration, lead_note = lg.next()
            lead_duration = lead_duration / 2
            lead_time += lead_duration
            if lead_time > duration:
                break
            m.append_note(lead_duration, [lead_note], track=1)
        time = m.append_note(duration, pitches)
    m.write_file('out.mid')
    return 0
Example #6
    def on_file_open_activate(self, menuitem):
        webmsrc = self.player.get_by_name('webmsrc')
        midisrc = self.player.get_by_name('midisrc')

        open_dialog = self.builder.get_object('open_dialog')
        progress_bar = self.builder.get_object('progressing_bar')
        hint_label = self.builder.get_object('hint_label')

        response = open_dialog.run()
        open_dialog.hide()
        if response == Gtk.ResponseType.OK:
            self.duration = Gst.CLOCK_TIME_NONE
            source = open_dialog.get_filename()
            progress_bar.set_fraction(0)
            hint_label.set_text('正在解析 MIDI 檔案為影片...')  # "Parsing the MIDI file into a video..."

            self.set_window_sensitive(False)

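            # progress callback: advance the bar by one frame's worth and pump pending GTK events so the UI stays responsive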
            def update_progress_bar(clip):
                progress = progress_bar.get_fraction() + 1 / clip.nframes
                progress_bar.set_fraction(progress)
                while Gtk.events_pending():
                    Gtk.main_iteration()

            sheet = midi.Midi(source)
            clip = video.midi_videoclip(sheet,
                                        iter_callback=update_progress_bar)
            clip.write_videofile('tmp.webm', codec='libvpx', fps=20)
            os.rename('tmp.webm', 'tmp.webm~')  # MoviePy rejects nonstandard file extensions, so write .webm first and rename
            webmsrc.set_property('location', 'tmp.webm~')
            midisrc.set_property('location', source)

            self.set_window_sensitive(True)

            progress_bar.set_fraction(1)
            hint_label.set_visible(False)
            self.player.get_by_name('gtksink').props.widget.show()
        elif response == Gtk.ResponseType.CANCEL:
            return
Example #7
def run(mp3_file_path):

    os.makedirs('static', exist_ok=True)

    logger.info(
        'run function mp3_file_path argument : {}'.format(mp3_file_path))

    voca = False  # True means large vocabulary label type
    if voca:
        config.feature['large_voca'] = True
        config.model['num_chords'] = 170
        model_file = 'test/btc_model_large_voca.pt'
        idx_to_chord = idx2voca_chord()
        logger.info("label type: large voca")
    else:
        model_file = 'test/btc_model.pt'
        idx_to_chord = idx2chord
        logger.info("label type: Major and minor")

    model = BTC_model(config=config.model).to(device)

    # Load model
    if os.path.isfile(model_file):
        checkpoint = torch.load(model_file, map_location='cpu')
        mean = checkpoint['mean']
        std = checkpoint['std']
        model.load_state_dict(checkpoint['model'])
        logger.info("restore model")

    # clean mp3 filename
    base_path, song_name = os.path.split(mp3_file_path)
    new_name = "".join(x for x in song_name[:-4] if x.isalnum())
    new_path = os.path.join(base_path, new_name + '.mp3')
    shutil.move(mp3_file_path, new_path)
    filename = new_path[:-4]
    logger.info('cleaned filename : {}'.format(filename))

    # load mp3 and get features
    feature, feature_per_second, song_length_second = audio_file_to_features(
        new_path, config)
    logger.info("audio file loaded and feature computation success")

    # Majmin type chord recognition
    feature = feature.T
    feature = (feature - mean) / std
    time_unit = feature_per_second
    n_timestep = config.model['timestep']

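    # zero-pad the feature matrix so its length is an exact multiple of the model timestep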
    num_pad = n_timestep - (feature.shape[0] % n_timestep)
    feature = np.pad(feature, ((0, num_pad), (0, 0)),
                     mode="constant",
                     constant_values=0)
    num_instance = feature.shape[0] // n_timestep

    start_time = 0.0
    lines = []
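    # run inference one timestep-sized window at a time, emitting a "start end chord" line whenever the predicted chord changes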
    with torch.no_grad():
        model.eval()
        feature = torch.tensor(feature,
                               dtype=torch.float32).unsqueeze(0).to(device)
        for t in range(num_instance):
            self_attn_output, _ = model.self_attn_layers(
                feature[:, n_timestep * t:n_timestep * (t + 1), :])
            prediction, _ = model.output_layer(self_attn_output)
            prediction = prediction.squeeze()
            for i in range(n_timestep):
                if t == 0 and i == 0:
                    prev_chord = prediction[i].item()
                    continue
                if prediction[i].item() != prev_chord:
                    lines.append(
                        '%.6f %.6f %s\n' %
                        (start_time, time_unit *
                         (n_timestep * t + i), idx_to_chord[prev_chord]))
                    start_time = time_unit * (n_timestep * t + i)
                    prev_chord = prediction[i].item()
                if t == num_instance - 1 and i + num_pad == n_timestep:
                    if start_time != time_unit * (n_timestep * t + i):
                        lines.append(
                            '%.6f %.6f %s\n' %
                            (start_time, time_unit *
                             (n_timestep * t + i), idx_to_chord[prev_chord]))
                    break

    # lab file write
    # test_result_path = 'test/{}.lab'.format(filename)
    test_result_path = './{}.lab'.format(filename)
    with open(test_result_path, 'w') as f:
        for line in lines:
            f.write(line)

    logger.info("label file saved")

    # read in lab file into dataframe
    logger.info('read in label file to pandas dataframe')
    df = pd.read_csv('./{}.lab'.format(filename), header=None, delimiter=' ')

    df.columns = ['start', 'stop', 'chord']

    # calculate chord duration
    df['duration'] = df.stop - df.start

    # drop the first row and the last two rows (leading/trailing non-chords)
    df = df.iloc[1:-2].copy(deep=True)

    # merge any non-chord ('N') segments into the preceding chord
    for index in df.index.values:

        chord_ = df.at[index, 'chord']

        if chord_ == 'N':

            timestamp = df.at[index, 'stop']
            df.at[index - 1, 'stop'] = timestamp
            df.drop(index, inplace=True)

    logger.info('start processing the chords into midi')
    try:
        s1 = stream.Stream()
        s1.append(chord.Chord(["C4", "G4", "E-5"]))
        for index in df.index.values[1:20]:  #[1:-2]:
            chord_ = df.at[index, 'chord']
            kind = 'major'
            if ':min' in chord_:
                kind = 'minor'
            chord_ = chord_.split(':min')[0]
            # multiply duration by 2. Don't know why this works but it does
            duration_ = 2 * df.at[index, 'duration']
            chord21 = harmony.ChordSymbol(root=chord_,
                                          kind=kind,
                                          duration=duration_)
            chord21.writeAsChord = True
            s1.append(chord21)

    except Exception as e:
        logger.info(e)

    logger.info('complete')

    logger.info('save midi to disk')
    fp = s1.write('midi', fp='{}.mid'.format(filename))

    # read in midi
    sheet = midi.Midi('{}.mid'.format(filename))
    # get the video representation
    clip = video.midi_videoclip(sheet)
    # save the video without audio
    clip.write_videofile('{}.webm'.format(filename), codec='libvpx', fps=20)

    os.makedirs('sf2', exist_ok=True)

    # download the FluidR3 GM soundfont for FluidSynth if it doesn't exist
    if not os.path.exists('sf2/FluidR3_GM.sf2'):

        cmd = 'wget -O sf2/FluidR3_GM.sf2 https://github.com/urish/cinto/raw/master/media/FluidR3%20GM.sf2'
        subprocess.call(cmd, shell=True)

    # load the soundfont
    fs = FluidSynth('sf2/FluidR3_GM.sf2')  # arch

    # convert the midi to audio
    fs.midi_to_audio('{}.mid'.format(filename), '{}.wav'.format(filename))

    # combine the audio and video
    cmd = 'ffmpeg -y -i {}.wav  -r 30 -i {}.webm  -filter:a aresample=async=1 -c:a flac -c:v copy {}.mkv'.format(
        filename, filename, filename)
    subprocess.call(cmd, shell=True)  # Muxing done

    # slow the audio by 4% (atempo=0.96) so it lines up with the video
    cmd = 'ffmpeg -i {}.mkv -filter:a "atempo=0.96" -vn -y {}.wav'.format(
        filename, filename)
    subprocess.call(cmd, shell=True)

    # strip the path to get the filename
    filename_only = os.path.splitext(os.path.basename(filename))[0]

    # combine the video and the delayed audio
    cmd = 'ffmpeg -y -i {}.wav  -r 30 -i {}.webm  -filter:a aresample=async=1 -c:a flac -c:v copy static/{}.mkv'.format(
        filename, filename, filename_only)
    subprocess.call(cmd, shell=True)  # Muxing done
    logger.info('Muxing Done')

    return 'static/{}.mkv'.format(filename_only)
Example #8
            devices[_fd].ungrab()
        except IOError:
        print("Already ungrabbed.")
    if not headless:
        disp.close()
    print("Thank you for the music!")
    print(" ")
    sys.exit()
    return


## Initialize toggle variables
share_sust = 1

## Midi setup
midi = md.Midi()

## Display setup
headless = False
if (len(sys.argv) > 1) and (str(sys.argv[1]) == 'headless'):
    headless = True
else:
    disp = display.Display()
    disp.setup(fullscreen=0)

## Memory setup
mem = 1
inst_mem = list()
base_mem = list()
vol_mem = list()
vel_mem = list()
Example #9
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "4"

batch_size = 1024
epoches = 500
no_cuda = False
seed = 1
weight_decay = 0.1
sing_train_path = 'new_data/sing_30_days'
sing_test_path = 'new_data/sing_test_7_days'
listen_path = 'new_data/listen_30_days'
midi_path = 'new_data/midi.out'

data_loader = data.Data(sing_train_path, sing_test_path, listen_path,
                        batch_size)
midi_data = midi.Midi(midi_path)

# n_factors = 40
model = bpr_lstm.PMF(n_users=data_loader.usr_num,
                     n_items=data_loader.song_num,
                     n_factors=40,
                     no_cuda=no_cuda)

model.cuda()

lstm = model_lstm.MidiLstm(embedding_dim=100,
                           hidden_dim=100,
                           pitch_size=80,
                           nlayers=2,
                           d=40)
lstm.cuda()