def generate_random_songs(decoder, write_dir, random_vectors):
    """
    Generate random songs using random latent vectors.
    :param decoder: decoder function mapping a latent vector to song samples
    :param write_dir: directory to write the generated midi files to
    :param random_vectors: array of random latent vectors, one per song
    :return:
    """
    for i in range(random_vectors.shape[0]):
        random_latent_x = random_vectors[i:i + 1]
        y_song = decoder([random_latent_x, 0])[0]
        midi_utils.samples_to_midi(y_song[0], write_dir + 'random_vectors' + str(i) + '.mid', 32)
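# Example usage (a minimal sketch; assumes `decoder` was built with K.function
# as in train() below, and that NUM_RAND_SONGS and LATENT_SPACE_SIZE are the
# module-level constants used there):
#
#     random_vectors = np.random.normal(0.0, 1.0, (NUM_RAND_SONGS, LATENT_SPACE_SIZE))
#     generate_random_songs(decoder, 'results/', random_vectors)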
def train(samples_path='data/interim/samples.npy', lengths_path='data/interim/lengths.npy', epochs_qty=EPOCHS_QTY, learning_rate=LEARNING_RATE):
    """
    Train the autoencoder model on the preprocessed song samples.
    :param samples_path: path to the preprocessed samples (run preprocess_songs.py to create them)
    :param lengths_path: path to the per-song sample counts
    :param epochs_qty: number of epochs to train
    :param learning_rate: learning rate of the optimizer
    :return:
    """

    # create folders to save models into
    if not os.path.exists('results'):
        os.makedirs('results')
    if WRITE_HISTORY and not os.path.exists('results/history'):
        os.makedirs('results/history')

    # load dataset into memory
    print("Loading Data...")
    if not os.path.exists(samples_path) or not os.path.exists(lengths_path):
        print('No input data found, run preprocess_songs.py first.')
        exit(1)

    y_samples = np.load(samples_path)
    y_lengths = np.load(lengths_path)

    samples_qty = y_samples.shape[0]
    songs_qty = y_lengths.shape[0]
    print("Loaded " + str(samples_qty) + " samples from " + str(songs_qty) + " songs.")
    print(np.sum(y_lengths))
    assert np.sum(y_lengths) == samples_qty

    print("Preparing song samples, padding songs...")
    x_shape = (songs_qty * NUM_OFFSETS, 1)  # for embedding
    x_orig = np.expand_dims(np.arange(x_shape[0]), axis=-1)

    y_shape = (songs_qty * NUM_OFFSETS, MAX_WINDOWS) + y_samples.shape[1:]  # (songs_qty, max number of windows, window pitch qty, window beats per measure)
    y_orig = np.zeros(y_shape, dtype=np.float32)  # prepare dataset array

    # fill in measures of songs into input windows for the network
    song_start_ix = 0
    song_end_ix = y_lengths[0]
    for song_ix in range(songs_qty):
        for offset in range(NUM_OFFSETS):
            ix = song_ix * NUM_OFFSETS + offset  # index of the song with its offset
            song_end_ix = song_start_ix + y_lengths[song_ix]  # song end index
            for window_ix in range(MAX_WINDOWS):  # take at most MAX_WINDOWS measures from a song
                song_measure_ix = (window_ix + offset) % y_lengths[song_ix]  # measure of the song to place in this window (modulo song length)
                y_orig[ix, window_ix] = y_samples[song_start_ix + song_measure_ix]  # move measure into window
        song_start_ix = song_end_ix  # new song start index is the previous song end index
    assert song_end_ix == samples_qty

    x_train = np.copy(x_orig)
    y_train = np.copy(y_orig)

    # copy a song from the samples and write it back to midi as a ground-truth reference
    test_ix = 0
    y_test_song = np.copy(y_train[test_ix:test_ix + 1])
    x_test_song = np.copy(x_train[test_ix:test_ix + 1])
    midi_utils.samples_to_midi(y_test_song[0], 'data/interim/gt.mid')

    # create the model
    if CONTINUE_TRAIN or GENERATE_ONLY:
        print("Loading model...")
        model = load_model('results/history/model.h5')
    else:
        print("Building model...")
        model = models.create_autoencoder_model(input_shape=y_shape[1:],
                                                latent_space_size=LATENT_SPACE_SIZE,
                                                dropout_rate=DROPOUT_RATE,
                                                max_windows=MAX_WINDOWS,
                                                batchnorm_momentum=BATCHNORM_MOMENTUM,
                                                use_vae=USE_VAE,
                                                vae_b1=VAE_B1,
                                                use_embedding=USE_EMBEDDING,
                                                embedding_input_shape=x_shape[1:],
                                                embedding_shape=x_train.shape[0])

        if USE_VAE:
            model.compile(optimizer=Adam(lr=learning_rate), loss=vae_loss)
        # elif params.encode_volume:
        #     model.compile(optimizer=RMSprop(lr=learning_rate), loss='mean_squared_logarithmic_error')
        else:
            model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
            # model.compile(optimizer=RMSprop(lr=learning_rate), loss='mean_squared_error')

    # plot model with graphviz if installed
    # try:
    #     plot_model(model, to_file='results/model.png', show_shapes=True)
    # except OSError as e:
    #     print(e)

    # train
    print("Referencing sub-models...")
    decoder = K.function([model.get_layer('decoder').input, K.learning_phase()], [model.layers[-1].output])
    encoder = Model(inputs=model.input, outputs=model.get_layer('encoder').output)

    random_vectors = np.random.normal(0.0, 1.0, (NUM_RAND_SONGS, LATENT_SPACE_SIZE))
    np.save('data/interim/random_vectors.npy', random_vectors)

    if GENERATE_ONLY:
        print("Generating songs...")
        generate_normalized_random_songs(x_orig, y_orig, encoder, decoder, random_vectors, 'results/')
        for save_epoch in range(20):
            x_test_song = x_train[save_epoch:save_epoch + 1]
            y_song = model.predict(x_test_song, batch_size=BATCH_SIZE)[0]
            midi_utils.samples_to_midi(y_song, 'results/gt' + str(save_epoch) + '.mid')
        exit(0)

    save_training_config(songs_qty, model, learning_rate)
    print("Training model...")
    train_loss = []
    offset = 0

    for epoch in range(epochs_qty):
        print("Training epoch: ", epoch, "of", epochs_qty)
        if USE_EMBEDDING:
            history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=1)
        else:
            # rebuild the training windows with a different starting measure for each song on every epoch
            song_start_ix = 0
            for song_ix in range(songs_qty):
                song_end_ix = song_start_ix + y_lengths[song_ix]
                for window_ix in range(MAX_WINDOWS):
                    song_measure_ix = (window_ix + offset) % y_lengths[song_ix]
                    y_train[song_ix, window_ix] = y_samples[song_start_ix + song_measure_ix]
                    # if params.encode_volume:
                    #     y_train[song_ix, window_ix] /= 100.0
                song_start_ix = song_end_ix
            assert song_end_ix == samples_qty
            offset += 1
            history = model.fit(y_train, y_train, batch_size=BATCH_SIZE, epochs=1)  # train the model on its reconstruction loss

        # store the last loss
        loss = history.history["loss"][-1]
        train_loss.append(loss)
        print("Train loss: " + str(train_loss[-1]))

        if WRITE_HISTORY:
            plot_losses(train_loss, 'results/history/losses.png', True)
        else:
            plot_losses(train_loss, 'results/losses.png', True)

        # save the model periodically
        save_epoch = epoch + 1
        if save_epoch in EPOCHS_TO_SAVE or (save_epoch % 100 == 0) or save_epoch == epochs_qty:
            write_dir = ''
            if WRITE_HISTORY:
                # create a folder to save the model into
                write_dir += 'results/history/e' + str(save_epoch)
                if not os.path.exists(write_dir):
                    os.makedirs(write_dir)
                write_dir += '/'
                model.save('results/history/model.h5')
            else:
                model.save('results/model.h5')
            print("...Saved.")

            if USE_EMBEDDING:
                y_song = model.predict(x_test_song, batch_size=BATCH_SIZE)[0]
            else:
                y_song = model.predict(y_test_song, batch_size=BATCH_SIZE)[0]

            plot_utils.plot_samples(write_dir + 'test', y_song)
            midi_utils.samples_to_midi(y_song, write_dir + 'test.mid')
            generate_normalized_random_songs(x_orig, y_orig, encoder, decoder, random_vectors, write_dir)

    print("...Done.")
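# Entry-point sketch (hypothetical wiring; the project may invoke train()
# differently, e.g. from a command-line wrapper):
#
#     if __name__ == '__main__':
#         train(samples_path='data/interim/samples.npy',
#               lengths_path='data/interim/lengths.npy')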
def play():
    global mouse_pressed
    global current_notes
    global audio_pause
    global needs_update
    global current_params
    global prev_mouse_pos
    global audio_reset
    global instrument

    print("Keras version: " + keras.__version__)

    K.set_image_data_format('channels_first')

    print("Loading encoder...")
    model = load_model(dir_name + 'model.h5')
    encoder = Model(inputs=model.input, outputs=model.get_layer('encoder').output)
    decoder = K.function([model.get_layer('decoder').input, K.learning_phase()], [model.layers[-1].output])

    print("Loading gaussian/pca statistics...")
    latent_means = np.load(dir_name + sub_dir_name + 'latent_means.npy')
    latent_stds = np.load(dir_name + sub_dir_name + 'latent_stds.npy')
    latent_pca_values = np.load(dir_name + sub_dir_name + 'latent_pca_values.npy')
    latent_pca_vectors = np.load(dir_name + sub_dir_name + 'latent_pca_vectors.npy')

    print("Loading songs...")
    y_samples = np.load('data/interim/samples.npy')
    y_lengths = np.load('data/interim/lengths.npy')

    # open a window
    pygame.init()
    pygame.font.init()
    screen = pygame.display.set_mode((int(window_w), int(window_h)))
    notes_surface = screen.subsurface((notes_x, notes_y, notes_w, notes_h))
    pygame.display.set_caption('Neural Composer')

    # start the audio stream
    audio_stream = audio.open(format=audio.get_format_from_width(2),
                              channels=1,
                              rate=sample_rate,
                              output=True,
                              stream_callback=audio_callback)
    audio_stream.start_stream()

    # main loop
    running = True
    random_song_ix = 0
    cur_len = 0
    apply_controls()
    while running:
        # process events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # QUIT BUTTON HIT
                running = False
                break

            elif event.type == pygame.MOUSEBUTTONDOWN:  # MOUSE BUTTON DOWN
                if pygame.mouse.get_pressed()[0]:
                    prev_mouse_pos = pygame.mouse.get_pos()
                    update_mouse_click(prev_mouse_pos)
                    update_mouse_move(prev_mouse_pos)
                elif pygame.mouse.get_pressed()[2]:
                    current_params = np.zeros((num_params,), dtype=np.float32)
                    needs_update = True

            elif event.type == pygame.MOUSEBUTTONUP:  # MOUSE BUTTON UP
                mouse_pressed = 0
                prev_mouse_pos = None

            elif event.type == pygame.MOUSEMOTION and mouse_pressed > 0:  # MOUSE MOTION WHILE PRESSED
                update_mouse_move(pygame.mouse.get_pos())

            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_r:  # KEYDOWN R
                    # generate a random song
                    current_params = np.clip(np.random.normal(0.0, 1.0, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                    audio_reset = True
                if event.key == pygame.K_e:  # KEYDOWN E
                    # generate a random song with larger variance
                    current_params = np.clip(np.random.normal(0.0, 2.0, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                    audio_reset = True
                if event.key == pygame.K_o:  # KEYDOWN O
                    # check how well the autoencoder can reconstruct a random song
                    print("Random Song Index: " + str(random_song_ix))
                    if is_ae:
                        example_song = y_samples[cur_len:cur_len + num_measures]
                        current_notes = example_song * 255
                        latent_x = encoder.predict(np.expand_dims(example_song, 0), batch_size=1)[0]
                        cur_len += y_lengths[random_song_ix]
                        random_song_ix += 1
                    else:
                        random_song_ix = np.array([random_song_ix], dtype=np.int64)
                        latent_x = encoder.predict(random_song_ix, batch_size=1)[0]
                        random_song_ix = (random_song_ix + 1) % model.layers[0].input_dim

                    if use_pca:
                        current_params = np.dot(latent_x - latent_means, latent_pca_vectors.T) / latent_pca_values
                    else:
                        current_params = (latent_x - latent_means) / latent_stds
                    needs_update = True
                    audio_reset = True
                if event.key == pygame.K_m:  # KEYDOWN M
                    # save song as midi
                    audio_pause = True
                    audio_reset = True
                    midi_utils.samples_to_midi(current_notes, 'results/live.mid', note_threshold)
                    audio_pause = False
                if event.key == pygame.K_w:  # KEYDOWN W
                    # save song as wave
                    audio_pause = True
                    audio_reset = True
                    save_audio = b''
                    while True:
                        save_audio += audio_callback(None, 1024, None, None)[0]
                        if audio_time == 0:
                            break
                    wave_output = wave.open('results/live.wav', 'w')
                    wave_output.setparams((1, 2, sample_rate, 0, 'NONE', 'not compressed'))
                    wave_output.writeframes(save_audio)
                    wave_output.close()
                    audio_pause = False
                if event.key == pygame.K_ESCAPE:  # KEYDOWN ESCAPE
                    # exit application
                    running = False
                    break
                if event.key == pygame.K_SPACE:  # KEYDOWN SPACE
                    # toggle pause/play audio
                    audio_pause = not audio_pause
                if event.key == pygame.K_TAB:  # KEYDOWN TAB
                    # reset audio playback
                    audio_reset = True
                if event.key == pygame.K_1:  # KEYDOWN 1
                    # play instrument 0
                    instrument = 0
                if event.key == pygame.K_2:  # KEYDOWN 2
                    # play instrument 1
                    instrument = 1
                if event.key == pygame.K_3:  # KEYDOWN 3
                    # play instrument 2
                    instrument = 2
                if event.key == pygame.K_4:  # KEYDOWN 4
                    # play instrument 3
                    instrument = 3
                if event.key == pygame.K_c:  # KEYDOWN C
                    # re-encode the current (thresholded) notes back into slider parameters
                    y = np.expand_dims(np.where(current_notes > note_threshold, 1, 0), 0)
                    latent_x = encoder.predict(y)[0]
                    if use_pca:
                        current_params = np.dot(latent_x - latent_means, latent_pca_vectors.T) / latent_pca_values
                    else:
                        current_params = (latent_x - latent_means) / latent_stds
                    needs_update = True

        # if params were changed, generate a new song
        if needs_update:
            if use_pca:
                latent_x = latent_means + np.dot(current_params * latent_pca_values, latent_pca_vectors)
            else:
                latent_x = latent_means + latent_stds * current_params
            latent_x = np.expand_dims(latent_x, axis=0)
            y = decoder([latent_x, 0])[0][0]
            current_notes = (y * 255.0).astype(np.uint8)
            needs_update = False

        # draw GUI to the screen
        screen.fill(background_color)
        draw_notes(screen, notes_surface)
        draw_sliders(screen)
        draw_controls(screen)

        # flip the screen buffer
        pygame.display.flip()
        pygame.time.wait(10)

    # if the app is exited, close the audio stream
    audio_stream.stop_stream()
    audio_stream.close()
    audio.terminate()
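# The play() variant below extends the one above with autosave (A key),
# slider-state save/load (S/L keys), and keyframe blending between saved
# songs (B key).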
def play():
    global mouse_pressed
    global current_notes
    global audio_pause
    global needs_update
    global current_params
    global prev_mouse_pos
    global audio_reset
    global instrument
    global songs_loaded
    global autosavenow
    global autosavenum
    global autosave
    global blend
    global blendstate
    global blendfactor
    global keyframe_params
    global keyframe_controls
    global keyframe_paths
    global cur_controls
    global keyframe_magnitudes
    global blend_slerp

    print("Keras version: " + keras.__version__)

    K.set_image_data_format('channels_first')

    print("Loading encoder...")
    model = load_model(dir_name + 'model.h5')
    encoder = Model(inputs=model.input, outputs=model.get_layer('encoder').output)
    decoder = K.function([model.get_layer('decoder').input, K.learning_phase()], [model.layers[-1].output])

    print("Loading gaussian/pca statistics...")
    latent_means = np.load(dir_name + sub_dir_name + '/latent_means.npy')
    latent_stds = np.load(dir_name + sub_dir_name + '/latent_stds.npy')
    latent_pca_values = np.load(dir_name + sub_dir_name + '/latent_pca_values.npy')
    latent_pca_vectors = np.load(dir_name + sub_dir_name + '/latent_pca_vectors.npy')

    # open a window
    pygame.init()
    pygame.font.init()
    screen = pygame.display.set_mode((int(window_w), int(window_h)))
    notes_surface = screen.subsurface((notes_x, notes_y, notes_w, notes_h))
    pygame.display.set_caption('Neural Composer')

    # start the audio stream
    audio_stream = audio.open(format=audio.get_format_from_width(2),
                              channels=1,
                              rate=sample_rate,
                              output=True,
                              stream_callback=audio_callback)
    audio_stream.start_stream()

    # main loop
    running = True
    random_song_ix = 0
    cur_len = 0
    blendcycle = 0
    apply_controls()
    while running:
        # autosave the current song if requested
        if autosavenow:
            # generate a random song
            current_params = np.clip(np.random.normal(0.0, 1.0, (num_params,)), -num_sigmas, num_sigmas)
            needs_update = True
            audio_reset = True
            # save the slider values
            with open("results/history/autosave" + str(autosavenum) + ".txt", "w") as text_file:
                text_file.write(sub_dir_name + "\n")
                text_file.write(str(instrument) + "\n")
                for value in cur_controls:
                    text_file.write(str(value) + "\n")
                for value in current_params:
                    text_file.write(str(value) + "\n")
            # save the song as wave
            audio_pause = True
            audio_reset = True
            save_audio = b''
            while True:
                save_audio += audio_callback(None, 1024, None, None)[0]
                if audio_time == 0:
                    break
            wave_output = wave.open('results/history/autosave' + str(autosavenum) + '.wav', 'w')
            wave_output.setparams((1, 2, sample_rate, 0, 'NONE', 'not compressed'))
            wave_output.writeframes(save_audio)
            wave_output.close()
            audio_pause = False
            autosavenum += 1
            autosavenow = False
            needs_update = True
            audio_reset = True

        # step the keyframe blend, alternating between holding a keyframe and interpolating to the next one
        blendcycle += 1
        if blend and blendcycle > 10:
            blendcycle = 0
            if blendstate % 2 == 0:
                needs_update = True
                current_params = np.copy(keyframe_params[int(blendstate / 2)])
                cur_controls = np.copy(keyframe_controls[int(blendstate / 2)])
                apply_controls()
            elif blendstate % 2 == 1:
                next_ix = (int(blendstate / 2) + 1) % len(keyframe_paths)
                for x in range(len(current_params)):
                    current_params[x] = (blendfactor * keyframe_params[int(blendstate / 2), x]) + ((1 - blendfactor) * keyframe_params[next_ix, x])
                if blend_slerp:
                    magnitude = (blendfactor * keyframe_magnitudes[int(blendstate / 2)]) + ((1 - blendfactor) * keyframe_magnitudes[next_ix])
                    current_params = current_params * ((sum(current_params * current_params) ** -0.5) * magnitude)
                for x in range(len(cur_controls)):
                    cur_controls[x] = (blendfactor * keyframe_controls[int(blendstate / 2), x]) + ((1 - blendfactor) * keyframe_controls[next_ix, x])
                apply_controls()
                needs_update = True

        # process events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # QUIT BUTTON HIT
                running = False
                break

            elif event.type == pygame.MOUSEBUTTONDOWN:  # MOUSE BUTTON DOWN
                if pygame.mouse.get_pressed()[0]:
                    prev_mouse_pos = pygame.mouse.get_pos()
                    update_mouse_click(prev_mouse_pos)
                    update_mouse_move(prev_mouse_pos)
                elif pygame.mouse.get_pressed()[2]:
                    current_params = np.zeros((num_params,), dtype=np.float32)
                    needs_update = True

            elif event.type == pygame.MOUSEBUTTONUP:  # MOUSE BUTTON UP
                mouse_pressed = 0
                prev_mouse_pos = None

            elif event.type == pygame.MOUSEMOTION and mouse_pressed > 0:  # MOUSE MOTION WHILE PRESSED
                update_mouse_move(pygame.mouse.get_pos())

            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_r:  # KEYDOWN R
                    # generate a random song
                    current_params = np.clip(np.random.normal(0.0, 1.0, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                    audio_reset = True
                if event.key == pygame.K_t:  # KEYDOWN T
                    # randomize only the later latent parameters
                    for x in range(int(num_params / 3) + 1, num_params):
                        current_params[x] = np.clip(np.random.normal(0.0, 1.0), -num_sigmas, num_sigmas)
                    needs_update = True
                if event.key == pygame.K_x:  # KEYDOWN X
                    # randomly perturb the current song
                    current_params += np.clip(np.random.normal(0.0, 0.3, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                if event.key == pygame.K_a:  # KEYDOWN A
                    # toggle autosave
                    autosave = not autosave
                if event.key == pygame.K_b:  # KEYDOWN B
                    # toggle keyframe blending between saved songs
                    blend = not blend
                    blendstate = 0
                    blendfactor = 1.0
                    if blend:
                        audio_pause = True
                        audio_reset = True
                        needs_update = True
                        blendnum = int(input("The number of songs to be blended "))
                        keyframe_paths = []
                        keyframe_controls = np.zeros((blendnum, len(cur_controls)), dtype=np.float32)
                        keyframe_params = np.zeros((blendnum, num_params), dtype=np.float32)
                        for y in range(blendnum):
                            fileName = input("The file name of the next song to be blended ")
                            if "." not in fileName:
                                fileName = fileName + ".txt"
                            keyframe_paths.append(fileName)
                            fo = open("results/history/" + fileName, "r")
                            if sub_dir_name != fo.readline()[:-1]:
                                running = False
                                print("Incompatible with the current model.")
                                break
                            instrument = int(fo.readline())
                            for x in range(len(cur_controls)):
                                keyframe_controls[y, x] = float(fo.readline())
                            for x in range(len(current_params)):
                                keyframe_params[y, x] = float(fo.readline())
                            # keyframe_magnitudes[y] = sum(keyframe_params[y] * keyframe_params[y]) ** 0.5
                if event.key == pygame.K_e:  # KEYDOWN E
                    # generate a random song with larger variance
                    current_params = np.clip(np.random.normal(0.0, 2.0, (num_params,)), -num_sigmas, num_sigmas)
                    needs_update = True
                    audio_reset = True
                if event.key == pygame.K_PERIOD:  # KEYDOWN PERIOD
                    # scale the current latent parameters down
                    current_params /= 1.1
                    needs_update = True
                if event.key == pygame.K_COMMA:  # KEYDOWN COMMA
                    # scale the current latent parameters up
                    current_params *= 1.1
                    needs_update = True
                if event.key == pygame.K_SLASH:  # KEYDOWN SLASH
                    # negate the current latent parameters
                    current_params *= -1
                    needs_update = True
                if event.key == pygame.K_UP:  # KEYDOWN UP
                    cur_controls[0] = (210.0 - note_threshold + 1) / 200
                    apply_controls()
                if event.key == pygame.K_DOWN:  # KEYDOWN DOWN
                    cur_controls[0] = (210.0 - note_threshold - 1) / 200
                    apply_controls()
                if event.key == pygame.K_s:  # KEYDOWN S
                    # save slider values
                    audio_pause = True
                    fileName = input("File Name to save into ")
                    if "." not in fileName:
                        fileName = fileName + ".txt"
                    with open("results/history/" + fileName, "w") as text_file:
                        if blend:
                            text_file.write(sub_dir_name + "\n")
                            text_file.write("blended song" + "\n")
                            text_file.write(str(len(keyframe_paths)) + "\n")
                            for x in range(len(keyframe_paths)):
                                text_file.write(keyframe_paths[x] + "\n")
                        else:
                            text_file.write(sub_dir_name + "\n")
                            text_file.write(str(instrument) + "\n")
                            for value in cur_controls:
                                text_file.write(str(value) + "\n")
                            for value in current_params:
                                text_file.write(str(value) + "\n")
                if event.key == pygame.K_l:  # KEYDOWN L
                    # load slider values
                    audio_pause = True
                    needs_update = True
                    audio_reset = True
                    fileName = input("File Name to read ")
                    if "." not in fileName:
                        fileName = fileName + ".txt"
                    fo = open("results/history/" + fileName, "r")
                    print(fo.name)
                    if sub_dir_name != fo.readline()[:-1]:
                        running = False
                        print("Incompatible with the current model.")
                        break
                    tempDir = fo.readline()
                    if tempDir.startswith("blended song"):
                        blend = True
                        blendnum = int(fo.readline())
                        keyframe_paths = []
                        keyframe_controls = np.zeros((blendnum, len(cur_controls)), dtype=np.float32)
                        keyframe_params = np.zeros((blendnum, num_params), dtype=np.float32)
                        for y in range(blendnum):
                            fileName2 = fo.readline()[:-1]
                            keyframe_paths.append(fileName2)  # record the referenced keyframe file, not the parent file
                            fo2 = open("results/history/" + fileName2, "r")
                            if sub_dir_name != fo2.readline()[:-1]:
                                running = False
                                print("Incompatible with the current model.")
                                break
                            instrument = int(fo2.readline())
                            for x in range(len(cur_controls)):
                                keyframe_controls[y, x] = float(fo2.readline())
                            for x in range(len(current_params)):
                                keyframe_params[y, x] = float(fo2.readline())
                    else:
                        instrument = int(tempDir)
                        for x in range(len(cur_controls)):
                            cur_controls[x] = float(fo.readline())
                        for x in range(len(current_params)):
                            current_params[x] = float(fo.readline())
                    apply_controls()
                if event.key == pygame.K_o:  # KEYDOWN O
                    if not songs_loaded:
                        print("Loading songs...")
                        try:
                            y_samples = np.load('data/interim/samples.npy')
                            y_lengths = np.load('data/interim/lengths.npy')
                            songs_loaded = True
                        except Exception as e:
                            print("This functionality checks how well the model training went by reproducing an original song. "
                                  "The composer could not load the samples and lengths from model training. "
                                  "If you have the midi files the model was trained with, process them with "
                                  "preprocess_songs.py to produce the requested files in data/interim "
                                  "(load exception: {0}).".format(e))

                    if songs_loaded:
                        # check how well the autoencoder can reconstruct a random song
                        print("Random Song Index: " + str(random_song_ix))
                        if is_ae:
                            example_song = y_samples[cur_len:cur_len + num_measures]
                            current_notes = example_song * 255
                            latent_x = encoder.predict(np.expand_dims(example_song, 0), batch_size=1)[0]
                            cur_len += y_lengths[random_song_ix]
                            random_song_ix += 1
                        else:
                            random_song_ix = np.array([random_song_ix], dtype=np.int64)
                            latent_x = encoder.predict(random_song_ix, batch_size=1)[0]
                            random_song_ix = (random_song_ix + 1) % model.layers[0].input_dim

                        if use_pca:
                            current_params = np.dot(latent_x - latent_means, latent_pca_vectors.T) / latent_pca_values
                        else:
                            current_params = (latent_x - latent_means) / latent_stds
                        needs_update = True
                        audio_reset = True
                if event.key == pygame.K_m:  # KEYDOWN M
                    # save song as midi
                    audio_pause = True
                    audio_reset = True
                    fileName = input("File Name to save into ")
                    if "." not in fileName:
                        fileName = fileName + ".mid"
                    midi_utils.samples_to_midi(current_notes, 'results/history/' + fileName, note_threshold)
                    audio_pause = False
                if event.key == pygame.K_w:  # KEYDOWN W
                    # save song as wave
                    audio_pause = True
                    audio_reset = True
                    fileName = input("File Name to save into ")
                    if "." not in fileName:
                        fileName = fileName + ".wav"
                    save_audio = b''
                    while True:
                        save_audio += audio_callback(None, 1024, None, None)[0]
                        if audio_time == 0:
                            break
                    wave_output = wave.open('results/history/' + fileName, 'w')  # fileName already carries its extension
                    wave_output.setparams((1, 2, sample_rate, 0, 'NONE', 'not compressed'))
                    wave_output.writeframes(save_audio)
                    wave_output.close()
                    audio_pause = False
                if event.key == pygame.K_ESCAPE:  # KEYDOWN ESCAPE
                    # exit application
                    running = False
                    break
                if event.key == pygame.K_SPACE:  # KEYDOWN SPACE
                    # toggle pause/play audio
                    audio_pause = not audio_pause
                if event.key == pygame.K_TAB:  # KEYDOWN TAB
                    # reset audio playback
                    audio_reset = True
                    if autosave and not autosavenow:
                        autosavenow = True
                if event.key == pygame.K_1:  # KEYDOWN 1
                    # play instrument 0
                    instrument = 0
                if event.key == pygame.K_2:  # KEYDOWN 2
                    # play instrument 1
                    instrument = 1
                if event.key == pygame.K_3:  # KEYDOWN 3
                    # play instrument 2
                    instrument = 2
                if event.key == pygame.K_4:  # KEYDOWN 4
                    # play instrument 3
                    instrument = 3
                if event.key == pygame.K_5:  # KEYDOWN 5
                    # play instrument 4
                    instrument = 4
                if event.key == pygame.K_c:  # KEYDOWN C
                    # re-encode the current (thresholded) notes back into slider parameters
                    y = np.expand_dims(np.where(current_notes > note_threshold, 1, 0), 0)
                    latent_x = encoder.predict(y)[0]
                    if use_pca:
                        current_params = np.dot(latent_x - latent_means, latent_pca_vectors.T) / latent_pca_values
                    else:
                        current_params = (latent_x - latent_means) / latent_stds
                    needs_update = True

        # if params were changed, generate a new song
        if needs_update:
            if use_pca:
                latent_x = latent_means + np.dot(current_params * latent_pca_values, latent_pca_vectors)
            else:
                latent_x = latent_means + latent_stds * current_params
            latent_x = np.expand_dims(latent_x, axis=0)
            y = decoder([latent_x, 0])[0][0]
            current_notes = (y * 255).astype(np.uint8)
            needs_update = False

        # draw GUI to the screen
        screen.fill(background_color)
        draw_notes(screen, notes_surface)
        draw_sliders(screen)
        draw_controls(screen)

        # flip the screen buffer
        pygame.display.flip()
        pygame.time.wait(10)

    # if the app is exited, close the audio stream
    audio_stream.stop_stream()
    audio_stream.close()
    audio.terminate()