import numpy as np
import torch
import music21 as ms
from random import randint

# Assumes the module-level objects defined elsewhere in this project:
# notes_encoded, decode(), get_tempo_dim_back(), get_right_hand() and combine()


def ltsm_gen_v2(net, seq_len, file_name, sampling_idx=0, note_pos=0, n_steps=100,
                hidden_size=178, num_layers=1, time_step=0.05, changing_note=False,
                note_stuck=False, remove_extra_rests=True):
    """
    Uses the trained LSTM to generate new notes and saves the output to a MIDI file.
    The difference between this and ltsm_gen() is that here we use only one note as
    input, then keep generating notes until we have a sequence of length seq_len.
    Once we do, we start appending the generated notes to the final output.

    :param net: Trained LSTM
    :param seq_len: Length of the input sequence
    :param file_name: Name to be given to the generated MIDI file
    :param sampling_idx: File to get the input note from, out of the pieces used to train the LSTM
    :param note_pos: Position of the sampled input note in the source piece, defaults to the first note
    :param n_steps: Number of vectors to generate
    :param hidden_size: Hidden size of the trained LSTM
    :param num_layers: Number of layers of the trained LSTM
    :param time_step: Vector duration. Should be the same as the one used in get_right_hand()
    :param changing_note: Sample a note from a different source piece at some point of the
        generation and add it to the sequence, in case the generation gets stuck
        repeating a particular sequence over and over
    :param note_stuck: Change the note if the generation gets stuck playing the same
        note over and over
    :param remove_extra_rests: Use this if the generation outputs a lot of rests in between notes
    :return: None. Just saves the generated music as a .mid file
    """
    notes = []  # Will contain the sequence of predicted notes
    x = notes_encoded[sampling_idx][note_pos:note_pos + 1, :, :]  # Single input note
    notes.append(x.cpu().numpy())  # Saves the first note
    h_state = torch.zeros(num_layers, 1, hidden_size).float().cuda()
    c_state = torch.zeros(num_layers, 1, hidden_size).float().cuda()
    print_first = True  # To print a message the first time no component reaches 0.9
    change_note = False

    for step in range(n_steps):
        y_pred, h_c_state = net(x, (h_state, c_state))
        h_state, c_state = h_c_state[0].data, h_c_state[1].data
        y_pred = y_pred.data
        y_pred = y_pred[-1]  # We only care about the last predicted note
                             # (the note that follows the input sequence)
        choose = torch.zeros((1, 1, 178))  # Converts the probabilities to the actual note vector

        # Left hand: components 0-88 (88 pitch/rest classes plus a hold flag).
        # The chosen flags are tracked per hand, so that a confident left-hand
        # prediction cannot suppress the right-hand fallback below (the original
        # code shared a single flag between both hands)
        chosen_left = False
        y_pred_left = y_pred[:, :89]
        for idx in range(89):
            if y_pred_left[:, idx] > 0.9:
                choose[:, :, idx] = 1
                chosen_left = True
                if y_pred_left[:, -1] >= 0.7:  # Hold condition, in case the probability of a
                    choose[:, :, 88] = 1       # hold is close to the probability of the pitch
        if not chosen_left:  # No component's probability exceeded 0.9: fall back to the argmax
            if print_first:
                print("\nPrinting out the maximum prob of all notes for a time step",
                      "when this maximum prob is less than 0.9")
                print_first = False
            pred_note_idx = np.argmax(y_pred_left.cpu())
            choose[:, :, pred_note_idx] = 1
            if pred_note_idx != 87:  # No holds for rests
                if y_pred_left[:, pred_note_idx] - y_pred_left[:, -1] <= 0.2:  # Hold condition
                    choose[:, :, 88] = 1
            print(step, "left", y_pred_left[:, pred_note_idx])  # Maximum probability out of all components

        # Right hand: components 89-177, decoded the same way
        chosen_right = False
        y_pred_right = y_pred[:, 89:]
        for idx in range(89):
            if y_pred_right[:, idx] > 0.9:
                choose[:, :, idx + 89] = 1
                chosen_right = True
                if y_pred_right[:, -1] >= 0.7:
                    choose[:, :, -1] = 1
        if not chosen_right:
            if print_first:
                print("\nPrinting out the maximum prob of all notes for a time step",
                      "when this maximum prob is less than 0.9")
                print_first = False
            pred_note_idx = np.argmax(y_pred_right.cpu())
            choose[:, :, pred_note_idx + 89] = 1
            if pred_note_idx != 87:  # No holds for rests
                if y_pred_right[:, pred_note_idx] - y_pred_right[:, -1] <= 0.2:  # Hold condition
                    choose[:, :, -1] = 1
            print(step, "right", y_pred_right[:, pred_note_idx])  # Maximum probability out of all components

        if x.shape[0] < seq_len:
            # The input sequence is still shorter than the expected one:
            # keep appending the predicted notes to it
            x_new = torch.empty((x.shape[0] + 1, x.shape[1], x.shape[2]))
            for i in range(x_new.shape[0] - 1):
                x_new[i, :, :] = x[i, :, :]
            x_new[-1, :, :] = y_pred
            x = x_new.cuda()
        else:
            # We already have enough notes: drop the first note of the current
            # sequence and append the predicted note at the end
            x_new = torch.empty(x.shape)
            for idx, nt in enumerate(x[1:]):
                x_new[idx] = nt
            x_new[-1] = choose
            x = x_new.cuda()
        notes.append(choose.cpu().numpy())  # Stored as NumPy throughout, so the
        # note_stuck comparison below works on a homogeneous list

        # Condition so that the generation does not get stuck on a particular sequence
        if changing_note:
            if step % seq_len == 0:
                if sampling_idx >= len(notes_encoded):
                    sampling_idx = 0
                    change_note = True
                st = randint(1, 100)
                if change_note:
                    x_new[-1] = notes_encoded[sampling_idx][st, :, :]
                    change_note = False
                else:
                    x_new[-1] = notes_encoded[sampling_idx][0, :, :]
                sampling_idx += 1
                x = x_new.cuda()

        # Condition so that the generation does not get stuck on a particular note:
        # if the right hand has been (nearly) identical for the last five predictions,
        # drop them and restart from a freshly sampled note
        if step > 8 and note_stuck:
            stuck = all(
                (notes[-1][:, :, 89:] == notes[-k][:, :, 89:]).sum(2)[0][0] in [88, 89]
                for k in range(2, 7))
            if stuck:
                for _ in range(5):
                    notes.pop(-1)
                if sampling_idx >= len(notes_encoded):
                    sampling_idx = 0
                x_new[-1] = notes_encoded[sampling_idx][randint(1, 100), :, :]
                x = x_new.cuda()
                sampling_idx += 1

    # Gets the notes into the correct NumPy array shape, skipping the first
    # predicted notes, because at first the predictions will be inaccurate
    gen_notes = np.empty((len(notes) - seq_len + 1, 178))
    for idx, nt in enumerate(notes[seq_len - 1:]):
        gen_notes[idx] = nt[0]

    # Decodes the generated music, left hand first
    gen_midi_left = decode(get_tempo_dim_back(gen_notes[:, :89], 74), time_step=time_step)

    # Gets rid of too many rests
    if remove_extra_rests:
        stream_left = ms.stream.Stream()
        for idx, nt in enumerate(gen_midi_left):
            if isinstance(nt, ms.note.Rest) and idx < len(gen_midi_left) - 5:
                if nt.duration.quarterLength > 4 * time_step:  # Skips overly long rests
                    print("Removing rest")
                    continue
                if isinstance(gen_midi_left[idx + 4], ms.note.Rest):  # Skips rests followed
                    print("Removing rest")                            # closely by another rest
                    continue
                stream_left.append(nt)
            else:
                stream_left.append(nt)
    else:
        stream_left = gen_midi_left

    # Same thing for the right hand
    gen_midi_right = decode(get_tempo_dim_back(gen_notes[:, 89:], 74), time_step=time_step)
    if remove_extra_rests:
        stream_right = ms.stream.Stream()
        for idx, nt in enumerate(gen_midi_right):
            if isinstance(nt, ms.note.Rest) and idx < len(gen_midi_right) - 5:
                if nt.duration.quarterLength > 4 * time_step:
                    print("Removing rest")
                    continue
                if isinstance(gen_midi_right[idx + 4], ms.note.Rest):
                    print("Removing rest")
                    continue
                stream_right.append(nt)
            else:
                stream_right.append(nt)
    else:
        stream_right = gen_midi_right

    # Saves both hands combined as a MIDI file
    combine(stream_left, stream_right, file_name + ".mid")
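# The per-hand thresholding rule above (confident picks over 0.9, a hold added
# when the hold probability reaches 0.7, and an argmax fallback that adds a hold
# when the gap to the hold probability is at most 0.2) is written out four times
# across ltsm_gen_v2() and ltsm_gen(). A minimal sketch of how it could be
# factored out; threshold_hand is a hypothetical helper, not part of the
# original code:
def threshold_hand(y_hand, choose, offset, step, label):
    """Decodes one hand's 89 probabilities into the one-hot slice of `choose`
    starting at `offset` (0 for the left hand, 89 for the right)."""
    chosen = False
    for idx in range(89):
        if y_hand[:, idx] > 0.9:           # Confident prediction: switch it on directly
            choose[:, :, offset + idx] = 1
            chosen = True
            if y_hand[:, -1] >= 0.7:       # Hold probability close behind: hold the note
                choose[:, :, offset + 88] = 1
    if not chosen:                         # Nothing confident: fall back to the argmax
        pred_note_idx = np.argmax(y_hand.cpu())
        choose[:, :, offset + pred_note_idx] = 1
        if pred_note_idx != 87 and y_hand[:, pred_note_idx] - y_hand[:, -1] <= 0.2:
            choose[:, :, offset + 88] = 1  # Hold condition; no holds for rests (index 87)
        print(step, label, y_hand[:, pred_note_idx])
    return chosen

# With this helper, each generation step would reduce to:
#   choose = torch.zeros((1, 1, 178))
#   threshold_hand(y_pred[:, :89], choose, 0, step, "left")
#   threshold_hand(y_pred[:, 89:], choose, 89, step, "right")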
# Fragment from the route-drawing/evaluation script; assumes matplotlib.pyplot
# as plt, random, torch, and the trained `model`, `test_loader` and `decode()`
# defined earlier in the file.


def draw_route(permutation, points):
    # The opening of this function is truncated in the source. From the `else`
    # branch that survives, it walks the permutation and draws an edge between
    # each pair of consecutive locations; the reconstruction below assumes the
    # final position wraps around to close the tour.
    for position_index, location_index in enumerate(permutation):
        if position_index == len(permutation) - 1:
            next_location_index = permutation[0]  # Assumed: close the tour
        else:
            next_location_index = permutation[position_index + 1]
        location_from = points[location_index]
        location_to = points[next_location_index]
        draw_edge(location_from, location_to)
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.gca().set_aspect('equal', adjustable='box')
    plt.show()


# Draws the predicted route for one random sample from the first test batch
for index, test_batch in enumerate(test_loader):
    if index > 0:
        break
    random_sample = random.randint(0, 10)
    test_x = test_batch[0][random_sample]  # Fixed: the original read `batch`, which is undefined here
    pred = model(test_x)
    decoded_pred = decode(pred.detach().numpy()).astype('int')
    test_x = test_x.detach().numpy().reshape(-1, 2)
    draw_route(decoded_pred, test_x)

# Save model (fixed extension: state dicts are conventionally saved as .pt, not .py)
torch.save(model.state_dict(), './models/v1.0.pt')
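# draw_edge() is referenced above but not defined in this excerpt. A minimal
# sketch of what it could look like, assuming matplotlib and (x, y) point pairs
# in the unit square; the styling is illustrative, not the project's actual
# implementation, and in a real module it would need to be defined before
# draw_route() is called:
import matplotlib.pyplot as plt

def draw_edge(location_from, location_to):
    # Draws the segment between two stops and marks the starting point
    plt.plot([location_from[0], location_to[0]],
             [location_from[1], location_to[1]], linewidth=1)
    plt.scatter([location_from[0]], [location_from[1]], s=12)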
def ltsm_gen(net, seq_len, file_name, sampling_idx=0, sequence_start=0, n_steps=100,
             hidden_size=178, time_step=0.05, changing_note=False, note_stuck=False,
             remove_extra_rests=True):
    """
    Uses the trained LSTM to generate new notes and saves the output to a MIDI file.
    This approach seeds the network with a whole sequence of notes from one of the
    pieces used to train it, of length seq_len, which should be the same as the one
    used when training.

    :param net: Trained LSTM
    :param seq_len: Length of the input sequence
    :param file_name: Name to be given to the generated MIDI file
    :param sampling_idx: File to get the input sequence from, out of the pieces used to train the LSTM
    :param sequence_start: Index of the starting sequence, defaults to 0
    :param n_steps: Number of vectors to generate
    :param hidden_size: Hidden size of the trained LSTM
    :param time_step: Vector duration. Should be the same as the one used in get_right_hand()
    :param changing_note: Sample a note from a different source piece at some point of the
        generation and add it to the sequence, in case the generation gets stuck
        repeating a particular sequence over and over
    :param note_stuck: Change the note if the generation gets stuck playing the same
        note over and over
    :param remove_extra_rests: Use this if the generation outputs a lot of rests in between notes
    :return: None. Just saves the generated music as a .mid file
    """
    notes = []  # Will contain the sequence of predicted notes
    x = notes_encoded[sampling_idx][sequence_start:sequence_start + seq_len]  # Input sequence
    for nt in x:                        # These seed notes start the prediction and are
        notes.append(nt.cpu().numpy())  # later removed from the final output
    h_state = torch.zeros(1, 1, hidden_size).float().cuda()
    c_state = torch.zeros(1, 1, hidden_size).float().cuda()
    print_first = True  # To print a message the first time no component reaches 0.9
    change_note = False

    for step in range(n_steps):
        y_pred, h_c_state = net(x, (h_state, c_state))           # Predicts the next notes for all
        h_state, c_state = h_c_state[0].data, h_c_state[1].data  # notes in the input sequence
        y_pred = y_pred.data
        y_pred = y_pred[-1]  # We only care about the last predicted note
                             # (the note that follows the input sequence)
        choose = torch.zeros((1, 1, 178))  # Converts the probabilities to the actual note vector

        # Left hand, decoded as in ltsm_gen_v2(); the chosen flags are again
        # tracked per hand so one hand cannot suppress the other's fallback
        chosen_left = False
        y_pred_left = y_pred[:, :89]
        for idx in range(89):
            if y_pred_left[:, idx] > 0.9:
                choose[:, :, idx] = 1
                chosen_left = True
                if y_pred_left[:, -1] >= 0.7:  # Hold condition, in case the probability of a
                    choose[:, :, 88] = 1       # hold is close to the probability of the pitch
        if not chosen_left:
            if print_first:
                print("\nPrinting out the maximum prob of all notes for a time step",
                      "when this maximum prob is less than 0.9")
                print_first = False
            pred_note_idx = np.argmax(y_pred_left.cpu())
            choose[:, :, pred_note_idx] = 1
            if pred_note_idx != 87:  # No holds for rests
                if y_pred_left[:, pred_note_idx] - y_pred_left[:, -1] <= 0.2:  # Hold condition
                    choose[:, :, 88] = 1
            print(step, "left", y_pred_left[:, pred_note_idx])  # Maximum probability out of all components

        # Right hand
        chosen_right = False
        y_pred_right = y_pred[:, 89:]
        for idx in range(89):
            if y_pred_right[:, idx] > 0.9:
                choose[:, :, idx + 89] = 1
                chosen_right = True
                if y_pred_right[:, -1] >= 0.7:
                    choose[:, :, -1] = 1
        if not chosen_right:
            if print_first:
                print("\nPrinting out the maximum prob of all notes for a time step",
                      "when this maximum prob is less than 0.9")
                print_first = False
            pred_note_idx = np.argmax(y_pred_right.cpu())
            choose[:, :, pred_note_idx + 89] = 1
            if pred_note_idx != 87:  # No holds for rests
                if y_pred_right[:, pred_note_idx] - y_pred_right[:, -1] <= 0.2:  # Hold condition
                    choose[:, :, -1] = 1
            print(step, "right", y_pred_right[:, pred_note_idx])  # Maximum probability out of all components

        # Uses the output of this time step as the input for the next one:
        # the new sequence is the old one minus its first note, with the
        # predicted note appended at the end
        x_new = torch.empty(x.shape)
        for idx, nt in enumerate(x[1:]):
            x_new[idx] = nt
        x_new[-1] = choose
        x = x_new.cuda()
        notes.append(choose.cpu().numpy())  # Saves the predicted note

        # Condition so that the generation does not get stuck on a particular sequence
        if changing_note:
            if step % seq_len == 0:
                if sampling_idx >= len(notes_encoded):
                    sampling_idx = 0
                    change_note = True
                st = randint(1, 100)
                if change_note:
                    x_new[-1] = notes_encoded[sampling_idx][st, :, :]
                    change_note = False
                else:
                    x_new[-1] = notes_encoded[sampling_idx][0, :, :]
                sampling_idx += 1
                x = x_new.cuda()

        # Condition so that the generation does not get stuck on a particular note:
        # if the right hand has been (nearly) identical for the last five predictions,
        # drop them and restart from a freshly sampled note. (The original called
        # .numpy() on what is already a NumPy result, which would raise; that call
        # is dropped here.)
        if step > 6 and note_stuck:
            stuck = all(
                (notes[-1][:, :, 89:] == notes[-k][:, :, 89:]).sum(2)[0][0] in [88, 89]
                for k in range(2, 7))
            if stuck:
                for _ in range(5):
                    notes.pop(-1)
                if sampling_idx >= len(notes_encoded):
                    sampling_idx = 0
                x_new[-1] = notes_encoded[sampling_idx][randint(1, 100), :, :]
                x = x_new.cuda()
                sampling_idx += 1

    # Gets the notes into the correct NumPy array shape, skipping the seed notes,
    # because these were sampled from the training data
    gen_notes = np.empty((len(notes) - seq_len + 1, 178))
    for idx, nt in enumerate(notes[seq_len - 1:]):
        gen_notes[idx] = nt[0]

    # Decodes the generated music, left hand first
    gen_midi_left = decode(get_tempo_dim_back(gen_notes[:, :89], 74), time_step=time_step)

    # Gets rid of too many rests
    if remove_extra_rests:
        stream_left = ms.stream.Stream()
        for idx, nt in enumerate(gen_midi_left):
            if isinstance(nt, ms.note.Rest) and idx < len(gen_midi_left) - 5:
                if nt.duration.quarterLength > 4 * time_step:  # Skips overly long rests
                    print("Removing rest")
                    continue
                if isinstance(gen_midi_left[idx + 4], ms.note.Rest):  # Skips rests followed
                    print("Removing rest")                            # closely by another rest
                    continue
                stream_left.append(nt)
            else:
                stream_left.append(nt)
    else:
        stream_left = gen_midi_left

    # Same thing for the right hand
    gen_midi_right = decode(get_tempo_dim_back(gen_notes[:, 89:], 74), time_step=time_step)
    if remove_extra_rests:
        stream_right = ms.stream.Stream()
        for idx, nt in enumerate(gen_midi_right):
            if isinstance(nt, ms.note.Rest) and idx < len(gen_midi_right) - 5:
                if nt.duration.quarterLength > 4 * time_step:
                    print("Removing rest")
                    continue
                if isinstance(gen_midi_right[idx + 4], ms.note.Rest):
                    print("Removing rest")
                    continue
                stream_right.append(nt)
            else:
                stream_right.append(nt)
    else:
        stream_right = gen_midi_right

    # Saves both hands combined as a MIDI file
    combine(stream_left, stream_right, file_name + ".mid")
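# A minimal usage sketch for both generators. It assumes `net` is the LSTM
# trained elsewhere in this project and `notes_encoded` has been populated;
# seq_len must match the value used during training. `generate_demo` and the
# concrete values (seq_len=50, n_steps=200) are illustrative, not from the
# original code:
def generate_demo(net, seq_len=50):
    net = net.cuda().eval()
    # Seed with a full sequence from the first training piece
    ltsm_gen(net, seq_len, "gen_from_sequence", sampling_idx=0,
             n_steps=200, remove_extra_rests=True)
    # Seed with a single note and let the model build up its own context
    ltsm_gen_v2(net, seq_len, "gen_from_single_note", sampling_idx=0,
                note_pos=0, n_steps=200, note_stuck=True)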
import numpy as np
import crossover as c
import encoder_decoder as ed

channels_list = []  # List of channel names
channels_ROI = []   # ROI of each channel, to be transformed into %
chrom_list = []     # Population list
lu_list = []        # Lower and upper bounds list

budget = int(input("Enter the marketing budget in thousands: \n"))
Nofchannels = int(input("Enter the number of marketing channels: \n"))

for _ in range(Nofchannels):
    c_name, c_value = input("Enter the name and ROI of each channel: \n").split(" ")
    channels_list.append(c_name)
    channels_ROI.append(float(c_value))  # Fixed: store the ROI as a number, not a string

for _ in range(Nofchannels):
    l, u = input(
        "Enter the lower (k) and upper (%) bounds of investment in each channel:\n"
        "(enter x if there is no bound)\n").split(" ")
    if l != "x":
        # Converts the lower bound from thousands to a percentage of the budget;
        # the factors of 1000 cancel, so this is simply l / budget * 100
        lu_list.append((float(l) * 1000 / (budget * 1000) * 100, u))
    else:
        lu_list.append((l, u))

print(ed.encode(channels_list))
print(ed.decode(3))
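# The bound conversion above reduces to l / budget * 100. A small helper making
# that explicit; lower_bound_percent is a hypothetical name, not part of the
# crossover or encoder_decoder modules:
def lower_bound_percent(lower_k, budget_k):
    """Converts a lower bound given in thousands into a percentage of a budget
    that is also given in thousands: (l * 1000) / (budget * 1000) * 100."""
    return lower_k / budget_k * 100

# For example, a 50k floor on a 200k budget is a 25.0% lower bound
assert lower_bound_percent(50, 200) == 25.0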