def _on_off_melo(self, tensor_01):
    """ Convert a one-song tensor to one Melo.
    The tensor must contain only 0s and 1s. The returned Melo is sorted.
    Args:
        tensor_01 (np.ndarray) - shape (num_of_time, num_of_features)
    Output:
        output_melo (Melo)
    """
    output_melo = meloclass.Melo()
    output_track = meloclass.Track()
    output_melo.tracks.append(output_track)
    for time in range(tensor_01.shape[0]):
        for pitch in range(tensor_01.shape[1]):
            if tensor_01[time][pitch] == 1:
                if pitch % 2 == 0:
                    # Even column: note_on event
                    output_track.notes.append(
                        meloclass.NoteEvent(
                            "note_on",
                            pitch // 2 + self.args.prepare_normalize_lowest,
                            time))
                else:
                    # Odd column: note_off event
                    output_track.notes.append(
                        meloclass.NoteEvent(
                            "note_off",
                            (pitch - 1) // 2 + self.args.prepare_normalize_lowest,
                            time))
            elif tensor_01[time][pitch] != 0:
                raise ValueError(
                    "The tensor of tensor_of_melo cannot contain values other than 0 or 1, got %s."
                    % tensor_01[time][pitch])
    return self.melo_processor.sort_melo(output_melo)
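For intuition, here is a minimal sketch of the on/off encoding this method decodes. The tensor contents below are illustrative, not taken from the repository:

```python
import numpy as np

# Hypothetical 4-timestep, 4-feature tensor (2 pitches, on/off column pairs).
# Column 0/1 = note_on/note_off for pitch 0, columns 2/3 for pitch 1
# (pitches are later shifted up by args.prepare_normalize_lowest).
tensor_01 = np.array([
    [1, 0, 0, 0],   # t=0: pitch 0 starts
    [0, 0, 0, 0],   # t=1: nothing happens
    [0, 1, 1, 0],   # t=2: pitch 0 stops, pitch 1 starts
    [0, 0, 0, 1],   # t=3: pitch 1 stops
])
# _on_off_melo would emit, in time order:
#   note_on(pitch 0, t=0), note_off(pitch 0, t=2),
#   note_on(pitch 1, t=2), note_off(pitch 1, t=3)
```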
def _convert_melo_to_relative_time(self, abs_melo):
    """ Normally, Melo uses absolute time only. However, that is inconvenient
    when converting to a MidiFile, which uses relative time. This function
    changes every NoteEvent's time to relative time (but still uses the
    smallest unit defined by the resolution, instead of ticks, as the unit of
    time).
    NOTE that a Melo that uses relative time instead of absolute time
    should never ever appear outside midimeloconverter!
    Args:
        abs_melo (Melo)
    Output:
        rel_melo (Melo) DO NOT USE THIS MELO OUTSIDE THIS CLASS.
    """
    rel_melo = meloclass.Melo()
    for abs_melo_track in abs_melo.tracks:
        prev_time = 0
        rel_melo_track = meloclass.Track()
        for abs_note in abs_melo_track.notes:
            rel_melo_track.notes.append(
                meloclass.NoteEvent(abs_note.type, abs_note.pitch,
                                    abs_note.time - prev_time))
            prev_time = abs_note.time
        rel_melo.tracks.append(rel_melo_track)
    return rel_melo
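A quick worked example of the delta computation, using plain integers to stand in for NoteEvent times:

```python
# Absolute note times within one track: 0, 4, 4, 8
# Relative (delta) times produced:      0, 4, 0, 4
abs_times = [0, 4, 4, 8]
prev = 0
rel_times = []
for t in abs_times:
    rel_times.append(t - prev)
    prev = t
assert rel_times == [0, 4, 0, 4]  # same unit (smallest unit), not ticks
```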
def midi_to_melo(self, midifile):
    """ Convert Midi to Melo.
    Args:
        midifile (MidiFile)
    Output:
        output_melo (Melo)
    """
    output_melo = meloclass.Melo()
    # Meaning of resolution: the smallest unit of Melo is a
    # (1/(4*resolution))-th note. For example, if resolution = 1, the
    # smallest unit of Melo is a quarter note; if resolution = 4, it is
    # a 16th note.
    ticks_per_smallest_unit = (midifile.ticks_per_beat
                               // self.args.prepare_resolution)
    for midi_track in midifile.tracks:
        abs_time = 0
        melo_track = meloclass.Track()
        for msg in midi_track:
            # MetaMessage times must be accumulated too, even though
            # meta messages themselves are not added to the Melo.
            abs_time += msg.time
            if msg.type == "time_signature":
                output_melo.ts_numerator = msg.numerator
                output_melo.ts_denominator = msg.denominator
            if not msg.is_meta and msg.channel + 1 == DRUM_CHANNEL:
                # Ignore drums. Add 1 because mido's channel range starts
                # from 0 while the Midi standard starts from 1.
                # Meta messages have no "channel" attribute, so the
                # short-circuit evaluation of "not msg.is_meta" is needed
                # to prevent an AttributeError.
                continue
            if msg.type == "note_on" or msg.type == "note_off":
                melo_track.notes.append(
                    meloclass.NoteEvent(
                        str(msg.type), msg.note,
                        abs_time // ticks_per_smallest_unit))
        output_melo.tracks.append(melo_track)
    return output_melo
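To make the resolution arithmetic concrete, a small worked example (the ticks_per_beat value is illustrative; 480 is a common choice in MIDI files):

```python
# With a MIDI file of 480 ticks per beat and
# args.prepare_resolution = 4 (smallest unit = 16th note):
ticks_per_beat = 480
prepare_resolution = 4
ticks_per_smallest_unit = ticks_per_beat // prepare_resolution  # 120
# A note_on at absolute tick 960 (beat 3) therefore lands at
# Melo time 960 // 120 == 8, i.e. the 9th 16th-note slot.
assert 960 // ticks_per_smallest_unit == 8
```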
def normalize_melo(self, bound_melo):
    """ Transpose a one-track Melo so that its lowest note is around
    args.prepare_normalize_lowest. The transposition tries to make the
    new Melo as close to C major or A minor as possible by minimizing
    the number of black keys. Any note higher than
    args.prepare_normalize_lowest + args.prepare_normalize_range is
    removed (not to extract a melody, which raw_melo_to_melody_melo()
    has already done, but to restrict the number of features of the
    neural network's input).
    Args:
        bound_melo (Melo)
    Return:
        normalized_melo (Melo)
    """
    # TODO: This is not a good transposing algorithm.
    # Using an advanced key detection algorithm could
    # help to perform better normalization.
    if len(bound_melo.tracks) != 1:
        raise ValueError(
            "Do not call normalize_melo on a Melo that does not have exactly one track. This Melo has %i tracks."
            % len(bound_melo.tracks))
    original_pitch_set = set([n.pitch for n in bound_melo.tracks[0].notes])
    start_trans = min(original_pitch_set) - self.args.prepare_normalize_lowest
    start_pitch_set = set([p - start_trans for p in original_pitch_set])
    # Try all 12 upward transpositions of the shifted pitch set and keep
    # the one with the fewest black keys.
    min_black_keys = self._count_black_keys(start_pitch_set)
    min_pitch_set = start_pitch_set
    for i in range(1, 12):
        pitch_set = set([p + i for p in start_pitch_set])
        black_keys = self._count_black_keys(pitch_set)
        if black_keys < min_black_keys:
            min_black_keys = black_keys
            min_pitch_set = pitch_set
    final_trans = min(original_pitch_set) - min(min_pitch_set)
    normalized_melo = meloclass.Melo()
    normalized_track = meloclass.Track()
    normalized_melo.tracks.append(normalized_track)
    normalized_track.notes = [
        meloclass.NoteEvent(n.type, n.pitch - final_trans, n.time)
        for n in bound_melo.tracks[0].notes
        if self.args.prepare_normalize_lowest
        <= n.pitch - final_trans
        <= self.args.prepare_normalize_lowest + self.args.prepare_normalize_range
    ]
    return normalized_melo
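The `_count_black_keys` helper is referenced above but not shown in this section. A plausible sketch, assuming pitches follow the MIDI convention where pitch classes {1, 3, 6, 8, 10} are the black keys:

```python
BLACK_KEY_CLASSES = {1, 3, 6, 8, 10}  # pitch classes of C#, D#, F#, G#, A#

def count_black_keys(pitch_set):
    """Hypothetical stand-in for self._count_black_keys: count pitches
    that land on black keys."""
    return sum(1 for p in pitch_set if p % 12 in BLACK_KEY_CLASSES)

# C major scale starting at MIDI 60 has no black keys:
assert count_black_keys({60, 62, 64, 65, 67, 69, 71}) == 0
# Shifting it up one semitone puts five notes on black keys:
assert count_black_keys({61, 63, 65, 66, 68, 70, 72}) == 5
```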
def _no_bound_combine(self, raw_melo):
    """ Convert a multi-track Melo to a one-track Melo sorted by time,
    without removing anything.
    Args:
        raw_melo (Melo)
    Return:
        processed_melo (Melo)
    """
    processed_melo = meloclass.Melo()
    processed_track = meloclass.Track()
    processed_melo.tracks.append(processed_track)
    for raw_track in raw_melo.tracks:
        for note in raw_track.notes:
            processed_track.notes.append(note)
    processed_track.notes.sort(key=lambda x: x.time)
    return processed_melo
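Worth noting: Python's list.sort is stable, so notes that share the same time keep their append order (track by track). A minimal illustration, with hypothetical tuples standing in for NoteEvent objects:

```python
# (type, pitch, time) tuples standing in for NoteEvent objects.
track_a = [("note_on", 60, 0), ("note_off", 60, 4)]
track_b = [("note_on", 64, 4)]
merged = track_a + track_b
merged.sort(key=lambda x: x[2])
# Stable sort keeps track_a's note_off before track_b's note_on at time 4:
assert merged == [("note_on", 60, 0), ("note_off", 60, 4), ("note_on", 64, 4)]
```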
def _kill_note_combine(self, raw_melo):
    """ Convert a multi-track Melo to a one-track Melo sorted by time.
    However, if a note goes out of the range defined by
    args.melody_bound_mode, that note is ignored.
    Args:
        raw_melo (Melo)
    Return:
        processed_melo (Melo)
    """
    processed_melo = meloclass.Melo()
    processed_track = meloclass.Track()
    processed_melo.tracks.append(processed_track)
    for raw_track in raw_melo.tracks:
        for note in raw_track.notes:
            if self._is_in_range(note):
                processed_track.notes.append(note)
    processed_track.notes.sort(key=lambda x: x.time)
    return processed_melo
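The `_is_in_range` predicate is not shown in this section. A plausible sketch, consistent with the pitch bounds used by normalize_melo (the actual bounds selected by args.melody_bound_mode may differ):

```python
def is_in_range(note, lowest, note_range):
    """Hypothetical stand-in for self._is_in_range: keep a note only if
    its pitch lies within [lowest, lowest + note_range]."""
    return lowest <= note.pitch <= lowest + note_range
```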
def _hold_on_melo(self, tensor_01):
    """ Convert a one-song tensor to one Melo.
    The tensor must contain only 0s and 1s. The returned Melo is sorted.
    Args:
        tensor_01 (np.ndarray) - shape (num_of_time, num_of_features)
    Output:
        output_melo (Melo)
    """
    output_melo = meloclass.Melo()
    output_track = meloclass.Track()
    output_melo.tracks.append(output_track)
    for time in range(tensor_01.shape[0]):
        for pitch in range(tensor_01.shape[1]):
            if pitch % 2 == 0 and tensor_01[time][pitch] == 1:
                on_note = meloclass.NoteEvent(
                    "note_on",
                    pitch // 2 + self.args.prepare_normalize_lowest, time)
                output_track.notes.append(on_note)
                # Find the note_off time: the first step after the onset
                # where the hold column (pitch + 1) drops back to 0.
                off_note_found = False
                off_time = None
                j = time + 1
                while not off_note_found and j < tensor_01.shape[0]:
                    if tensor_01[j][pitch + 1] == 0:
                        off_time = j
                        off_note_found = True
                    j += 1
                # When examining a subsong, it is possible that a note_off
                # cannot be found within range; in that case, simply use
                # the last time step.
                if not off_note_found:
                    off_time = tensor_01.shape[0] - 1
                off_note = meloclass.NoteEvent(
                    "note_off",
                    pitch // 2 + self.args.prepare_normalize_lowest, off_time)
                output_track.notes.append(off_note)
            elif tensor_01[time][pitch] != 0 and tensor_01[time][pitch] != 1:
                raise ValueError(
                    "The tensor of tensor_of_melo cannot contain values other than 0 or 1.")
    return self.melo_processor.sort_melo(output_melo)
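For contrast with the on/off encoding above, a minimal sketch of the hold encoding this method decodes (tensor contents are illustrative):

```python
import numpy as np

# Hypothetical hold encoding: even column = onset flag, odd column = hold flag.
# Columns 0/1 describe pitch 0 (before the prepare_normalize_lowest shift).
tensor_01 = np.array([
    [1, 1],   # t=0: pitch 0 starts and is held
    [0, 1],   # t=1: still held
    [0, 0],   # t=2: hold column drops to 0 -> note_off at t=2
    [0, 0],
])
# _hold_on_melo would emit note_on(pitch 0, t=0) and note_off(pitch 0, t=2).
```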
def _kill_track_combine(self, raw_melo):
    """ Convert a multi-track Melo to a one-track Melo sorted by time.
    However, if a track has at least one note that goes out of the range
    defined by args.melody_bound_mode, that whole track is ignored.
    Args:
        raw_melo (Melo)
    Return:
        processed_melo (Melo)
    """
    processed_melo = meloclass.Melo()
    for raw_track in raw_melo.tracks:
        # Keep a track only if every one of its notes is in range.
        if all(self._is_in_range(note) for note in raw_track.notes):
            processed_track = meloclass.Track()
            processed_track.notes = list(raw_track.notes)
            processed_melo.tracks.append(processed_track)
    # Flatten the surviving tracks into one sorted track.
    return self._no_bound_combine(processed_melo)
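To compare the three combine strategies side by side, here is a self-contained sketch with stand-in classes and a hypothetical pitch bound (these are not the repository's types):

```python
# Stand-ins for meloclass objects, for illustration only.
class Note:
    def __init__(self, pitch, time):
        self.pitch, self.time = pitch, time

LOWEST, RANGE = 48, 24  # hypothetical bound: keep pitches in [48, 72]
in_range = lambda n: LOWEST <= n.pitch <= LOWEST + RANGE

track_a = [Note(60, 0), Note(64, 4)]   # fully in range
track_b = [Note(62, 2), Note(90, 6)]   # one note (90) out of range

# _kill_note_combine-style: drop only the offending note.
kill_note = sorted(
    [n for t in (track_a, track_b) for n in t if in_range(n)],
    key=lambda n: n.time)
assert [n.pitch for n in kill_note] == [60, 62, 64]

# _kill_track_combine-style: discard track_b entirely.
kill_track = sorted(
    [n for t in (track_a, track_b) if all(in_range(n) for n in t) for n in t],
    key=lambda n: n.time)
assert [n.pitch for n in kill_track] == [60, 64]
```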