def get_instru_and_pr_from_folder_path_NP(folder_path, quantization):
    """Load the pianoroll and instrumentation mapping from a folder.

    The folder must contain exactly one music file (``.mid`` or ``.xml``)
    together with a companion ``.csv`` file (same base name) whose first
    row maps track names to instrument names.

    Parameters
    ----------
    folder_path : str
        Directory containing the music file and its csv companion.
    quantization : int
        Temporal quantization forwarded to the midi/xml reader.

    Returns
    -------
    tuple
        ``(pianoroll, instru_simple, total_time, name)`` where ``name``
        is the music file path without its extension.

    Raises
    ------
    ValueError
        If the folder does not contain exactly one music file.
    """
    # There should be exactly one music file in the folder.
    music_files = [e for e in os.listdir(folder_path)
                   if re.search(r'\.(mid|xml)$', e)]
    if len(music_files) != 1:
        # raise instead of assert: asserts are stripped under -O
        raise ValueError("There should be only one music file")
    music_file = music_files[0]
    music_file_path = os.path.join(folder_path, music_file)

    # Read the pianoroll according to the file type.
    if re.search(r'\.mid$', music_file):
        # Deduce the csv path from the midi path.
        csv_file_path = re.sub(r'\.mid$', '.csv', music_file_path)
        reader_midi = Read_midi(music_file_path, quantization)
        pianoroll = reader_midi.read_file()
    else:
        csv_file_path = re.sub(r'\.xml$', '.csv', music_file_path)
        pianoroll, articulation = mxml.scoreToPianoroll(
            music_file_path, quantization)

    total_time = get_pianoroll_time(pianoroll)

    # The first csv row maps track names to instrument names.
    with open(csv_file_path, 'r') as ff:
        rr = csv.DictReader(ff, delimiter=';')
        instru = next(rr)

    # Simplify names: keep only tracks not marked as useless.
    instru_simple = {k: simplify_instrumentation(v) for k, v in instru.items()}

    # File name without extension.
    # BUG FIX: the original pattern r'\.(mid|csv)$' could never match the
    # '.xml' extension, so xml inputs kept their extension in `name`.
    name = re.sub(r'\.(mid|xml)$', '', music_file_path)
    return pianoroll, instru_simple, total_time, name
def file_processing(path, quantization, clip):
    """Read a midi file into a pianoroll, optionally clipping it.

    Returns the pianoroll dict and its total duration in frames.
    """
    # Parse the midi file at the requested quantization.
    pianoroll = Read_midi(path, quantization).read_file()
    # Optionally clip the pianoroll values.
    if clip:
        pianoroll = clip_pr(pianoroll)
    duration = get_pianoroll_time(pianoroll)
    return pianoroll, duration
def linear_warp_pr(pianoroll, T_target):
    """Linearly time-stretch every track of *pianoroll* to T_target frames.

    T_target is a scalar number of frames; output frame t is taken from
    source frame floor(t * T_source / T_target).
    """
    T_source = get_pianoroll_time(pianoroll)
    stretch = T_source / float(T_target)
    # Source frame index for each target frame.
    frame_map = [int(math.floor(t * stretch)) for t in range(T_target)]
    return {track: mat[frame_map, :] for track, mat in pianoroll.items()}
def pitch_transposition(pr, pitch_shift):
    """Transpose every track of *pr* by *pitch_shift* semitones.

    pr is a dict of pianorolls (one entry per instrument), each of shape
    (T, N) with N the pitch dimension. A positive shift moves notes
    toward higher pitches; notes pushed past either edge of the pitch
    range are discarded. With a zero shift the input dict is returned
    unchanged.
    """
    if pitch_shift == 0:
        return pr
    T = pianoroll_processing.get_pianoroll_time(pr)
    N = pianoroll_processing.get_pitch_dim(pr)
    transposed = {}
    for track, roll in pr.items():
        shifted = np.zeros((T, N))
        if pitch_shift > 0:
            # Shift up: the top pitch_shift pitches fall off the edge.
            shifted[:, pitch_shift:] = roll[:, :N - pitch_shift]
        else:
            # Shift down: the bottom |pitch_shift| pitches fall off.
            shifted[:, :N + pitch_shift] = roll[:, -pitch_shift:]
        transposed[track] = shifted
    return transposed
def load_solo(piano_midi, quantization, binarize_piano, temporal_granularity):
    """Load a solo-piano midi file as a processed pianoroll.

    Parameters
    ----------
    piano_midi : str
        Path to the midi file.
    quantization : int
        Temporal quantization forwarded to the midi reader.
    binarize_piano : bool
        Forwarded to process_data_piano.
    temporal_granularity : str
        If ``'event_level'``, the pianoroll is warped onto note-event
        frames; otherwise the frame-level pianoroll is kept.

    Returns
    -------
    tuple
        ``(pr_piano, event_piano, name_piano, None, None, duration)``;
        ``event_piano`` is None unless event-level granularity is used.
    """
    # BUG FIX: the original read from the undefined name `path`
    # (NameError at runtime); the parameter is `piano_midi`.
    pr_piano = Read_midi(piano_midi, quantization).read_file()
    # Process pr_piano
    pr_piano = process_data_piano(pr_piano, binarize_piano)
    # Event-level representation: keep only frames where a note event occurs.
    if temporal_granularity == 'event_level':
        event_piano = get_event_ind_dict(pr_piano)
        pr_piano = warp_pr_aux(pr_piano, event_piano)
    else:
        event_piano = None
    # NOTE(review): this pattern deletes everything from the first '/' up to
    # '.mid', so an absolute path yields ''. It may have been meant to
    # extract a short name — confirm against callers before changing.
    name_piano = re.sub(r'/.*\.mid', '', piano_midi)
    duration = get_pianoroll_time(pr_piano)
    return pr_piano, event_piano, name_piano, None, None, duration
def file_processing(path, quantization, clip):
    """Read a midi or xml file into a pianoroll, optionally clipping it.

    Returns the pianoroll dict and its total duration in frames.

    Raises
    ------
    Exception
        If *path* has neither a ``.mid`` nor a ``.xml`` extension.
    """
    if re.search(r'\.mid$', path):
        # Read midi
        reader_midi = Read_midi(path, quantization)
        pianoroll = reader_midi.read_file()
    elif re.search(r'\.xml$', path):
        pianoroll, articulation, staccato_curve = mxml_to_pr(
            path, quantization)
        # NOTE(review): these sums are computed but never used or returned;
        # kept for behavioural parity — confirm whether they can be removed.
        pr = sum_along_instru_dim(pianoroll)
        arti = sum_along_instru_dim(articulation)
        stacc = sum_along_instru_dim(staccato_curve)
    else:
        # BUG FIX: the original never formatted the {} placeholder, so the
        # offending path was missing from the error message.
        raise Exception(
            "invalid extension {}, use either mid or xml".format(path))
    # Clip
    if clip:
        pianoroll = clip_pr(pianoroll)
    return pianoroll, get_pianoroll_time(pianoroll)
def file_processing(path, quantization, clip):
    """Read an xml score into pianoroll, articulation and staccato curves.

    Returns the three representation dicts plus the total pianoroll
    duration in frames.
    """
    pianoroll, articulation, staccato_curve, _ = mxml_to_pr(
        path, quantization, mapping_instru_mxml, apply_staccato=True)
    # Optionally clip all three representations together.
    if clip:
        pianoroll, articulation, staccato_curve = clip_pr(
            pianoroll, articulation, staccato_curve)
    duration = get_pianoroll_time(pianoroll)
    return pianoroll, articulation, staccato_curve, duration