def main(args):
    messenger = mes.Messenger()

    (s_beat_start,
     s_beat_end,
     tempo,
     beat_start,
     beat_end,
     length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    data_chords = ir.extract_chords(
        os.path.join(
            utils.get_dirname_audio_warped() if utils.b_use_warped() else utils.get_dirname_audio(),
            utils._get_name_project_most_recent() + '.wav'
        )
    )

    non_empty_chords = vamp_filter.filter_non_chords(data_chords)

    # TODO: refactor, this is slow
    events_chords: Dict[float, music21.chord.Chord] = vamp_convert.vamp_chord_to_dict(
        non_empty_chords
    )

    df_chords = prep_vamp.chords_to_df(events_chords)

    chord_tree = quantize.get_interval_tree(df_chords, diff=False)

    data_quantized = quantize.quantize(
        beatmap,
        s_beat_start,
        s_beat_end,
        trees={'chord': chord_tree}
    )

    data_quantized_chords = data_quantized['chord']

    score = postp_mxl.df_grans_to_score(data_quantized_chords, parts=['chord'])

    part_chord = postp_mxl.extract_part(score, 'chord')

    part_chord = postp_mxl.force_texture(part_chord, num_voices=4)

    utils.create_dir_score()
    utils.create_dir_chord()

    filename_pickle = os.path.join(
        utils.get_dirname_score(),
        'chord',
        ''.join([utils._get_name_project_most_recent(), '.pkl'])
    )

    utils_mxl.freeze_stream(part_chord, filename_pickle)

    notes_live = convert_mxl.to_notes_live(
        part_chord,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo
    )

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'chord')
    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
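
# Illustrative sketch only, not the project's vamp_convert / postp_mxl code:
# it shows the underlying idea of turning timestamped chord labels (as a
# Chordino-style extractor emits them) into a music21 part. The event times,
# labels, tempo, and helper name below are made up.
from music21 import harmony, stream

def _demo_chords_to_part():
    events = [(0.0, 'Cmaj7'), (2.0, 'Am7'), (4.0, 'F'), (6.0, 'G7')]
    s_end = 8.0
    tempo_bpm = 120.0
    s_to_ql = tempo_bpm / 60.0  # seconds -> quarter lengths

    part = stream.Part()
    for (s_onset, label), (s_next, _) in zip(events, events[1:] + [(s_end, None)]):
        cs = harmony.ChordSymbol(label)
        cs.quarterLength = (s_next - s_onset) * s_to_ql  # hold until the next chord
        part.insert(s_onset * s_to_ql, cs)
    return part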
def main(args):
    messenger = mes.Messenger()

    use_warped = utils.b_use_warped()

    (s_beat_start,
     s_beat_end,
     tempo,
     beat_start,
     beat_end,
     length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    representation = utils.parse_arg(args.representation)

    if representation == 'symbolic':
        filename_pickle = os.path.join(
            utils.get_dirname_score(),
            'melody',
            ''.join([utils._get_name_project_most_recent(), '.pkl'])
        )

        part_melody = utils_mxl.thaw_stream(filename_pickle)

        stream_segment = analysis_mxl.get_segments(part_melody)
    elif representation == 'numeric':
        data_segments = ir.extract_segments(
            os.path.join(
                utils.get_dirname_audio_warped() if use_warped else utils.get_dirname_audio(),
                utils._get_name_project_most_recent() + '.wav'
            )
        )

        df_segments = prep_vamp.segments_to_df(data_segments)

        segment_tree = quantize.get_interval_tree(df_segments, diff=False)

        data_quantized = quantize.quantize(
            beatmap,
            s_beat_start,
            s_beat_end,
            trees={'segment': segment_tree}
        )

        data_quantized_segments = data_quantized['segment']

        score = postp_mxl.df_grans_to_score(data_quantized_segments, parts=['segment'])

        stream_segment = postp_mxl.extract_part(score, 'segment')
    else:
        raise ValueError(' '.join(['representation', representation, 'does not exist']))

    utils.create_dir_score()
    utils.create_dir_segment()

    filename_pickle = os.path.join(
        utils.get_dirname_score(),
        'segment',
        ''.join([utils._get_name_project_most_recent(), '.pkl'])
    )

    utils_mxl.freeze_stream(stream_segment, filename_pickle)

    notes_live = convert_mxl.to_notes_live(
        stream_segment,
        beatmap,
        s_beat_start,
        s_beat_end,
        tempo
    )

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'segment')
    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
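
# Illustrative sketch only (not the project's quantize module): building an
# interval tree from segment onsets and querying it at beat times, which is
# conceptually what get_interval_tree / quantize do above. Assumes the
# third-party `intervaltree` package; the segment labels and times are made up.
import pandas as pd
from intervaltree import IntervalTree

def _demo_segment_tree():
    df = pd.DataFrame(
        {'segment': ['A', 'B', 'A']},
        index=pd.Index([0.0, 8.2, 16.5], name='s'),  # segment onsets in seconds
    )
    s_end = 24.0  # assumed end of audio

    tree = IntervalTree()
    bounds = list(df.index) + [s_end]
    for (s_start, label), s_stop in zip(df['segment'].items(), bounds[1:]):
        tree[s_start:s_stop] = label

    # which segment is sounding at each beat time?
    return {s_beat: sorted(tree[s_beat])[0].data for s_beat in (0.5, 9.0, 17.25)}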
def main(args):
    (s_beat_start,
     s_beat_end,
     tempo,
     beat_start,
     beat_end,
     length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])

    ts_beatmap = prep_vamp.beatmap_to_ts(beatmap)

    df_beatmap = prep_vamp.ts_beatmap_to_df(ts_beatmap)

    beatmap_tree = quantize.get_interval_tree(
        df_beatmap,
        diff=False,
        preserve_struct=True
    )

    data_quantized = quantize.quantize(
        beatmap,
        s_beat_start,
        s_beat_end,
        trees={'beatmap': beatmap_tree}
    )

    data_quantized_beats = data_quantized['beatmap']

    score = postp_mxl.df_grans_to_score(
        data_quantized_beats,
        parts=['beatmap'],
        type_equality='absolute'
    )

    stream_beatmap = postp_mxl.extract_part(score, 'beatmap')

    notes_live = convert_mxl.to_notes_live(
        stream_beatmap,
        beatmap,
        s_beat_start,
        s_beat_end,
        tempo
    )

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'beatmap')
    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
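
# Illustrative sketch only: the basic operation behind quantize.quantize is
# snapping event onsets (in seconds) to the nearest entry of the beatmap.
# The beat and event times below are made up.
import numpy as np

def _demo_snap_to_beats():
    beatmap_s = np.array([0.00, 0.52, 1.03, 1.55, 2.06])  # beat onsets in seconds
    events_s = np.array([0.49, 1.10, 1.99])

    # index of the nearest beat for each event onset
    idx_beat = np.abs(events_s[:, None] - beatmap_s[None, :]).argmin(axis=1)
    return list(zip(events_s.tolist(), idx_beat.tolist()))  # [(0.49, 1), (1.10, 2), (1.99, 4)]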
def main(args):
    use_warped = utils.b_use_warped()

    name_part = utils.parse_arg(args.name_part)

    beat_multiple_quantization = utils.parse_arg(args.beat_multiple)

    quarter_length_divisor = 1 / float(beat_multiple_quantization)

    importer = io_importer.Importer(utils.get_file_json_comm())

    importer.load([name_part])

    notes_live = importer.get_part(name_part)

    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'

    (s_beat_start,
     s_beat_end,
     tempo,
     beat_start,
     beat_end,
     length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])

    stream = convert_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode=mode
    )

    # TODO: replace with logic in "granularize.py"
    if name_part == 'melody':
        data_melody = conv_vamp.to_data_melody(
            notes_live,
            offset_s_audio=0,
            duration_s_audio=utils.get_duration_s_audio(
                filename=os.path.join(
                    utils.get_dirname_audio_warped() if use_warped else utils.get_dirname_audio(),
                    utils._get_name_project_most_recent() + '.wav'
                )
            )
        )

        df_melody = prep_vamp.melody_to_df(
            (data_melody['vector'][0], data_melody['vector'][1]),
            index_type='s'
        )

        df_melody[df_melody['melody'] < 0] = 0

        melody_tree = quantize.get_interval_tree(df_melody)

        data_quantized = quantize.quantize(
            beatmap,
            s_beat_start,
            s_beat_end,
            trees={'melody': melody_tree}
        )

        data_quantized_melody = data_quantized['melody']

        score = postp_mxl.df_grans_to_score(data_quantized_melody, parts=['melody'])

        stream = postp_mxl.extract_part(score, 'melody')
    else:
        stream.quantize(
            (quarter_length_divisor, ),
            inPlace=True
        )

    notes_live = convert_mxl.to_notes_live(
        stream,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        bypass_seconds=True
    )

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, name_part)
    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
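
# Minimal, self-contained music21 example of the Stream.quantize call used in
# the non-melody branch above: offsets and durations are snapped to a grid of
# quarter-length divisors (a divisor of 4 means sixteenth-note positions,
# e.g. quarter_length_divisor = 1 / beat_multiple with a beat multiple of
# 0.25). The notes below are made up.
from music21 import note, stream as m21_stream

def _demo_stream_quantize():
    s = m21_stream.Stream()
    s.insert(0.07, note.Note('C4', quarterLength=0.9))
    s.insert(1.02, note.Note('E4', quarterLength=1.1))

    s.quantize((4,), processOffsets=True, processDurations=True, inPlace=True)
    return [(n.offset, n.quarterLength) for n in s.notes]  # [(0.0, 1.0), (1.0, 1.0)]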
def main(args):
    (s_beat_start,
     s_beat_end,
     tempo,
     beat_start,
     beat_end,
     length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])

    df = conv_max.from_coll(conv_max.file_ts_coll)

    df = conv_mid.hz_to_mid(
        df.rename(columns={'signal': 'melody'})
    )

    df_melody_diff = filt_midi.to_diff(df, 'melody')

    sample_rate = .0029
    df_melody_diff.index = df_melody_diff.index * sample_rate

    # TODO: add index s before quantizing
    tree_melody = quantize.get_interval_tree(df_melody_diff)

    data_quantized = quantize.quantize(
        beatmap,
        s_beat_start,
        s_beat_end,
        trees={'melody': tree_melody}
    )

    score = postp_mxl.df_grans_to_score(
        data_quantized['melody'],
        parts=['melody']
    )

    exporter = io_exporter.Exporter()

    part_melody = postp_mxl.extract_part(score, 'melody')

    exporter.set_part(
        notes=conv_mxl.to_notes_live(
            part_melody,
            beatmap,
            s_beat_start,
            s_beat_end,
            tempo
        ),
        name_part='melody'
    )

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
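
# Illustrative sketch only (not the project's conv_mid / filt_midi modules):
# the standard Hz -> MIDI mapping presumably applied to the 'melody' column,
# plus the frame-index -> seconds scaling done via `sample_rate` above
# (0.0029 s is roughly a 128-sample hop at 44.1 kHz; that reading is an
# assumption). The frequency values below are made up.
import numpy as np
import pandas as pd

def _demo_hz_to_midi():
    df = pd.DataFrame({'melody': [220.0, 440.0, 466.16]})   # Hz
    df['melody'] = 69 + 12 * np.log2(df['melody'] / 440.0)  # -> 57.0, 69.0, ~70.0 (MIDI)

    s_per_frame = 0.0029  # assumed analysis-frame period in seconds
    df.index = df.index * s_per_frame
    return df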