def main(args):
    """Convert the incoming live notes for one part to a music21 stream and pickle it.

    Reads the part named by ``args.name_part`` from the comm JSON file, converts
    it to a stream, and freezes the stream under the score directory. Signals
    completion to Max via the messenger.
    """
    # NOTE(review): sibling entry points use utils.parse_arg to strip the quotes
    # Max wraps around string args; assumed equivalent to the previous inline
    # .replace("\"", '') — confirm.
    name_part = utils.parse_arg(args.name_part)

    importer = io_importer.Importer(utils.get_file_json_comm())
    importer.load([name_part])

    # Drop empty note events before conversion.
    notes_live = postp_live.filter_empty(importer.get_part(name_part))

    # Chords are the only polyphonic part; everything else is a single line.
    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'

    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()

    stream = conv_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode=mode)

    utils.create_dir_score()
    utils.create_dir_part(name_part)

    filename_pickle = os.path.join(
        utils.get_dirname_score(),
        name_part,
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    utils_mxl.freeze_stream(stream, filename_pickle)

    messenger = mes.Messenger()
    messenger.message(['done', 'bang'])
def main(args):
    """Report the project's length in beats back to Max."""
    messenger = mes.Messenger()

    # Only length_beats is needed here; discard the rest of the beat tuple.
    (_, _, _, _, _, length_beats, _) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])
    messenger.message(['done', 'bang'])
def main(args):
    """Force a fixed texture on the homophony part, keep voice 1, and export it.

    Loads the 'homophony' part from the comm JSON, renders it polyphonically,
    forces ``args.desired_texture`` voices, extracts the top voice, and writes
    the result back to the comm JSON for Max.
    """
    name_part = 'homophony'

    importer = io_importer.Importer(utils.get_file_json_comm())
    importer.load([name_part])
    notes_live = importer.get_part(name_part)

    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])

    stream = convert_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode='polyphonic')

    # NOTE(review): sibling entry points use utils.parse_arg to strip the quotes
    # Max wraps around string args; assumed equivalent to the previous inline
    # .replace('"', '') — confirm.
    desired_texture = int(utils.parse_arg(args.desired_texture))

    part_textured = postp_mxl.force_texture(stream, desired_texture)
    part_voice_extracted = postp_mxl.extract_voice(part_textured, 1)

    # bypass_seconds: timing already expressed in beats at this point.
    notes_live = convert_mxl.to_notes_live(
        part_voice_extracted,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        bypass_seconds=True)

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, name_part)
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Estimate key centers from the pickled chord part, pickle them, and export.

    Thaws the most recent chord stream, runs key-center estimation, freezes the
    resulting part under score/key_center, and exports it to the comm JSON.
    """
    messenger = mes.Messenger()

    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()
    messenger.message(['length_beats', str(length_beats)])

    # Thaw the most recently pickled chord part.
    filename_pickle = os.path.join(
        utils.get_dirname_score(),
        'chord',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))
    part_chord_thawed = utils_mxl.thaw_stream(filename_pickle)

    part_key_centers = analysis_mxl.get_key_center_estimates(part_chord_thawed)

    utils.create_dir_score()
    utils.create_dir_key_center()

    # Reuse the variable for the key-center pickle path.
    filename_pickle = os.path.join(
        utils.get_dirname_score(),
        'key_center',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    # (Removed a stale commented-out block, marked "TODO: remove", that
    # re-thawed the key-center pickle.)
    utils_mxl.freeze_stream(part_key_centers, filename_pickle)

    notes_live = convert_mxl.to_notes_live(
        part_key_centers,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo)

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'key_center')
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Extract chord roots from the pickled chord part and export them to Max."""
    messenger = mes.Messenger()

    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    # Thaw the most recent chord stream from the score directory.
    filename_chord = os.path.join(
        utils.get_dirname_score(),
        'chord',
        utils._get_name_project_most_recent() + '.pkl')
    stream_chord = utils_mxl.thaw_stream(filename_chord)

    # Reduce each chord to its root.
    stream_root = postp_mxl.extract_root(stream_chord)

    notes_live = convert_mxl.to_notes_live(
        stream_root, beatmap, s_beat_start, s_beat_end, tempo)

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'root')
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Round-trip one part through a music21 stream and export it back to Max."""
    name_part = utils.parse_arg(args.name_part)

    importer = io_importer.Importer(utils.get_file_json_comm())
    importer.load([name_part])
    notes_live = importer.get_part(name_part)

    # Chords are the only polyphonic part.
    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'

    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])

    # live -> stream -> live round trip.
    stream = convert_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode=mode)
    notes_live = convert_mxl.to_notes_live(
        stream, beatmap, s_beat_start, s_beat_end, tempo)

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, name_part)
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Extract chords from the project audio, quantize them, pickle, and export."""
    messenger = mes.Messenger()

    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()
    messenger.message(['length_beats', str(length_beats)])

    # Run chord extraction on the (possibly warped) project audio.
    dirname_audio = (utils.get_dirname_audio_warped()
                     if utils.b_use_warped()
                     else utils.get_dirname_audio())
    filename_audio = os.path.join(
        dirname_audio, utils._get_name_project_most_recent() + '.wav')
    data_chords = ir.extract_chords(filename_audio)

    non_empty_chords = vamp_filter.filter_non_chords(data_chords)

    # TODO: refactor, this is slow
    events_chords: Dict[float, music21.chord.Chord] = vamp_convert.vamp_chord_to_dict(
        non_empty_chords)
    df_chords = prep_vamp.chords_to_df(events_chords)

    # Quantize the chord events against the beatmap.
    chord_tree = quantize.get_interval_tree(df_chords, diff=False)
    data_quantized = quantize.quantize(
        beatmap, s_beat_start, s_beat_end, trees={'chord': chord_tree})

    score = postp_mxl.df_grans_to_score(data_quantized['chord'], parts=['chord'])
    part_chord = postp_mxl.extract_part(score, 'chord')
    part_chord = postp_mxl.force_texture(part_chord, num_voices=4)

    utils.create_dir_score()
    utils.create_dir_chord()
    filename_pickle = os.path.join(
        utils.get_dirname_score(),
        'chord',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))
    utils_mxl.freeze_stream(part_chord, filename_pickle)

    notes_live = convert_mxl.to_notes_live(
        part_chord,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo)

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'chord')
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Build the 'segment' part and export it to Max.

    Depending on ``args.representation``:
      * 'symbolic' — derive segments from the previously pickled melody part;
      * 'numeric'  — extract segments directly from the project audio.

    Raises:
        ValueError: if the representation is neither 'symbolic' nor 'numeric'.
    """
    messenger = mes.Messenger()
    use_warped = utils.b_use_warped()

    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()
    messenger.message(['length_beats', str(length_beats)])

    representation = utils.parse_arg(args.representation)
    if representation == 'symbolic':
        # Segments from the pickled melody part.
        filename_pickle = os.path.join(
            utils.get_dirname_score(),
            'melody',
            ''.join([utils._get_name_project_most_recent(), '.pkl']))
        part_melody = utils_mxl.thaw_stream(filename_pickle)
        stream_segment = analysis_mxl.get_segments(part_melody)
    elif representation == 'numeric':
        # Segments extracted from the (possibly warped) project audio.
        data_segments = ir.extract_segments(
            os.path.join(
                utils.get_dirname_audio_warped() if use_warped else utils.get_dirname_audio(),
                utils._get_name_project_most_recent() + '.wav'))
        df_segments = prep_vamp.segments_to_df(data_segments)
        segment_tree = quantize.get_interval_tree(df_segments, diff=False)
        data_quantized = quantize.quantize(
            beatmap, s_beat_start, s_beat_end, trees={'segment': segment_tree})
        data_quantized_segments = data_quantized['segment']
        score = postp_mxl.df_grans_to_score(data_quantized_segments, parts=['segment'])
        stream_segment = postp_mxl.extract_part(score, 'segment')
    else:
        # BUG FIX: the original raised a plain str, which is itself a TypeError
        # in Python 3; raise a proper exception instead.
        raise ValueError(' '.join(['representation', representation, 'does not exist']))

    utils.create_dir_score()
    utils.create_dir_segment()
    filename_pickle = os.path.join(
        utils.get_dirname_score(),
        'segment',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))
    utils_mxl.freeze_stream(stream_segment, filename_pickle)

    notes_live = convert_mxl.to_notes_live(
        stream_segment, beatmap, s_beat_start, s_beat_end, tempo)

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'segment')
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Quantize one part to a beat multiple and export it back to Max.

    The melody part goes through the vamp-based quantization pipeline; every
    other part is quantized directly on the music21 stream.
    """
    use_warped = utils.b_use_warped()
    name_part = utils.parse_arg(args.name_part)
    beat_multiple_quantization = utils.parse_arg(args.beat_multiple)
    # music21 quantize takes *divisors* of a quarter note, so invert the multiple.
    quarter_length_divisor = 1/float(beat_multiple_quantization)
    importer = io_importer.Importer(
        utils.get_file_json_comm()
    )
    importer.load([name_part])
    notes_live = importer.get_part(name_part)
    # Chords are the only polyphonic part.
    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'
    (
        s_beat_start,
        s_beat_end,
        tempo,
        beat_start,
        beat_end,
        length_beats,
        beatmap
    ) = utils.get_tuple_beats()
    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])
    stream = convert_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode=mode
    )
    # TODO: replace with logic in "granularize.py"
    if name_part == 'melody':
        # Re-extract melody data over the full duration of the (possibly
        # warped) project audio.
        data_melody = conv_vamp.to_data_melody(
            notes_live,
            offset_s_audio=0,
            duration_s_audio=utils.get_duration_s_audio(
                filename=os.path.join(
                    utils.get_dirname_audio_warped() if use_warped
                    else utils.get_dirname_audio(),
                    utils._get_name_project_most_recent() + '.wav'
                )
            )
        )
        df_melody = prep_vamp.melody_to_df(
            (data_melody['vector'][0], data_melody['vector'][1]),
            index_type='s'
        )
        # Clamp negative pitch values to 0. NOTE(review): this boolean-mask
        # assignment zeroes every column of the matching rows, not just
        # 'melody' — fine if the frame is single-column; confirm.
        df_melody[df_melody['melody'] < 0] = 0
        melody_tree = quantize.get_interval_tree(
            df_melody
        )
        data_quantized = quantize.quantize(
            beatmap,
            s_beat_start,
            s_beat_end,
            trees={
                'melody': melody_tree
            }
        )
        data_quantized_melody = data_quantized['melody']
        score = postp_mxl.df_grans_to_score(
            data_quantized_melody,
            parts=['melody']
        )
        # Replace the stream with the quantized melody part.
        stream = postp_mxl.extract_part(
            score,
            'melody'
        )
    else:
        # Non-melody parts: quantize the stream in place to the beat multiple.
        stream.quantize(
            (quarter_length_divisor, ),
            inPlace=True
        )
    # bypass_seconds: timing is already expressed in beats at this point.
    notes_live = convert_mxl.to_notes_live(
        stream,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        bypass_seconds=True
    )
    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, name_part)
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Quantize the beatmap itself and export it as the 'beatmap' part."""
    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])

    # beatmap -> timestamps -> dataframe -> interval tree
    ts_beatmap = prep_vamp.beatmap_to_ts(beatmap)
    df_beatmap = prep_vamp.ts_beatmap_to_df(ts_beatmap)
    tree_beatmap = quantize.get_interval_tree(
        df_beatmap, diff=False, preserve_struct=True)

    data_quantized = quantize.quantize(
        beatmap, s_beat_start, s_beat_end, trees={'beatmap': tree_beatmap})

    score = postp_mxl.df_grans_to_score(
        data_quantized['beatmap'],
        parts=['beatmap'],
        type_equality='absolute')
    stream_beatmap = postp_mxl.extract_part(score, 'beatmap')

    notes_live = convert_mxl.to_notes_live(
        stream_beatmap, beatmap, s_beat_start, s_beat_end, tempo)

    exporter = io_exporter.Exporter()
    exporter.set_part(notes_live, 'beatmap')
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])
def main(args):
    """Quantize a melody signal read from a Max coll and export it as 'melody'."""
    (s_beat_start, s_beat_end, tempo,
     beat_start, beat_end, length_beats, beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()
    messenger.message(['length_beats', str(length_beats)])

    # Load the raw signal from the Max coll and convert Hz -> MIDI.
    df_signal = conv_max.from_coll(conv_max.file_ts_coll)
    df_midi = conv_mid.hz_to_mid(df_signal.rename(columns={'signal': 'melody'}))

    df_melody_diff = filt_midi.to_diff(df_midi, 'melody')

    # NOTE(review): hard-coded sample period used to turn sample indices into
    # seconds — confirm against the coll's actual sample rate.
    sample_rate = .0029
    df_melody_diff.index = df_melody_diff.index * sample_rate

    # TODO: add index s before quantizing
    tree_melody = quantize.get_interval_tree(df_melody_diff)
    data_quantized = quantize.quantize(
        beatmap, s_beat_start, s_beat_end, trees={'melody': tree_melody})

    score = postp_mxl.df_grans_to_score(
        data_quantized['melody'], parts=['melody'])
    part_melody = postp_mxl.extract_part(score, 'melody')

    exporter = io_exporter.Exporter()
    exporter.set_part(
        notes=conv_mxl.to_notes_live(
            part_melody, beatmap, s_beat_start, s_beat_end, tempo),
        name_part='melody')
    exporter.export(utils.get_file_json_comm())
    messenger.message(['done', 'bang'])