def main(args):

    name_part = args.name_part.replace("\"", '')  # strip surrounding double quotes from the raw arg

    importer = io_importer.Importer(utils.get_file_json_comm())

    importer.load([name_part])

    notes_live = postp_live.filter_empty(importer.get_part(name_part))

    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    stream = conv_mxl.live_to_stream(notes_live,
                                     beatmap=beatmap,
                                     s_beat_start=s_beat_start,
                                     s_beat_end=s_beat_end,
                                     tempo=tempo,
                                     mode=mode)

    utils.create_dir_score()

    utils.create_dir_part(name_part)

    filename_pickle = os.path.join(
        utils.get_dirname_score(), name_part,
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    utils_mxl.freeze_stream(stream, filename_pickle)
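    # freeze_stream/thaw_stream are project helpers; a minimal sketch of the
    # music21 calls they presumably wrap:
    #
    #     from music21 import converter
    #     converter.freeze(stream, fp=filename_pickle)  # serialize to .pkl
    #     stream = converter.thaw(filename_pickle)      # restore later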

    messenger = mes.Messenger()

    messenger.message(['done', 'bang'])
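# The entry points in these snippets all receive a parsed-args namespace; a
# minimal sketch of the wiring they assume (the real parser lives outside the
# snippets, and the argument names are inferred from the attributes accessed
# above):
#
#     import argparse
#
#     if __name__ == '__main__':
#         parser = argparse.ArgumentParser()
#         parser.add_argument('name_part')
#         main(parser.parse_args())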
def main(args):
    tempo_estimate = ir.extract_tempo()

    messenger = mes.Messenger()

    messenger.message([str(tempo_estimate)])

    messenger.message(['done', 'bang'])
def main(args):

    messenger = mes.Messenger()

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    messenger.message(['done', 'bang'])
def main(args):

    name_part = 'homophony'

    importer = io_importer.Importer(
        utils.get_file_json_comm()
    )

    importer.load([name_part])

    notes_live = importer.get_part(name_part)

    (
        s_beat_start,
        s_beat_end,
        tempo,
        beat_start,
        beat_end,
        length_beats,
        beatmap
    ) = utils.get_tuple_beats()

    messenger = mes.Messenger()

    messenger.message(['length_beats', str(length_beats)])

    stream = convert_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode='polyphonic'
    )

    part_textured = postp_mxl.force_texture(
        stream, int(args.desired_texture.replace('"', '')))

    part_voice_extracted = postp_mxl.extract_voice(part_textured, 1)

    notes_live = convert_mxl.to_notes_live(
        part_voice_extracted,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        bypass_seconds=True
    )

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, name_part)

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
def main(args):

    messenger = mes.Messenger()

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    filename_pickle = os.path.join(
        utils.get_dirname_score(), 'chord',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    part_chord_thawed = utils_mxl.thaw_stream(filename_pickle)

    part_key_centers = analysis_mxl.get_key_center_estimates(part_chord_thawed)

    utils.create_dir_score()

    utils.create_dir_key_center()

    filename_pickle = os.path.join(
        utils.get_dirname_score(), 'key_center',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    ################ TODO: remove

    # filename_part = os.path.join(
    #     utils.get_dirname_score(),
    #     'key_center',
    #     utils._get_name_project_most_recent() + '.pkl'
    # )
    #
    # part_key_centers = utils_mxl.thaw_stream(
    #     filename_part
    # )

    ###############

    utils_mxl.freeze_stream(part_key_centers, filename_pickle)

    notes_live = convert_mxl.to_notes_live(part_key_centers,
                                           beatmap=beatmap,
                                           s_beat_start=s_beat_start,
                                           s_beat_end=s_beat_end,
                                           tempo=tempo)

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, 'key_center')

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
def main(args):

    utils.write_name_project(args.name_project)

    utils.create_dir_project()

    utils.create_dir_audio()

    utils.create_dir_audio_warped()

    utils.create_dir_vocals()

    utils.create_dir_session()

    audio_only = args.x  # -x: skip the video download, keep audio only

    include_video = not audio_only

    if include_video:
        utils.create_dir_video()

        utils.create_dir_video_warped()

        dir_video = utils.get_path_dir_video()

        command_to_video = [
            args.path_executable, '--o',
            dir_video + '/' + args.name_project + '.%(ext)s', '-f', 'mp4',
            '--ffmpeg-location', args.ffmpeg_location, args.url[0]
        ]

        subprocess.run(command_to_video,
                       stdout=subprocess.PIPE).stdout.rstrip().decode("utf-8")

    dir_audio = utils.get_path_dir_audio()

    command_to_audio = [
        args.path_executable, '-x', '--o',
        dir_audio + '/' + args.name_project + '.%(ext)s', '--audio-format',
        args.audio_format, '--ffmpeg-location', args.ffmpeg_location,
        args.url[0]
    ]
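    # for illustration, with hypothetical values this builds a youtube-dl-style
    # command line such as:
    #
    #     youtube-dl -x --o /proj/audio/myproject.%(ext)s --audio-format wav \
    #         --ffmpeg-location /usr/bin/ffmpeg <url>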

    subprocess.run(command_to_audio,
                   stdout=subprocess.PIPE).stdout.rstrip().decode("utf-8")

    messenger = mes.Messenger()

    messenger.message(['done', 'bang'])
def main(args):

    messenger = mes.Messenger()

    (
        s_beat_start,
        s_beat_end,
        tempo,
        beat_start,
        beat_end,
        length_beats,
        beatmap
    ) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    stream_chord = utils_mxl.thaw_stream(
        os.path.join(
            utils.get_dirname_score(),
            'chord',
            utils._get_name_project_most_recent() + '.pkl'
        )
    )

    stream_root = postp_mxl.extract_root(
        stream_chord
    )

    notes_live = convert_mxl.to_notes_live(
        stream_root,
        beatmap,
        s_beat_start,
        s_beat_end,
        tempo
    )

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, 'root')

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
def main(args):

    use_warped = utils.b_use_warped()

    data_melody = ir.extract_melody(
        os.path.join(
            utils.get_dirname_audio_warped()
            if use_warped else utils.get_dirname_audio(),
            utils._get_name_project_most_recent() + '.wav'))

    df_melody = prep_vamp.melody_to_df(
        (data_melody['vector'][0], data_melody['vector'][1]), index_type='s')

    # melody extractors like MELODIA report unvoiced frames as negative
    # frequency values; clamp them to 0
    df_melody[df_melody['melody'] < 0] = 0

    conv_max.to_coll(df_melody.rename(columns={'melody': 'signal'}),
                     conv_max.file_ts_coll)

    messenger = mes.Messenger()

    messenger.message(['done', 'bang'])
def main(args):

    name_part = utils.parse_arg(args.name_part)

    importer = io_importer.Importer(utils.get_file_json_comm())

    importer.load([name_part])

    notes_live = importer.get_part(name_part)

    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger = mes.Messenger()

    messenger.message(['length_beats', str(length_beats)])

    stream = convert_mxl.live_to_stream(notes_live,
                                        beatmap=beatmap,
                                        s_beat_start=s_beat_start,
                                        s_beat_end=s_beat_end,
                                        tempo=tempo,
                                        mode=mode)

    notes_live = convert_mxl.to_notes_live(stream, beatmap, s_beat_start,
                                           s_beat_end, tempo)

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, name_part)

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
def main(args):

    # filter_map: Dict[str, List[int]] = args.filter_map

    name_filter = 'medfilt'  # name of the scipy.signal filter to apply

    filter_map: Dict[str, List[int]] = {
        'kernel_size': [
            111,
            311,
            511
        ]
    }

    df_to_filter = conv_max.from_coll(
        filename=conv_max.file_ts_coll
    )

    params = filter_map

    # NB: each pass overwrites 'signal_filtered', so only the last value
    # (here kernel_size=511) survives to the export below
    for name_argument, values in params.items():
        for i_value, value in enumerate(values):
            df_to_filter['signal_filtered'] = pd.Series(
                getattr(signal, name_filter)(
                    df_to_filter['signal'].to_numpy().reshape(-1, ),
                    **{name_argument: value}),
                dtype=float)

    conv_max.to_coll(
        df_to_filter[['pos', 'signal_filtered']].rename(columns={'signal_filtered': 'signal'}),
        filename=conv_max.file_ts_coll
    )

    messenger = mes.Messenger()

    messenger.message(['done', 'bang'])
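# For reference, scipy.signal.medfilt (the filter applied above) replaces each
# sample with the median of its kernel_size-wide neighbourhood, which strips
# short spikes from a time series:
#
#     >>> from scipy import signal
#     >>> signal.medfilt([0., 9., 0., 0., 5., 0., 0.], kernel_size=3)
#     array([0., 0., 0., 0., 0., 0., 0.])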
def main(args):

    note_midi_lower = int(utils.parse_arg(args.note_midi_lower))
    note_midi_upper = int(utils.parse_arg(args.note_midi_upper))

    chromatic_scale = music21.scale.ChromaticScale(
        music21.pitch.Pitch(midi=note_midi_lower))

    # pitches = [
    #     str(p)
    #     for p
    #     in chromatic_scale.getPitches(
    #         music21.pitch.Pitch(midi=note_midi_lower),
    #         music21.pitch.Pitch(midi=note_midi_upper)
    #     )
    # ]
    #
    # pitches.insert(0, 0)
    #
    # alphabet_map = dict()
    #
    # for tuple in list(set(zip(list(string.ascii_lowercase), pitches))):
    #     alphabet_map[tuple[0]] = tuple[1]

    # this will put the letter 'l' at the front
    # alphabet_map_sorted = utils.rotate_items(OrderedDict(sorted(alphabet_map.items())), 11)
    #
    # n_bins = note_midi_upper - note_midi_lower + 2

    df = conv_max.from_coll(filename=conv_max.file_ts_coll)

    frequencies_hz = [
        p.frequency for p in chromatic_scale.getPitches(
            music21.pitch.Pitch(midi=note_midi_lower - 1),
            music21.pitch.Pitch(midi=note_midi_upper + 1))
    ]

    pitches = chromatic_scale.getPitches(
        music21.pitch.Pitch(midi=note_midi_lower),
        music21.pitch.Pitch(midi=note_midi_upper))

    df['signal_discretized'] = df['signal'] * 0

    # snap each signal value to the frequency of the chromatic pitch whose
    # band (bounded by the midpoints to its neighbouring semitones) contains it
    for pitch in pitches:
        range_hz = ((music21.pitch.Pitch(midi=pitch.midi - 1).frequency +
                     pitch.frequency) / 2,
                    (music21.pitch.Pitch(midi=pitch.midi + 1).frequency +
                     pitch.frequency) / 2)
        df.loc[(range_hz[0] <= df['signal']) &
               (df['signal'] <= range_hz[1]),
               'signal_discretized'] = pitch.frequency
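    # worked example of the banding above: A4 (midi 69) is 440.0 Hz and its
    # neighbours G#4 and A#4 are ~415.30 Hz and ~466.16 Hz, so any signal
    # value between ~427.65 Hz and ~453.08 Hz snaps to 440.0 Hz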

    threshold_lower = (frequencies_hz[0] + frequencies_hz[1]) / 2

    threshold_upper = (frequencies_hz[-2] + frequencies_hz[-1]) / 2

    df.loc[df['signal'] < threshold_lower, 'signal_discretized'] = 0

    df.loc[df['signal'] > threshold_upper, 'signal_discretized'] = 0

    # sax = SAX(
    #     n_bins=n_bins,
    #     quantiles='empirical'
    # )

    # sfa = SFA(
    #     n_bins=n_bins,
    #     quantiles='empirical'
    # )

    # mcb = MCB(
    #     n_bins=n_bins,
    #     quantiles='gaussian'
    # )

    # def lookup_frequency(letter: str) -> int:
    #     return music21.pitch.Pitch(alphabet_map_sorted[letter]).frequency

    # df['signal_discretized'] = sax.fit_transform(df['signal'].values.reshape((1, len(df.index))))[0]
    # df['signal_discretized'] = sfa.fit_transform(df['signal'].values.reshape((1, len(df.index))))

    # df['signal_discretized'] = df['signal_discretized'].apply(lookup_frequency)

    conv_max.to_coll(
        df[['pos', 'signal_discretized']].rename(
            columns={'signal_discretized': 'signal'}),
        filename=conv_max.file_ts_coll_discrete)

    messenger = mes.Messenger()

    messenger.message(['done', 'bang'])
def main(args):

    messenger = mes.Messenger()

    use_warped = utils.b_use_warped()

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    representation = utils.parse_arg(args.representation)

    if representation == 'symbolic':

        filename_pickle = os.path.join(
            utils.get_dirname_score(), 'melody',
            ''.join([utils._get_name_project_most_recent(), '.pkl']))

        part_melody = utils_mxl.thaw_stream(filename_pickle)

        stream_segment = analysis_mxl.get_segments(part_melody)

    elif representation == 'numeric':

        data_segments = ir.extract_segments(
            os.path.join(
                utils.get_dirname_audio_warped()
                if use_warped else utils.get_dirname_audio(),
                utils._get_name_project_most_recent() + '.wav'))

        df_segments = prep_vamp.segments_to_df(data_segments)

        segment_tree = quantize.get_interval_tree(df_segments, diff=False)

        data_quantized = quantize.quantize(beatmap,
                                           s_beat_start,
                                           s_beat_end,
                                           trees={'segment': segment_tree})

        data_quantized_segments = data_quantized['segment']

        score = postp_mxl.df_grans_to_score(data_quantized_segments,
                                            parts=['segment'])

        stream_segment = postp_mxl.extract_part(score, 'segment')

    else:
        raise ValueError(
            ' '.join(['representation', representation, 'does not exist']))

    utils.create_dir_score()

    utils.create_dir_segment()

    filename_pickle = os.path.join(
        utils.get_dirname_score(), 'segment',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    utils_mxl.freeze_stream(stream_segment, filename_pickle)

    notes_live = convert_mxl.to_notes_live(stream_segment, beatmap,
                                           s_beat_start, s_beat_end, tempo)

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, 'segment')

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
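# A minimal sketch of the interval lookup that quantize.get_interval_tree and
# quantize.quantize presumably build on (assuming the third-party
# `intervaltree` package): events keyed by [start_s, end_s) are queried with
# beat times to find what is sounding on each beat.
#
#     from intervaltree import IntervalTree
#
#     tree = IntervalTree()
#     tree[0.0:1.5] = 'C'        # event sounding from 0.0 s to 1.5 s
#     tree[1.5:3.0] = 'F'
#     sorted(tree[1.0])[0].data  # -> 'C' (the event sounding at 1.0 s)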
def main(args):

    use_warped = utils.b_use_warped()

    name_part = utils.parse_arg(args.name_part)

    beat_multiple_quantization = utils.parse_arg(args.beat_multiple)

    # music21's Stream.quantize snaps to multiples of 1/divisor of a quarter
    # note, so e.g. a beat multiple of 0.25 (sixteenths) gives a divisor of 4
    quarter_length_divisor = 1/float(beat_multiple_quantization)

    importer = io_importer.Importer(
        utils.get_file_json_comm()
    )

    importer.load([name_part])

    notes_live = importer.get_part(name_part)

    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'

    (
        s_beat_start,
        s_beat_end,
        tempo,
        beat_start,
        beat_end,
        length_beats,
        beatmap
    ) = utils.get_tuple_beats()

    messenger = mes.Messenger()

    messenger.message(['length_beats', str(length_beats)])

    stream = convert_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode=mode
    )

    # TODO: replace with logic in "granularize.py"
    if name_part == 'melody':

        data_melody = conv_vamp.to_data_melody(
            notes_live,
            offset_s_audio=0,
            duration_s_audio=utils.get_duration_s_audio(
                filename=os.path.join(
                    utils.get_dirname_audio_warped() if use_warped else utils.get_dirname_audio(),
                    utils._get_name_project_most_recent() + '.wav'
                )
            )
        )

        df_melody = prep_vamp.melody_to_df(
            (data_melody['vector'][0], data_melody['vector'][1]),
            index_type='s'
        )

        df_melody[df_melody['melody'] < 0] = 0

        melody_tree = quantize.get_interval_tree(
            df_melody
        )

        data_quantized = quantize.quantize(
            beatmap,
            s_beat_start,
            s_beat_end,
            trees={
                'melody': melody_tree
            }
        )

        data_quantized_melody = data_quantized['melody']

        score = postp_mxl.df_grans_to_score(
            data_quantized_melody,
            parts=['melody']
        )

        stream = postp_mxl.extract_part(
            score,
            'melody'
        )
    else:
        stream.quantize(
            (quarter_length_divisor, ),
            inPlace=True
        )

    notes_live = convert_mxl.to_notes_live(
        stream,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        bypass_seconds=True
    )

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, name_part)

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
def main(args):

    use_warped = args.m

    messenger = mes.Messenger()

    def parse_float_arg(arg):
        # parse the raw arg once; empty values become None
        parsed = utils.parse_arg(arg)
        return float(parsed) if parsed else None

    s_beat_start = parse_float_arg(args.s_beat_start)

    s_beat_end = parse_float_arg(args.s_beat_end)

    tempo = parse_float_arg(args.tempo)

    beat_start = parse_float_arg(args.beat_start)

    beat_end = parse_float_arg(args.beat_end)

    length_beats = parse_float_arg(args.length_beats)

    filename_wav = os.path.join(
        utils.get_dirname_audio_warped() if use_warped else utils.get_dirname_audio(),
        utils._get_name_project_most_recent() + '.wav'
    )

    # args.a: derive the selection from the full duration of the audio file
    if args.a:
        s_beat_start = 0
        s_beat_end = utils.get_duration_s_audio(filename=filename_wav)

    # NB: to look up a beat in the beatmap, given a beat position in Live:
    # subtract one from the measure, multiply by 4, then subtract one beat
    # e.g., 74.1.1 => beatmap_manual[73*4 + 0]
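    #
    # the same lookup as code (measure/beat are hypothetical Live positions;
    # beatmap_manual and the 4 beats per measure are assumptions carried over
    # from the note above):
    #
    #     measure, beat = 74, 1
    #     s_beat = beatmap_manual[(measure - 1) * 4 + (beat - 1)]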

    if use_warped:

        s_beat_start = 0

        s_beat_end = utils.get_duration_s_audio(
            filename=filename_wav
        )

        beatmap = np.linspace(
            0,
            s_beat_end,
            int(beat_end) - int(beat_start) + 1
        )
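        # worked example: with beat_start=1, beat_end=5 and 10 s of warped
        # audio, np.linspace(0, 10, 5) lays the five beats uniformly at
        # [0., 2.5, 5., 7.5, 10.] seconds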

    else:

        beatmap = [val.to_float() for val in ir.extract_beats(filename_wav)]

        length_beats = utils.get_num_beats(beatmap, s_beat_start, s_beat_end)

        if args.double:
            beatmap = utils.double_beatmap(beatmap)
            length_beats = length_beats*2
        elif args.halve:
            beatmap = utils.halve_beatmap(beatmap)
            length_beats = length_beats/2

        # TODO: does this mean that all unwarped audio is assumed to be cropped?
        beat_start = 0

        beat_end = beat_start + length_beats - 1

    utils.create_dir_beat()

    filepath_beatmap = os.path.join(
        utils.get_dirname_beat(),
        utils._get_name_project_most_recent() + '.pkl'
    )

    data_beats = {
        's_beat_start': s_beat_start,
        's_beat_end': s_beat_end,
        'tempo': tempo,
        'beat_start': float(beat_start),
        'beat_end': float(beat_end),
        'length_beats': float(length_beats),
        'beatmap': beatmap
    }

    if args.dump_estimates:
        for beat in beatmap:
            messenger.message(['beat_relative', str(beat/s_beat_end)])
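    # e.g., a beat falling at 30.0 s of a 120.0 s selection is emitted as
    # ['beat_relative', '0.25']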

    utils.to_pickle(
        data_beats,
        filepath_beatmap
    )

    messenger.message(['done', 'bang'])
def main(args):

    (
        s_beat_start,
        s_beat_end,
        tempo,
        beat_start,
        beat_end,
        length_beats,
        beatmap
    ) = utils.get_tuple_beats()

    messenger = mes.Messenger()

    messenger.message(['length_beats', str(length_beats)])

    df = conv_max.from_coll(
        conv_max.file_ts_coll
    )

    df = conv_mid.hz_to_mid(
        df.rename(
            columns={'signal': 'melody'}
        )
    )

    df_melody_diff = filt_midi.to_diff(
        df,
        'melody'
    )

    # NB: despite the name, this is the per-frame hop duration in seconds,
    # used to convert the frame index to a time index
    sample_rate = .0029

    df_melody_diff.index = df_melody_diff.index * sample_rate

    # TODO: add index s before quantizing

    tree_melody = quantize.get_interval_tree(
        df_melody_diff
    )

    data_quantized = quantize.quantize(
        beatmap,
        s_beat_start,
        s_beat_end,
        trees={
            'melody': tree_melody
        }
    )

    score = postp_mxl.df_grans_to_score(
        data_quantized['melody'],
        parts=['melody']
    )

    exporter = io_exporter.Exporter()

    part_melody = postp_mxl.extract_part(
        score,
        'melody'
    )

    exporter.set_part(
        notes=conv_mxl.to_notes_live(
            part_melody,
            beatmap,
            s_beat_start,
            s_beat_end,
            tempo
        ),
        name_part='melody'
    )

    exporter.export(
        utils.get_file_json_comm()
    )

    messenger.message(['done', 'bang'])
def main(args):
    messenger = mes.Messenger()

    file_input = utils.parse_arg(args.file_input)

    file_output = utils.parse_arg(args.file_output)

    name_part = utils.parse_arg(args.name_part)

    score = converter.parse(file_input)

    part_new = stream.Part()

    for p in score:
        if isinstance(p, stream.Part):
            # NB: num_measures_lead_in is assumed to be defined at module
            # level (the number of lead-in measures to skip)
            for i in range(num_measures_lead_in + 1,
                           p.measure(-1).measureNumber + 1):
                m = p.measure(i)

                chord_symbols = [
                    c for c in m if isinstance(c, harmony.ChordSymbol)
                ]

                if len(chord_symbols) == 0:
                    # no symbol in this measure: carry the previous chord
                    # forward (NB: we want a NameError here if no chord
                    # symbol has been seen yet)
                    if name_part == 'chord':
                        chord_new = chord.Chord(
                            [p.midi for p in chord_sym_last.pitches],
                            duration=duration.Duration(4))
                        part_new.append(chord_new)
                        chord_sym_last = chord_new
                    elif name_part == 'root':
                        note_new = chord.Chord(
                            [[p.midi for p in chord_sym_last.pitches][0]],
                            duration=duration.Duration(4))
                        part_new.append(note_new)
                        chord_sym_last = chord.Chord(
                            [p.midi for p in chord_sym_last.pitches],
                            duration=duration.Duration(4))
                    else:
                        raise Exception(
                            'cannot parse name_part from BIAB musicxml')
                else:
                    for sym in chord_symbols:
                        if name_part == 'chord':
                            chord_new = chord.Chord(
                                [p.midi for p in sym.pitches],
                                duration=duration.Duration(4 /
                                                           len(chord_symbols)))
                            part_new.append(chord_new)
                            chord_sym_last = chord_new
                        elif name_part == 'root':
                            note_new = note.Note([p.midi
                                                  for p in sym.pitches][0],
                                                 duration=duration.Duration(
                                                     4 / len(chord_symbols)))

                            part_new.append(note_new)
                            chord_sym_last = chord.Chord(
                                [p.midi for p in sym.pitches],
                                duration=duration.Duration(4 /
                                                           len(chord_symbols)))
                        else:
                            raise Exception(
                                'cannot parse name_part from BIAB musicxml')

    if name_part == 'chord':
        part_new = postp_mxl.force_texture(part_new, num_voices=4)

    part_new.write('midi', fp=file_output)

    messenger.message(['done', 'bang'])
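# For reference, music21 realizes a chord symbol as concrete pitches, which is
# what the [p.midi for p in sym.pitches] comprehensions above rely on:
#
#     >>> from music21 import harmony
#     >>> [str(p) for p in harmony.ChordSymbol('Cmaj7').pitches]
#     ['C3', 'E3', 'G3', 'B3']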
def main(args):

    messenger = mes.Messenger()

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    data_chords = ir.extract_chords(
        os.path.join(
            utils.get_dirname_audio_warped()
            if utils.b_use_warped() else utils.get_dirname_audio(),
            utils._get_name_project_most_recent() + '.wav'))

    non_empty_chords = vamp_filter.filter_non_chords(data_chords)

    # TODO: refactor, this is slow
    events_chords: Dict[float, music21.chord.Chord] = \
        vamp_convert.vamp_chord_to_dict(non_empty_chords)

    df_chords = prep_vamp.chords_to_df(events_chords)

    chord_tree = quantize.get_interval_tree(df_chords, diff=False)

    data_quantized = quantize.quantize(beatmap,
                                       s_beat_start,
                                       s_beat_end,
                                       trees={'chord': chord_tree})

    data_quantized_chords = data_quantized['chord']

    score = postp_mxl.df_grans_to_score(data_quantized_chords, parts=['chord'])

    part_chord = postp_mxl.extract_part(score, 'chord')

    part_chord = postp_mxl.force_texture(part_chord, num_voices=4)

    utils.create_dir_score()

    utils.create_dir_chord()

    filename_pickle = os.path.join(
        utils.get_dirname_score(), 'chord',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    utils_mxl.freeze_stream(part_chord, filename_pickle)

    notes_live = convert_mxl.to_notes_live(part_chord,
                                           beatmap=beatmap,
                                           s_beat_start=s_beat_start,
                                           s_beat_end=s_beat_end,
                                           tempo=tempo)

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, 'chord')

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])
def main(args):

    (
        s_beat_start,
        s_beat_end,
        tempo,
        beat_start,
        beat_end,
        length_beats,
        beatmap
    ) = utils.get_tuple_beats()

    messenger = mes.Messenger()

    messenger.message(['length_beats', str(length_beats)])

    ts_beatmap = prep_vamp.beatmap_to_ts(
        beatmap
    )

    df_beatmap = prep_vamp.ts_beatmap_to_df(
        ts_beatmap
    )

    beatmap_tree = quantize.get_interval_tree(
        df_beatmap,
        diff=False,
        preserve_struct=True
    )

    data_quantized = quantize.quantize(
        beatmap,
        s_beat_start,
        s_beat_end,
        trees={
            'beatmap': beatmap_tree
        }
    )

    data_quantized_beats = data_quantized['beatmap']

    score = postp_mxl.df_grans_to_score(
        data_quantized_beats,
        parts=['beatmap'],
        type_equality='absolute'
    )

    stream_beatmap = postp_mxl.extract_part(
        score,
        'beatmap'
    )

    notes_live = convert_mxl.to_notes_live(
        stream_beatmap,
        beatmap,
        s_beat_start,
        s_beat_end,
        tempo
    )

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, 'beatmap')

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])