def extract_tempo():
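    """Estimate the tempo of the most recent project's audio with the
    qm-tempotracker Vamp plugin, pickle the estimate, and return it."""
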
    project_name = utils._get_name_project_most_recent()

    data, rate = librosa.load(
        os.path.join(utils.get_dirname_audio(), project_name + '.wav'))

    data_tempo = vamp.collect(data, rate, "qm-vamp-plugins:qm-tempotracker")

    # ensure the tempo output directory exists
    os.makedirs(
        os.path.join(utils.get_project_dir(), 'tempo'),
        exist_ok=True)

    estimate_tempo = vamp_prep.to_tempo(data_tempo)

    # pickle the estimate into the tempo directory created above
    utils.to_pickle(
        estimate_tempo,
        os.path.join(utils.get_project_dir(), 'tempo', project_name))

    return estimate_tempo


def main(args):
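    """Extract the melody from the (optionally warped) project audio and
    write it out as a 'signal' time series via conv_max.to_coll."""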

    use_warped = utils.b_use_warped()

    data_melody = ir.extract_melody(
        os.path.join(
            utils.get_dirname_audio_warped()
            if use_warped else utils.get_dirname_audio(),
            utils._get_name_project_most_recent() + '.wav'))

    df_melody = prep_vamp.melody_to_df(
        (data_melody['vector'][0], data_melody['vector'][1]), index_type='s')

    # zero out negative melody values (the extractor marks unvoiced frames
    # with negative pitch)
    df_melody.loc[df_melody['melody'] < 0, 'melody'] = 0

    conv_max.to_coll(df_melody.rename(columns={'melody': 'signal'}),
                     conv_max.file_ts_coll)

    messenger = mes.Messenger()

    messenger.message(['done', 'bang'])


def main(args):
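    """Extract chords from the project audio, quantize them to the beatmap,
    pickle the resulting chord part, and export it as Live notes."""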

    messenger = mes.Messenger()

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    data_chords = ir.extract_chords(
        os.path.join(
            utils.get_dirname_audio_warped()
            if utils.b_use_warped() else utils.get_dirname_audio(),
            utils._get_name_project_most_recent() + '.wav'))

    non_empty_chords = vamp_filter.filter_non_chords(data_chords)

    # TODO: refactor, this is slow
    events_chords: Dict[float, music21.chord.Chord] = (
        vamp_convert.vamp_chord_to_dict(non_empty_chords))

    df_chords = prep_vamp.chords_to_df(events_chords)

    chord_tree = quantize.get_interval_tree(df_chords, diff=False)

    data_quantized = quantize.quantize(beatmap,
                                       s_beat_start,
                                       s_beat_end,
                                       trees={'chord': chord_tree})

    data_quantized_chords = data_quantized['chord']

    score = postp_mxl.df_grans_to_score(data_quantized_chords, parts=['chord'])

    part_chord = postp_mxl.extract_part(score, 'chord')

    part_chord = postp_mxl.force_texture(part_chord, num_voices=4)

    utils.create_dir_score()

    utils.create_dir_chord()

    filename_pickle = os.path.join(
        utils.get_dirname_score(), 'chord',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    utils_mxl.freeze_stream(part_chord, filename_pickle)

    notes_live = convert_mxl.to_notes_live(part_chord,
                                           beatmap=beatmap,
                                           s_beat_start=s_beat_start,
                                           s_beat_end=s_beat_end,
                                           tempo=tempo)

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, 'chord')

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])


def main(args):
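    """Extract segments from the pickled melody part ('symbolic') or from
    the project audio ('numeric'), pickle the segment part, and export it
    as Live notes."""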

    messenger = mes.Messenger()

    use_warped = utils.b_use_warped()

    (s_beat_start, s_beat_end, tempo, beat_start, beat_end, length_beats,
     beatmap) = utils.get_tuple_beats()

    messenger.message(['length_beats', str(length_beats)])

    representation = utils.parse_arg(args.representation)

    if representation == 'symbolic':

        filename_pickle = os.path.join(
            utils.get_dirname_score(), 'melody',
            ''.join([utils._get_name_project_most_recent(), '.pkl']))

        part_melody = utils_mxl.thaw_stream(filename_pickle)

        stream_segment = analysis_mxl.get_segments(part_melody)

    elif representation == 'numeric':

        data_segments = ir.extract_segments(
            os.path.join(
                utils.get_dirname_audio_warped()
                if use_warped else utils.get_dirname_audio(),
                utils._get_name_project_most_recent() + '.wav'))

        df_segments = prep_vamp.segments_to_df(data_segments)

        segment_tree = quantize.get_interval_tree(df_segments, diff=False)

        data_quantized = quantize.quantize(beatmap,
                                           s_beat_start,
                                           s_beat_end,
                                           trees={'segment': segment_tree})

        data_quantized_segments = data_quantized['segment']

        score = postp_mxl.df_grans_to_score(data_quantized_segments,
                                            parts=['segment'])

        stream_segment = postp_mxl.extract_part(score, 'segment')

    else:
        raise ValueError(
            ' '.join(['representation', representation, 'does not exist']))

    utils.create_dir_score()

    utils.create_dir_segment()

    filename_pickle = os.path.join(
        utils.get_dirname_score(), 'segment',
        ''.join([utils._get_name_project_most_recent(), '.pkl']))

    utils_mxl.freeze_stream(stream_segment, filename_pickle)

    notes_live = convert_mxl.to_notes_live(stream_segment, beatmap,
                                           s_beat_start, s_beat_end, tempo)

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, 'segment')

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])


def main(args):
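    """Quantize an imported part to the requested beat multiple (melody
    parts are instead re-quantized against the beatmap) and export the
    result."""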

    use_warped = utils.b_use_warped()

    name_part = utils.parse_arg(args.name_part)

    beat_multiple_quantization = utils.parse_arg(args.beat_multiple)

    # music21 snaps to multiples of 1/divisor of a quarter note, e.g. a beat
    # multiple of 0.25 yields a divisor of 4 (a sixteenth-note grid)
    quarter_length_divisor = 1 / float(beat_multiple_quantization)

    importer = io_importer.Importer(
        utils.get_file_json_comm()
    )

    importer.load([name_part])

    notes_live = importer.get_part(name_part)

    mode = 'polyphonic' if name_part == 'chord' else 'monophonic'

    (
        s_beat_start,
        s_beat_end,
        tempo,
        beat_start,
        beat_end,
        length_beats,
        beatmap
    ) = utils.get_tuple_beats()

    messenger = mes.Messenger()

    messenger.message(['length_beats', str(length_beats)])

    stream = convert_mxl.live_to_stream(
        notes_live,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        mode=mode
    )

    # TODO: replace with logic in "granularize.py"
    if name_part == 'melody':

        data_melody = conv_vamp.to_data_melody(
            notes_live,
            offset_s_audio=0,
            duration_s_audio=utils.get_duration_s_audio(
                filename=os.path.join(
                    utils.get_dirname_audio_warped() if use_warped else utils.get_dirname_audio(),
                    utils._get_name_project_most_recent() + '.wav'
                )
            )
        )

        df_melody = prep_vamp.melody_to_df(
            (data_melody['vector'][0], data_melody['vector'][1]),
            index_type='s'
        )

        # zero out negative melody values (unvoiced frames)
        df_melody.loc[df_melody['melody'] < 0, 'melody'] = 0

        melody_tree = quantize.get_interval_tree(
            df_melody
        )

        data_quantized = quantize.quantize(
            beatmap,
            s_beat_start,
            s_beat_end,
            trees={
                'melody': melody_tree
            }
        )

        data_quantized_melody = data_quantized['melody']

        score = postp_mxl.df_grans_to_score(
            data_quantized_melody,
            parts=['melody']
        )

        stream = postp_mxl.extract_part(
            score,
            'melody'
        )
    else:
        stream.quantize(
            (quarter_length_divisor, ),
            inPlace=True
        )

    notes_live = convert_mxl.to_notes_live(
        stream,
        beatmap=beatmap,
        s_beat_start=s_beat_start,
        s_beat_end=s_beat_end,
        tempo=tempo,
        bypass_seconds=True
    )

    exporter = io_exporter.Exporter()

    exporter.set_part(notes_live, name_part)

    exporter.export(utils.get_file_json_comm())

    messenger.message(['done', 'bang'])


def main(args):
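    """Build the project beatmap (evenly spaced when the audio is warped,
    beat-tracked otherwise) and pickle the beat data."""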

    # args.m selects the warped audio directory (see filename_wav below)
    use_warped = args.m

    messenger = mes.Messenger()

    def parse_float(arg):
        # optional arguments arrive as strings via parse_arg; None when absent
        parsed = utils.parse_arg(arg)
        return float(parsed) if parsed else None

    s_beat_start = parse_float(args.s_beat_start)

    s_beat_end = parse_float(args.s_beat_end)

    tempo = parse_float(args.tempo)

    beat_start = parse_float(args.beat_start)

    beat_end = parse_float(args.beat_end)

    length_beats = parse_float(args.length_beats)

    filename_wav = os.path.join(
        utils.get_dirname_audio_warped() if use_warped else utils.get_dirname_audio(),
        utils._get_name_project_most_recent() + '.wav'
    )

    if args.a:
        s_beat_start = 0
        s_beat_end = utils.get_duration_s_audio(filename=filename_wav)

    # NB: to look up a beat in the beatmap, given a beat position in Live,
    # subtract one from the measure, multiply by 4, then subtract one beat
    # e.g., 74.1.1 => beatmap_manual[73*4 + 0]
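    #
    # A minimal sketch of that lookup as code (hypothetical helper, assuming
    # 4/4 time; `live_position_to_index` is not part of this codebase):
    #
    #     def live_position_to_index(measure, beat):
    #         return (measure - 1) * 4 + (beat - 1)
    #
    #     live_position_to_index(74, 1)  # => 73 * 4 + 0 == 292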

    if use_warped:

        s_beat_start = 0

        s_beat_end = utils.get_duration_s_audio(
            filename=filename_wav
        )

        # warped audio is treated as beat-aligned: the beatmap is simply an
        # even grid of beats spanning the full duration
        beatmap = np.linspace(
            0,
            s_beat_end,
            int(beat_end) - int(beat_start) + 1
        )

    else:

        # convert the tracked beat timestamps to plain floats (seconds)
        beatmap = [val.to_float() for val in ir.extract_beats(filename_wav)]

        length_beats = utils.get_num_beats(beatmap, s_beat_start, s_beat_end)

        if args.double:
            beatmap = utils.double_beatmap(beatmap)
            length_beats = length_beats*2
        elif args.halve:
            beatmap = utils.halve_beatmap(beatmap)
            length_beats = length_beats/2

        # TODO: does this mean that all unwarped audio is assumed to be cropped?
        beat_start = 0

        beat_end = beat_start + length_beats - 1

    utils.create_dir_beat()

    filepath_beatmap = os.path.join(
        utils.get_dirname_beat(),
        utils._get_name_project_most_recent() + '.pkl'
    )

    data_beats = {
        's_beat_start': s_beat_start,
        's_beat_end': s_beat_end,
        'tempo': tempo,
        'beat_start': float(beat_start),
        'beat_end': float(beat_end),
        'length_beats': float(length_beats),
        'beatmap': beatmap
    }

    if args.dump_estimates:
        # report each beat position as a fraction of the total duration
        for beat in beatmap:
            messenger.message(['beat_relative', str(beat / s_beat_end)])

    utils.to_pickle(
        data_beats,
        filepath_beatmap
    )

    messenger.message(['done', 'bang'])