Example #1
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404

# Audio, GuitarModel and PitchDetect are the project's own models;
# their import path is not shown in this excerpt.


def process(request, audio_id):
    # query the database for the audio file
    audio = get_object_or_404(Audio, pk=audio_id)

    guitar_model = True
    try:
        frets = int(request.GET['frets'])
        capo = int(request.GET['capo'])
        tuning = request.GET['tuning']
        pitch_sanitize_prune = request.GET['sanitize'] == 'prune'
    except KeyError:
        # missing guitar parameters: transcribe polyphonically
        # without a guitar model
        guitar_model = False

    db_fields = {'fk_audio': audio}
    if guitar_model:
        guitar = GuitarModel(
            num_frets=frets,
            capo=capo,
            tuning=tuning
        )
        guitar.save()

        db_fields['fk_guitar'] = guitar
        db_fields['pitch_sanitize_prune'] = pitch_sanitize_prune

    pestimator = PitchDetect(**db_fields)
    # saving the model records the analysis start timestamp
    pestimator.save()

    # TODO: create spinner on the interface
    audio_url = request.build_absolute_uri(audio.audio_file.url)
    pestimator.estimate_pitches(audio_url)

    # redirect to xml display
    return HttpResponseRedirect('/media/%s' % pestimator.fk_pmei.mei_file.name)
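
For context, this view takes the audio record's primary key as a URL parameter and reads the optional guitar parameters (frets, capo, tuning, sanitize) from the query string. A minimal URLconf sketch for wiring it up could look like the following; the route string, pattern name, and module layout are assumptions for illustration, not taken from the original project.

# urls.py -- hypothetical routing for the process() view above;
# the route and app layout are assumptions.
from django.urls import path

from . import views

urlpatterns = [
    # e.g. /transcribe/42/?frets=22&capo=0&tuning=EADGBE&sanitize=prune
    path('transcribe/<int:audio_id>/', views.process, name='process'),
]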
Example #2
    def transcribe(self, frets, capo, tuning, pitch_sanitize_prune, audio_url):
        # local path of the audio file being transcribed
        # (not used further in this excerpt)
        path = os.path.join(settings.MEDIA_ROOT, str(self.fk_audio.audio_file))

        ####################
        # PITCH ESTIMATION #
        ####################
        guitar = GuitarModel(
            num_frets=frets,
            capo=capo,
            tuning=tuning
        )
        guitar.save()

        pestimator = PitchDetect(fk_audio=self.fk_audio,
                                 fk_guitar=guitar,
                                 pitch_sanitize_prune=pitch_sanitize_prune)
        # saving the model records the analysis start timestamp
        pestimator.save()

        pestimator.estimate_pitches(audio_url)

        # attach the pitch detection analysis information to the transcription model
        self.fk_pid = pestimator

        ########################
        # TABLATURE GENERATION #
        ########################
        taber = Tabulate(fk_pmei=self.fk_pid.fk_pmei,
                         fk_guitar=guitar,
                         pitch_sanitize_prune=pitch_sanitize_prune)
        # saving the model records the analysis start timestamp
        taber.save()

        taber.gen_tab()

        # attach the tablature to the transcription model
        self.fk_tabid = taber

        self.save()
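
The transcribe() method above is defined on a model whose class is not shown in the excerpt; a caller only needs the guitar parameters and an absolute URL for the uploaded audio. A hypothetical call site, assuming the owning model is named Transcription and already has fk_audio populated, might look like this:

# Hypothetical caller -- the Transcription class name and the parameter values
# are assumptions; only the transcribe() signature comes from the excerpt.
transcription = Transcription(fk_audio=audio)
transcription.save()

transcription.transcribe(
    frets=22,
    capo=0,
    tuning='EADGBE',
    pitch_sanitize_prune=True,
    audio_url=request.build_absolute_uri(audio.audio_file.url),
)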
Example #3
    def transcribe(self, frets, capo, tuning, pitch_sanitize_prune, audio_url):
        # local path of the audio file being transcribed
        # (not used further in this excerpt)
        path = os.path.join(settings.MEDIA_ROOT, str(self.fk_audio.audio_file))

        ####################
        # PITCH ESTIMATION #
        ####################
        guitar = GuitarModel(num_frets=frets, capo=capo, tuning=tuning)
        guitar.save()

        pestimator = PitchDetect(fk_audio=self.fk_audio,
                                 fk_guitar=guitar,
                                 pitch_sanitize_prune=pitch_sanitize_prune)
        # saving the model records the analysis start timestamp
        pestimator.save()

        pestimator.estimate_pitches(audio_url)

        # attach the pitch detection analysis information to the transcription model
        self.fk_pid = pestimator

        ########################
        # TABLATURE GENERATION #
        ########################
        taber = Tabulate(fk_pmei=self.fk_pid.fk_pmei,
                         fk_guitar=guitar,
                         pitch_sanitize_prune=pitch_sanitize_prune)
        # saving the model records the analysis start timestamp
        taber.save()

        taber.gen_tab()

        # attach the tablature to the transcription model
        self.fk_tabid = taber

        self.save()
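
All three examples build the same small cluster of models: GuitarModel holds the instrument parameters, PitchDetect runs the pitch estimation, and Tabulate generates the tablature from the resulting MEI file. Their definitions are not part of these excerpts, but the fields they touch suggest roughly the following shape; every field type, relation target, and option below is an inference from usage, not the project's actual schema.

# Rough reconstruction inferred from the calls above -- field types, the
# related model names 'Audio' and 'MEIFile', and on_delete choices are assumptions.
from django.db import models


class GuitarModel(models.Model):
    num_frets = models.IntegerField()
    capo = models.IntegerField()
    tuning = models.CharField(max_length=32)


class PitchDetect(models.Model):
    fk_audio = models.ForeignKey('Audio', on_delete=models.CASCADE)
    fk_guitar = models.ForeignKey(GuitarModel, null=True, blank=True,
                                  on_delete=models.SET_NULL)
    fk_pmei = models.ForeignKey('MEIFile', null=True, blank=True,
                                on_delete=models.SET_NULL)
    pitch_sanitize_prune = models.BooleanField(default=False)


class Tabulate(models.Model):
    fk_pmei = models.ForeignKey('MEIFile', on_delete=models.CASCADE)
    fk_guitar = models.ForeignKey(GuitarModel, on_delete=models.CASCADE)
    pitch_sanitize_prune = models.BooleanField(default=False)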