Example 1
    def run(self, musicbrainzid, fname):
        audioAnalyzer = AudioAnalyzer(verbose=True)
        jointAnalyzer = JointAnalyzer(verbose=True)

        audioAnalyzer.set_pitch_extractor_params(hop_size=196, bin_resolution=7.5)
        # predominant melody extraction
        audio_pitch = audioAnalyzer.extract_pitch(fname)

        joint_pitch = None
        notes = {}
        rec_data = dunya.makam.get_recording(musicbrainzid)
        for w in rec_data['works']:
            symbtr_file = util.docserver_get_symbtrtxt(w['mbid'])
            print(symbtr_file)

            if symbtr_file:
                score_features_file = util.docserver_get_filename(w['mbid'], "scoreanalysis", "metadata", version="0.1")
                with open(score_features_file) as f:
                    score_features = json.load(f)
                joint_features, features = jointAnalyzer.analyze(
                    symbtr_file, score_features, fname, audio_pitch)

                # redo some steps in audio analysis
                features = audioAnalyzer.analyze(
                    metadata=False, pitch=False, **features)

                # get a summary of the analysis
                summarized_features = jointAnalyzer.summarize(
                    score_features=score_features, joint_features=joint_features,
                    score_informed_audio_features=features)

                joint_pitch = summarized_features['audio'].get('pitch', None)

                notes[w['mbid']] = summarized_features['joint'].get('notes', None)

        if joint_pitch:
            audio_pitch = joint_pitch

        pitch = [p[1] for p in audio_pitch['pitch']]

        # pack the pitch track as a bytearray: one byte per sample, 0 for
        # samples below the lowest voiced pitch, otherwise a linear scaling
        # of the voiced range onto 0-255
        packed_pitch = six.BytesIO()
        max_pitch = max(pitch)
        voiced = [p for p in pitch if p > 0]
        min_pitch = min(voiced)

        height = 255
        for p in pitch:
            if p < min_pitch:
                packed_pitch.write(struct.pack("B", 0))
            else:
                packed_pitch.write(struct.pack(
                    "B",
                    int((p - min_pitch) * 1.0 / (max_pitch - min_pitch) * height)))

        output = {}
        output['pitch'] = packed_pitch.getvalue()
        output['pitchmax'] = {'max': max_pitch, 'min': min_pitch}

        return output
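
The packed track stores one byte per pitch sample: 0 for samples below the lowest voiced pitch, and a linear 0-255 scaling of the voiced range otherwise. A minimal decoding sketch under those assumptions (the helper name is illustrative; byte 0 conflates unvoiced samples with the lowest voiced pitch, so the round trip is lossy):

    import struct

    def unpack_pitch(packed, min_pitch, max_pitch, height=255):
        # invert the packing above: byte 0 -> 0.0 Hz (unvoiced),
        # other bytes -> linear position within [min_pitch, max_pitch]
        values = struct.unpack("%dB" % len(packed), packed)
        return [0.0 if v == 0 else
                min_pitch + float(v) / height * (max_pitch - min_pitch)
                for v in values]

    # result = task.run(musicbrainzid, fname)
    # hz = unpack_pitch(result['pitch'],
    #                   result['pitchmax']['min'],
    #                   result['pitchmax']['max'])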
Example 2
    def run(self, musicbrainzid, fname):
        server_name = socket.gethostname()
        subprocess_env = os.environ.copy()
        subprocess_env["MCR_CACHE_ROOT"] = "/tmp/emptydir"
        subprocess_env["LD_LIBRARY_PATH"] = (
            "/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/runtime/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/bin/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/sys/os/glnxa64"
            % ((server_name,) * 3)
        )
        # subprocess_env["LD_LIBRARY_PATH"] = "/usr/local/MATLAB/MATLAB_Runtime/v85/runtime/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/bin/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/os/glnxa64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/java/jre/glnxa64/jre/lib/amd64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/java/jre/glnxa64/jre/lib/amd64/server"
        rec_data = dunya.makam.get_recording(musicbrainzid)

        if len(rec_data["works"]) == 0:
            raise Exception("No work on recording %s" % musicbrainzid)

        symbtrtxt = util.docserver_get_symbtrtxt(rec_data["works"][0]["mbid"])
        if not symbtrtxt:
            raise Exception("No work on recording %s" % musicbrainzid)

        metadata = util.docserver_get_filename(rec_data["works"][0]["mbid"], "metadata", "metadata", version="0.1")

        mp3file = fname
        mlbinary = util.docserver_get_filename(musicbrainzid, "makampitch", "matlab", version="0.6")
        output = tempfile.mkdtemp()

        proc = subprocess.Popen(
            ["/srv/dunya/extractTonicTempoTuning %s %s %s %s %s" % (symbtrtxt, metadata, mp3file, mlbinary, output)],
            stdout=subprocess.PIPE,
            shell=True,
            env=subprocess_env,
        )

        (out, err) = proc.communicate()

        ret = {}
        expected = ["tempo", "tonic", "tuning"]
        for f in expected:
            json_path = os.path.join(output, f + ".json")
            if os.path.isfile(json_path):
                with open(json_path) as json_file:
                    ret[f] = json.load(json_file)
                os.remove(json_path)
            else:
                raise Exception("Missing output %s file for %s" % (f, musicbrainzid))
        os.rmdir(output)

        return ret
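
The binary is launched here through a single shell string, so any whitespace in the paths would split an argument. Assuming nothing in the command needs shell interpretation, the same call can be made with an argument list and no shell=True; a sketch of the equivalent invocation:

    import subprocess

    proc = subprocess.Popen(
        ["/srv/dunya/extractTonicTempoTuning",
         symbtrtxt, metadata, mp3file, mlbinary, output],
        stdout=subprocess.PIPE,
        env=subprocess_env,
    )
    out, err = proc.communicate()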
Example 3
    def run(self, musicbrainzid, fname):
        server_name = socket.gethostname()
        subprocess_env = os.environ.copy()
        subprocess_env["MCR_CACHE_ROOT"] = "/tmp/emptydir"
        subprocess_env["LD_LIBRARY_PATH"] = "/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/runtime/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/bin/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/sys/os/glnxa64" % ((server_name,)*3)
        #subprocess_env["LD_LIBRARY_PATH"] = "/usr/local/MATLAB/MATLAB_Runtime/v85/runtime/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/bin/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/os/glnxa64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sy    s/java/jre/glnxa64/jre/lib/amd64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/java/jre/glnxa64/jre/lib/amd64/server"
        rec_data = dunya.makam.get_recording(musicbrainzid)
 
        if len(rec_data['works']) == 0:
            raise Exception('No work on recording %s' % musicbrainzid)
 
        symbtrtxt = util.docserver_get_symbtrtxt(rec_data['works'][0]['mbid'])
        if not symbtrtxt:
            raise Exception('No SymbTr score for recording %s' % musicbrainzid)
        metadata = util.docserver_get_filename(rec_data['works'][0]['mbid'], "metadata", "metadata", version="0.1")
        tonic = util.docserver_get_filename(musicbrainzid, "tonictempotuning", "tonic", version="0.1")
        tempo = util.docserver_get_filename(musicbrainzid, "tonictempotuning", "tempo", version="0.1")
        tuning = util.docserver_get_filename(musicbrainzid, "tonictempotuning", "tuning", version="0.1")
        melody = util.docserver_get_filename(musicbrainzid, "makampitch", "matlab", version="0.6")

        mp3file = fname
        output = tempfile.mkdtemp()
        print "/srv/dunya/alignAudioScore %s %s '' %s %s %s %s %s %s" % (symbtrtxt, metadata, mp3file, melody, tonic, tempo, tuning, output) 
        proc = subprocess.Popen(["/srv/dunya/alignAudioScore %s %s '' %s %s %s %s %s %s" % (symbtrtxt, metadata, mp3file, melody, tonic, tempo, tuning, output)], stdout=subprocess.PIPE, shell=True, env=subprocess_env)
        (out, err) = proc.communicate()
        
        ret = {}
        if os.path.isfile(os.path.join(output, 'alignedNotes.json')):
            with open(os.path.join(output, 'alignedNotes.json')) as json_file:
                ret['notesalign'] = json.load(json_file)
            os.remove(os.path.join(output, 'alignedNotes.json'))
        if os.path.isfile(os.path.join(output, 'sectionLinks.json')):
            with open(os.path.join(output, 'sectionLinks.json')) as json_file:
                ret['sectionlinks'] = json.load(json_file)
            os.remove(os.path.join(output, 'sectionLinks.json'))
        
        os.rmdir(output)

        return ret
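
The two blocks above repeat the same check, parse, and delete sequence for each result file. A small helper, sketched here with an illustrative name, would factor that out:

    import json
    import os

    def load_and_remove(path):
        # parse a JSON result file, then delete it
        with open(path) as json_file:
            data = json.load(json_file)
        os.remove(path)
        return data

    # e.g. inside run():
    # notes_path = os.path.join(output, 'alignedNotes.json')
    # if os.path.isfile(notes_path):
    #     ret['notesalign'] = load_and_remove(notes_path)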
Example 4
    def run(self, musicbrainzid, fname):
        output = {
            "works_intervals": {},
            "tonic": {},
            "pitch": {},
            "melodic_progression": {},
            "tempo": {},
            "pitch_distribution": {},
            "pitch_class_distribution": {},
            "transposition": {},
            "makam": {},
            "note_models": {},
            "notes": {},
            "sections": {}
        }

        audioAnalyzer = AudioAnalyzer(verbose=True)
        jointAnalyzer = JointAnalyzer(verbose=True)

        # predominant melody extraction
        pitchfile = util.docserver_get_filename(musicbrainzid,
                                                "audioanalysis",
                                                "pitch",
                                                version="0.1")
        with open(pitchfile) as f:
            audio_pitch = json.load(f)

        output['pitch'] = None
        rec_data = dunya.makam.get_recording(musicbrainzid)
        for w in rec_data['works']:
            symbtr_file = util.docserver_get_symbtrtxt(w['mbid'])
            print(symbtr_file)
            if symbtr_file:
                score_features_file = util.docserver_get_filename(
                    w['mbid'], "scoreanalysis", "metadata", version="0.1")
                with open(score_features_file) as f:
                    score_features = json.load(f)
                joint_features, features = jointAnalyzer.analyze(
                    symbtr_file, score_features, fname, audio_pitch)

                # redo some steps in audio analysis
                features = audioAnalyzer.analyze(metadata=False,
                                                 pitch=False,
                                                 **features)

                # get a summary of the analysis
                summarized_features = jointAnalyzer.summarize(
                    score_features=score_features,
                    joint_features=joint_features,
                    score_informed_audio_features=features)
                # the summarized pitch also seeds the next work's alignment
                audio_pitch = summarized_features['audio'].get('pitch', None)

                pitch = audio_pitch
                if pitch:
                    pitch['pitch'] = pitch['pitch'].tolist()
                melodic_progression = features.get('melodic_progression', None)
                tonic = features.get('tonic', None)
                tempo = features.get('tempo', None)
                pitch_distribution = features.get('pitch_distribution', None)
                pitch_class_distribution = features.get(
                    'pitch_class_distribution', None)
                transposition = features.get('transposition', None)
                makam = features.get('makam', None)
                note_models = features.get('note_models', None)
                notes = summarized_features['joint'].get('notes', None)
                sections = summarized_features['joint'].get('sections', None)

                if pitch_distribution:
                    pitch_distribution = pitch_distribution.to_dict()
                if pitch_class_distribution:
                    pitch_class_distribution = pitch_class_distribution.to_dict(
                    )
                if note_models:
                    note_models = to_dict(note_models)
                if melodic_progression:
                    AudioSeyirAnalyzer.serialize(melodic_progression)

                if notes:
                    # overall span covered by the aligned notes
                    min_interval = float('inf')
                    max_interval = 0
                    for i in notes:
                        if i['interval'][0] < min_interval:
                            min_interval = i['interval'][0]
                        if i['interval'][1] > max_interval:
                            max_interval = i['interval'][1]

                    output["works_intervals"][w['mbid']] = {
                        "from": min_interval,
                        "to": max_interval
                    }
                output["pitch"] = pitch
                output["melodic_progression"] = melodic_progression
                output["pitch_distribution"] = pitch_distribution
                output["pitch_class_distribution"] = pitch_class_distribution
                output["tempo"][w['mbid']] = tempo
                output["tonic"][w['mbid']] = tonic
                output["transposition"][w['mbid']] = transposition
                output["makam"][w['mbid']] = makam
                output["note_models"][w['mbid']] = note_models
                output["notes"][w['mbid']] = notes
                output["sections"][w['mbid']] = sections

        return output
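
works_intervals maps each work MBID to the span its aligned notes cover, taken from the notes' interval fields. A short sketch (not part of the pipeline) of reducing the returned dict to the overall covered span:

    def overall_span(works_intervals):
        # smallest "from" and largest "to" across all aligned works,
        # or None when no work produced aligned notes
        if not works_intervals:
            return None
        return (min(w["from"] for w in works_intervals.values()),
                max(w["to"] for w in works_intervals.values()))

    # span = overall_span(result["works_intervals"])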