Example #1
0
    def run(self, musicbrainzid, fname):
        """Segment a score into phrases with the MATLAB phraseSeg binary.

        Fetches the pre-trained boundary statistics and FLD model from the
        docserver, writes a one-entry file list as JSON, runs the
        ``segmentWrapper`` command and returns its parsed JSON output.

        Returns:
            dict with a single key "segments" holding the parsed output list.

        Raises:
            Exception: if the training files are missing on the docserver.
        """
        server_name = socket.gethostname()
        subprocess_env = os.environ.copy()
        # The MATLAB Compiler Runtime needs a writable cache directory.
        subprocess_env["MCR_CACHE_ROOT"] = "/tmp/emptydir"
        subprocess_env[
            "LD_LIBRARY_PATH"] = "/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/runtime/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/bin/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/sys/os/glnxa64" % (
        (server_name,) * 3)

        try:
            boundstat = util.docserver_get_filename('d2f729b8-cdc5-4019-ae0d-41695b78ee5b', "trainphraseseg",
                                                    "boundstat", version="0.1")
            fldmodel = util.docserver_get_filename('d2f729b8-cdc5-4019-ae0d-41695b78ee5b', "trainphraseseg", "fldmodel",
                                                   version="0.1")
        except util.NoFileException:
            raise Exception('No training files found for recording %s' % musicbrainzid)

        symbtr = compmusic.dunya.makam.get_symbtr(musicbrainzid)
        files = [{'path': fname, 'name': symbtr['name']}]

        # mkstemp returns an open fd; close it right away since we reopen the
        # path by name (and the binary reads/writes it by name too).
        fp, files_json = tempfile.mkstemp(".json")
        os.close(fp)
        with open(files_json, 'w') as f:
            json.dump(files, f)

        fp, out_json = tempfile.mkstemp(".json")
        os.close(fp)

        try:
            proc = subprocess.Popen(
                ["/srv/dunya/phraseSeg segmentWrapper %s %s %s %s" % (boundstat, fldmodel, files_json, out_json)],
                stdout=subprocess.PIPE, shell=True, env=subprocess_env)
            proc.communicate()

            ret = {"segments": []}
            with open(out_json, 'r') as segments_file:
                segments = segments_file.read()
            # An empty output file means the binary produced no segments.
            if segments == "":
                segments = "[]"
            ret["segments"].append(json.loads(segments))
        finally:
            # Always remove the temp files, even if the run or parse fails.
            os.unlink(files_json)
            os.unlink(out_json)

        return ret
Example #2
0
def obtainPitchTonicForRaga(raganame, outputfolder):
        """Export pitch tracks and tonic values for all recordings of a raga.

        For every recording mapped to *raganame*, copies its mp3 into
        *outputfolder* and writes two tab-separated files next to it:
        ``<basename>.pitch`` (the pitch track) and ``<basename>.tonic``
        (the tonic frequency as a single value).
        """

        if not os.path.exists(outputfolder):
            os.makedirs(outputfolder)
        
        #get raga recording mapping
        ragaRecordings = obtainRagaRecordingMapping()
        print len(ragaRecordings.keys())
        
        for mbid in ragaRecordings[raganame]:
            # pitch track and tonic are fetched from the docserver
            pitchData = util.docserver_get_json(mbid,'pitch','pitch')
            tonic = float(util.docserver_get_contents(mbid,'ctonic','tonic'))
            HopSize = 196  # NOTE(review): unused below; presumably the extractor hop size — confirm
            
            filename =util.docserver_get_filename(mbid,'mp3')
            
            shutil.copyfile(filename, outputfolder+ '/'+filename.split('/')[-1])
            
            # strip directory and extension to build the output basename
            filename = filename.split('/')[-1].split('.')[0]
            
            #TStamps = np.array(range(0,len(pitchData)))*np.float(HopSize)/44100.0
            
            #dump = np.array([TStamps, pitchData]).transpose()
            
            np.savetxt(outputfolder+ '/'+filename+'.pitch', pitchData, delimiter = "\t")
            
            np.savetxt(outputfolder+ '/'+filename+'.tonic', np.array([tonic]), delimiter = "\t")
Example #3
0
    def run(self, musicbrainzid, fname):
        """Estimate the tonic of a recording with the PitchCandExt binary.

        Runs the extractor on the recording's wav file and returns its raw
        stdout as the tonic value.
        """
        wav_path = util.docserver_get_filename(musicbrainzid, "wav", "wave")
        command = ["/srv/dunya/PitchCandExt_O3", "-m", "T", "-t", "V", "-i", wav_path]
        extractor = subprocess.Popen(command, stdout=subprocess.PIPE)
        stdout_data = extractor.communicate()[0]
        return {"tonic": str(stdout_data)}
Example #4
0
    def run(self, musicbrainzid, fname):
        """Extract tonic, tempo and tuning with the MATLAB extractTonicTempoTuning binary.

        Requires the recording to be linked to at least one work with a
        SymbTr score; the first work is used.

        Returns:
            dict with keys "tempo", "tonic" and "tuning", each the parsed
            JSON output of the binary.

        Raises:
            Exception: if the recording has no work, no SymbTr score, or the
                binary did not produce one of the expected output files.
        """
        server_name = socket.gethostname()
        subprocess_env = os.environ.copy()
        # The MATLAB Compiler Runtime needs a writable cache directory.
        subprocess_env["MCR_CACHE_ROOT"] = "/tmp/emptydir"
        subprocess_env["LD_LIBRARY_PATH"] = (
            "/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/runtime/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/bin/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/sys/os/glnxa64"
            % ((server_name,) * 3)
        )
        rec_data = dunya.makam.get_recording(musicbrainzid)

        if len(rec_data["works"]) == 0:
            raise Exception("No work on recording %s" % musicbrainzid)

        symbtrtxt = util.docserver_get_symbtrtxt(rec_data["works"][0]["mbid"])
        if not symbtrtxt:
            # Bug fix: the message previously claimed "No work" here too.
            raise Exception("No SymbTr score for recording %s" % musicbrainzid)

        metadata = util.docserver_get_filename(rec_data["works"][0]["mbid"], "metadata", "metadata", version="0.1")

        mp3file = fname
        mlbinary = util.docserver_get_filename(musicbrainzid, "makampitch", "matlab", version="0.6")
        output = tempfile.mkdtemp()

        proc = subprocess.Popen(
            ["/srv/dunya/extractTonicTempoTuning %s %s %s %s %s" % (symbtrtxt, metadata, mp3file, mlbinary, output)],
            stdout=subprocess.PIPE,
            shell=True,
            env=subprocess_env,
        )
        proc.communicate()

        ret = {}
        expected = ["tempo", "tonic", "tuning"]
        for feature in expected:
            path = os.path.join(output, feature + ".json")
            if not os.path.isfile(path):
                raise Exception("Missing output %s file for %s" % (feature, musicbrainzid))
            # Parse and remove each expected output file of the binary.
            with open(path) as json_file:
                ret[feature] = json.load(json_file)
            os.remove(path)
        os.rmdir(output)

        return ret
Example #5
0
    def run(self, musicbrainzid, fname):
        """Render the seyir (melodic progression) image for a recording.

        Loads the stored pitch range and the pitch track (preferring the
        joint analysis, falling back to the audio analysis), computes seyir
        features over adaptively sized frames and stores the plotted PNG
        bytes under ret['smallfull'].

        Raises:
            Exception: if the plot produced an empty image file.
        """
        max_pitch = util.docserver_get_filename(musicbrainzid, "tomatodunya", "pitchmax", part=1, version="0.1")
        with open(max_pitch) as pitch_range_file:
            pitch = json.load(pitch_range_file)

        self._f_min = pitch['min']
        self._f_max = pitch['max']
        ret = super(MakamAudioImage, self).run(musicbrainzid, fname)

        loaded_pitch = None
        try:
            pitchfile = util.docserver_get_filename(musicbrainzid, "jointanalysis", "pitch", part=1, version="0.1")
            with open(pitchfile, 'r') as pitch_file:
                loaded_pitch = json.load(pitch_file)
        except util.NoFileException:
            loaded_pitch = None
        # If pitch extraction from jointanalysis failed (empty result) or the
        # file is missing, fall back to the audioanalysis pitch.
        if not loaded_pitch:
            pitchfile = util.docserver_get_filename(musicbrainzid, "audioanalysis", "pitch", part=1, version="0.1")
            with open(pitchfile, 'r') as pitch_file:
                loaded_pitch = json.load(pitch_file)

        pitch = np.array(loaded_pitch['pitch'])

        audioSeyirAnalyzer = audioseyiranalyzer.AudioSeyirAnalyzer()

        # Frame duration: aim for at least 40 frames, cap at 30 s per frame,
        # round to a multiple of 5 s, with a 5 s minimum.
        duration = pitch[-1][0]
        min_num_frames = 40
        max_frame_dur = 30
        frame_dur = min(duration / min_num_frames, max_frame_dur)
        frame_dur = int(5 * round(float(frame_dur) / 5))
        if not frame_dur:
            frame_dur = 5

        seyir_features = audioSeyirAnalyzer.analyze(pitch, frame_dur=frame_dur, hop_ratio=0.5)

        fimage = tempfile.NamedTemporaryFile(mode='w+', suffix=".png")
        plot(seyir_features, fimage.name)
        fimage.flush()
        with open(fimage.name, mode='rb') as image_file:
            file_content = image_file.read()
        if not file_content:
            raise Exception("No image generated")
        ret['smallfull'] = file_content

        return ret
Example #6
0
    def run(self, musicbrainzid, fname):
        """Extract the pitch track of a recording and pack it into bytes.

        Runs predominant-melody extraction; when the recording has works
        with SymbTr scores, refines the pitch with joint score-audio
        analysis. The final track is quantized to one byte per frame for
        compact storage, along with the min/max pitch values used for the
        quantization.
        """
        audioAnalyzer = AudioAnalyzer(verbose=True)
        jointAnalyzer = JointAnalyzer(verbose=True)

        audioAnalyzer.set_pitch_extractor_params(hop_size=196, bin_resolution=7.5)
        # predominant melody extraction
        audio_pitch = audioAnalyzer.extract_pitch(fname)

        joint_pitch = None
        notes = {}
        rec_data = dunya.makam.get_recording(musicbrainzid)
        for w in rec_data['works']:
            symbtr_file = util.docserver_get_symbtrtxt(w['mbid'])
            print(symbtr_file)

            if symbtr_file:
                score_features_file = util.docserver_get_filename(w['mbid'], "scoreanalysis", "metadata", version="0.1")
                score_features = json.load(open(score_features_file))
                joint_features, features = jointAnalyzer.analyze(
                    symbtr_file, score_features, fname, audio_pitch)

                # redo some steps in audio analysis
                features = audioAnalyzer.analyze(
                    metadata=False, pitch=False, **features)

                # get a summary of the analysis
                summarized_features = jointAnalyzer.summarize(
                    score_features=score_features, joint_features=joint_features,
                    score_informed_audio_features=features)

                joint_pitch = summarized_features['audio'].get('pitch', None)

                notes[w['mbid']] = summarized_features['joint'].get('notes', None)

        # Prefer the score-informed pitch when joint analysis succeeded.
        if joint_pitch:
            audio_pitch = joint_pitch

        pitch = [p[1] for p in audio_pitch['pitch']]

        # pitches as bytearray
        packed_pitch = six.BytesIO()
        max_pitch = max(pitch)
        temp = [p for p in pitch if p > 0]
        min_pitch = min(temp)  # smallest voiced (non-zero) pitch value

        # Quantize each frame linearly to 0..255; frames below the smallest
        # voiced pitch (i.e. unvoiced/zero frames) are written as 0.
        # NOTE(review): assumes at least one voiced frame and max > min,
        # otherwise min()/division would fail — confirm upstream guarantees.
        height = 255
        for p in pitch:
            if p < min_pitch:
                packed_pitch.write(struct.pack("B", 0))
            else:
                packed_pitch.write(struct.pack("B", int((p - min_pitch) * 1.0 / (max_pitch - min_pitch) * height)))

        output = {}
        output['pitch'] = packed_pitch.getvalue()
        output['pitchmax'] = {'max': max_pitch, 'min': min_pitch}

        return output
Example #7
0
    def test_derived_file_path(self):
        """Derived-file paths resolve per part; out-of-range parts raise."""
        pathder = self.df.full_path_for_part(1)
        self.assertEqual("/collectionroot/derived/f5/f522f7c6-8299-44e9-889f-063d37526801/derived/0.1/f522f7c6-8299-44e9-889f-063d37526801-derived-0.1-meta-1.json", pathder)

        # Requesting a part number beyond the number of parts must raise.
        with self.assertRaises(exceptions.NoFileException) as cm:
            pathder = self.df.full_path_for_part(3)
        self.assertEqual(cm.exception.message, "partnumber is greater than number of parts")

        # The same path must also be reachable through the util helper.
        pathder2 = util.docserver_get_filename("f522f7c6-8299-44e9-889f-063d37526801", "derived", "meta", "2")
        self.assertEqual("/collectionroot/derived/f5/f522f7c6-8299-44e9-889f-063d37526801/derived/0.1/f522f7c6-8299-44e9-889f-063d37526801-derived-0.1-meta-2.json", pathder2)
Example #8
0
    def run(self, musicbrainzid, fname):
        """Estimate the tonic of a recording via the PitchCandExt binary.

        The extractor is invoked on the recording's wav file; its raw stdout
        is returned as the tonic value.
        """
        wav_path = util.docserver_get_filename(musicbrainzid, "wav", "wave")
        process = subprocess.Popen(
            ["/srv/dunya/PitchCandExt_O3", "-m", "T", "-t", "V", "-i", wav_path],
            stdout=subprocess.PIPE)
        stdout_data, _ = process.communicate()

        return {"tonic": str(stdout_data)}
Example #9
0
  def run(self, musicbrainzid, fname):
    """Run pitch extraction, then apply score-informed octave correction.

    Extends the parent's pitch extraction with an octave correction based
    on the score-informed tonic and the aligned notes, and strips the
    bulky intermediate entries ("matlab", "settings") from the result.
    """
    output = super(DunyaPitchMakam, self).run(musicbrainzid, fname)

    # Compute the pitch octave correction
    tonicfile = util.docserver_get_filename(musicbrainzid, "tonictempotuning", "tonic", version="0.1")
    alignednotefile = util.docserver_get_filename(musicbrainzid, "scorealign", "notesalign", version="0.1")

    pitch = array(output['pitch'])
    # Keep only the pitch values (column 1) for the published track.
    out_pitch = [p[1] for p in output["pitch"]]
    with open(tonicfile, 'r') as f:
        tonic = json.load(f)['scoreInformed']['Value']
    with open(alignednotefile, 'r') as f:
        notes = json.load(f)['notes']

    # The synthesized pitch track returned alongside is not used here.
    pitch_corrected, _synth_pitch, notes = alignedpitchfilter.correctOctaveErrors(pitch, notes, tonic)
    output["pitch_corrected"] = pitch_corrected
    output["pitch"] = out_pitch
    del output["matlab"]
    del output["settings"]
    return output
Example #10
0
    def run(self, musicbrainzid, fname):
        """Align a recording's audio with its SymbTr score (MATLAB alignAudioScore).

        Uses the first linked work's score plus previously computed tonic,
        tempo, tuning and melody features.

        Returns:
            dict possibly containing 'notesalign' (aligned notes) and
            'sectionlinks' (section links), depending on which output files
            the binary produced.

        Raises:
            Exception: if the recording has no linked work or no SymbTr score.
        """
        server_name = socket.gethostname()
        subprocess_env = os.environ.copy()
        # The MATLAB Compiler Runtime needs a writable cache directory.
        subprocess_env["MCR_CACHE_ROOT"] = "/tmp/emptydir"
        subprocess_env["LD_LIBRARY_PATH"] = "/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/runtime/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/bin/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/sys/os/glnxa64" % ((server_name,)*3)
        #subprocess_env["LD_LIBRARY_PATH"] = "/usr/local/MATLAB/MATLAB_Runtime/v85/runtime/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/bin/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/os/glnxa64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sy    s/java/jre/glnxa64/jre/lib/amd64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/java/jre/glnxa64/jre/lib/amd64/server"
        rec_data = dunya.makam.get_recording(musicbrainzid)

        if len(rec_data['works']) == 0:
            raise Exception('No work on recording %s' % musicbrainzid)

        symbtrtxt =util.docserver_get_symbtrtxt(rec_data['works'][0]['mbid'])
        if not symbtrtxt:
            raise Exception('No work on recording %s' % musicbrainzid)
        # Previously computed features fetched from the docserver.
        metadata = util.docserver_get_filename(rec_data['works'][0]['mbid'], "metadata", "metadata", version="0.1")
        tonic = util.docserver_get_filename(musicbrainzid, "tonictempotuning", "tonic", version="0.1")
        tempo = util.docserver_get_filename(musicbrainzid, "tonictempotuning", "tempo", version="0.1")
        tuning = util.docserver_get_filename(musicbrainzid, "tonictempotuning", "tuning", version="0.1")
        melody = util.docserver_get_filename(musicbrainzid, "makampitch", "matlab", version="0.6")

        mp3file = fname
        output = tempfile.mkdtemp()
        print "/srv/dunya/alignAudioScore %s %s '' %s %s %s %s %s %s" % (symbtrtxt, metadata, mp3file, melody, tonic, tempo, tuning, output) 
        proc = subprocess.Popen(["/srv/dunya/alignAudioScore %s %s '' %s %s %s %s %s %s" % (symbtrtxt, metadata, mp3file, melody, tonic, tempo, tuning, output)], stdout=subprocess.PIPE, shell=True, env=subprocess_env)
        (out, err) = proc.communicate()
        
        # Collect whichever output files the binary produced; both are optional.
        ret = {}
        if os.path.isfile(os.path.join(output, 'alignedNotes.json')):
            json_file = open(os.path.join(output, 'alignedNotes.json'))
            ret['notesalign'] = json.loads(json_file.read())
            json_file.close()
            os.remove(os.path.join(output, 'alignedNotes.json'))
        if os.path.isfile(os.path.join(output, 'sectionLinks.json')):
            json_file = open(os.path.join(output, 'sectionLinks.json'))
            ret['sectionlinks'] = json.loads(json_file.read())
            json_file.close()
            os.remove(os.path.join(output, 'sectionLinks.json'))
        
        os.rmdir(output)

        return ret
Example #11
0
    def run(self, fname):
        """Return the tonic, using a cached yaml value when available.

        Falls back to running the PitchCandExt binary on the recording's
        wav file when no yaml value exists.
        """

        yamltonic = self.get_from_file(self.musicbrainz_id)
        if yamltonic:
            print "Got tonic from a yaml file"
            tonic = yamltonic
        else:
            print "Need to calculate the tonic from scratch"
            wavfname = util.docserver_get_filename(self.musicbrainz_id, "wav", "wave")
            proclist = ["/srv/dunya/PitchCandExt_O3", "-m", "T", "-t", "V", "-i", wavfname]
            p = subprocess.Popen(proclist, stdout=subprocess.PIPE)
            output = p.communicate()
            tonic = output[0]  # raw stdout of the extractor

        return {"tonic": str(tonic)}
Example #12
0
    def test_derived_file_path(self):
        """Derived-file paths resolve per part; out-of-range parts raise."""
        first_part = self.df.full_path_for_part(1)
        self.assertEqual(
            "/collectionroot/derived/f5/f522f7c6-8299-44e9-889f-063d37526801/derived/0.1/f522f7c6-8299-44e9-889f-063d37526801-derived-0.1-meta-1.json",
            first_part)

        # A part number beyond the file's part count must raise.
        with self.assertRaises(exceptions.NoFileException) as cm:
            first_part = self.df.full_path_for_part(3)
        self.assertEqual(
            cm.exception.message,
            "partnumber is greater than number of parts")

        # The same path scheme must hold when going through the util helper.
        second_part = util.docserver_get_filename(
            "f522f7c6-8299-44e9-889f-063d37526801", "derived", "meta", "2")
        self.assertEqual(
            "/collectionroot/derived/f5/f522f7c6-8299-44e9-889f-063d37526801/derived/0.1/f522f7c6-8299-44e9-889f-063d37526801-derived-0.1-meta-2.json",
            second_part)
Example #13
0
    def run(self, musicbrainzid, fname):
        """Summarize joint score-audio analysis for a recording.

        For every linked work that has a SymbTr score, aligns score and
        audio, redoes the score-informed parts of the audio analysis and
        collects per-work features (tempo, tonic, transposition, makam,
        note models, notes, sections) plus recording-level features (pitch,
        melodic progression, distributions, note interval ranges).
        """
        output = {
            "works_intervals": {},
            "tonic": {},
            "pitch": {},
            "melodic_progression": {},
            "tempo": {},
            "pitch_distribution": {},
            "pitch_class_distribution": {},
            "transposition": {},
            "makam": {},
            "note_models": {},
            "notes": {},
            "sections": {}
        }

        audioAnalyzer = AudioAnalyzer(verbose=True)
        jointAnalyzer = JointAnalyzer(verbose=True)

        # predominant melody extraction
        pitchfile = util.docserver_get_filename(musicbrainzid,
                                                "audioanalysis",
                                                "pitch",
                                                version="0.1")
        audio_pitch = json.load(open(pitchfile))

        output['pitch'] = None
        rec_data = dunya.makam.get_recording(musicbrainzid)
        for w in rec_data['works']:
            symbtr_file = util.docserver_get_symbtrtxt(w['mbid'])
            print symbtr_file
            if symbtr_file:
                score_features_file = util.docserver_get_filename(
                    w['mbid'], "scoreanalysis", "metadata", version="0.1")
                score_features = json.load(open(score_features_file))
                joint_features, features = jointAnalyzer.analyze(
                    symbtr_file, score_features, fname, audio_pitch)

                # redo some steps in audio analysis
                features = audioAnalyzer.analyze(metadata=False,
                                                 pitch=False,
                                                 **features)

                # get a summary of the analysis
                summarized_features = jointAnalyzer.summarize(
                    score_features=score_features,
                    joint_features=joint_features,
                    score_informed_audio_features=features)
                # the refined pitch feeds the next work's alignment
                audio_pitch = summarized_features['audio'].get('pitch', None)

                pitch = summarized_features['audio'].get('pitch', None)
                if pitch:
                    # numpy array -> plain list so the result is serializable
                    pitch['pitch'] = pitch['pitch'].tolist()
                melodic_progression = features.get('melodic_progression', None)
                tonic = features.get('tonic', None)
                tempo = features.get('tempo', None)
                pitch_distribution = features.get('pitch_distribution', None)
                pitch_class_distribution = features.get(
                    'pitch_class_distribution', None)
                transposition = features.get('transposition', None)
                makam = features.get('makam', None)
                note_models = features.get('note_models', None)
                notes = summarized_features['joint'].get('notes', None)
                sections = summarized_features['joint'].get('sections', None)

                # convert feature objects to plain dicts for serialization
                if pitch_distribution:
                    pitch_distribution = pitch_distribution.to_dict()
                if pitch_class_distribution:
                    pitch_class_distribution = pitch_class_distribution.to_dict(
                    )
                if note_models:
                    note_models = to_dict(note_models)
                if melodic_progression:
                    AudioSeyirAnalyzer.serialize(melodic_progression)

                if notes:
                    # overall interval span covered by this work's notes
                    min_interval = 9999
                    max_interval = 0
                    for i in notes:
                        if i['interval'][0] < min_interval:
                            min_interval = i['interval'][0]
                        if i['interval'][1] > max_interval:
                            max_interval = i['interval'][1]

                    output["works_intervals"][w['mbid']] = {
                        "from": min_interval,
                        "to": max_interval
                    }
                # recording-level features are overwritten by the last work;
                # per-work features are keyed by the work mbid
                output["pitch"] = pitch
                output["melodic_progression"] = melodic_progression
                output["pitch_distribution"] = pitch_distribution
                output["pitch_class_distribution"] = pitch_class_distribution
                output["tempo"][w['mbid']] = tempo
                output["tonic"][w['mbid']] = tonic
                output["transposition"][w['mbid']] = transposition
                output["makam"][w['mbid']] = makam
                output["note_models"][w['mbid']] = note_models
                output["notes"][w['mbid']] = notes
                output["sections"][w['mbid']] = sections

        return output
    def run(self, musicbrainzid, fname):
        """Render the seyir (melodic progression) image for a recording.

        Loads the stored pitch range and the pitch track (preferring the
        joint analysis, falling back to the audio analysis), computes seyir
        features over adaptively sized frames and stores the plotted PNG
        bytes under ret['smallfull'].

        Raises:
            Exception: if the plot produced an empty image file.
        """
        max_pitch = util.docserver_get_filename(musicbrainzid,
                                                "tomatodunya",
                                                "pitchmax",
                                                part=1,
                                                version="0.1")
        with open(max_pitch) as pitch_range_file:
            pitch = json.load(pitch_range_file)

        self._f_min = pitch['min']
        self._f_max = pitch['max']
        ret = super(MakamAudioImage, self).run(musicbrainzid, fname)

        loaded_pitch = None
        try:
            pitchfile = util.docserver_get_filename(musicbrainzid,
                                                    "jointanalysis",
                                                    "pitch",
                                                    part=1,
                                                    version="0.1")
            with open(pitchfile, 'r') as pitch_file:
                loaded_pitch = json.load(pitch_file)
        except util.NoFileException:
            loaded_pitch = None
        # If pitch extraction from jointanalysis failed (empty result) or the
        # file is missing, fall back to the audioanalysis pitch.
        if not loaded_pitch:
            pitchfile = util.docserver_get_filename(musicbrainzid,
                                                    "audioanalysis",
                                                    "pitch",
                                                    part=1,
                                                    version="0.1")
            with open(pitchfile, 'r') as pitch_file:
                loaded_pitch = json.load(pitch_file)

        pitch = np.array(loaded_pitch['pitch'])

        audioSeyirAnalyzer = audioseyiranalyzer.AudioSeyirAnalyzer()

        # Frame duration: aim for at least 40 frames, cap at 30 s per frame,
        # round to a multiple of 5 s, with a 5 s minimum.
        duration = pitch[-1][0]
        min_num_frames = 40
        max_frame_dur = 30
        frame_dur = min(duration / min_num_frames, max_frame_dur)
        frame_dur = int(5 * round(float(frame_dur) / 5))
        if not frame_dur:
            frame_dur = 5

        seyir_features = audioSeyirAnalyzer.analyze(pitch,
                                                    frame_dur=frame_dur,
                                                    hop_ratio=0.5)

        fimage = tempfile.NamedTemporaryFile(mode='w+', suffix=".png")
        plot(seyir_features, fimage.name)
        fimage.flush()
        with open(fimage.name, mode='rb') as image_file:
            file_content = image_file.read()
        if not file_content:
            raise Exception("No image generated")
        ret['smallfull'] = file_content

        return ret
Example #15
0
    def run(self, musicbrainzid, fname):
        """Summarize joint score-audio analysis for a recording.

        For every linked work that has a SymbTr score, aligns score and
        audio, redoes the score-informed parts of the audio analysis and
        collects per-work features (tempo, tonic, transposition, makam,
        note models, notes, sections) plus recording-level features (pitch,
        melodic progression, distributions, note interval ranges).
        """
        output = {
            "works_intervals": {},
            "tonic": {},
            "pitch": {},
            "melodic_progression": {},
            "tempo": {},
            "pitch_distribution": {},
            "pitch_class_distribution": {},
            "transposition": {},
            "makam": {},
            "note_models": {},
            "notes": {},
            "sections": {}
        }

        audioAnalyzer = AudioAnalyzer(verbose=True)
        jointAnalyzer = JointAnalyzer(verbose=True)

        # predominant melody extraction
        pitchfile = util.docserver_get_filename(musicbrainzid, "audioanalysis", "pitch", version="0.1")
        audio_pitch = json.load(open(pitchfile))

        output['pitch'] = None
        rec_data = dunya.makam.get_recording(musicbrainzid)
        for w in rec_data['works']:
            symbtr_file = util.docserver_get_symbtrtxt(w['mbid'])
            print(symbtr_file)
            if symbtr_file:
                score_features_file = util.docserver_get_filename(w['mbid'], "scoreanalysis", "metadata", version="0.1")
                score_features = json.load(open(score_features_file))
                joint_features, features = jointAnalyzer.analyze(
                    symbtr_file, score_features, fname, audio_pitch)

                # redo some steps in audio analysis
                features = audioAnalyzer.analyze(
                    metadata=False, pitch=False, **features)

                # get a summary of the analysis
                summarized_features = jointAnalyzer.summarize(
                    score_features=score_features, joint_features=joint_features,
                    score_informed_audio_features=features)
                # the refined pitch feeds the next work's alignment
                audio_pitch = summarized_features['audio'].get('pitch', None)

                pitch = summarized_features['audio'].get('pitch', None)
                if pitch:
                    # numpy array -> plain list so the result is serializable
                    pitch['pitch'] = pitch['pitch'].tolist()
                melodic_progression = features.get('melodic_progression', None)
                tonic = features.get('tonic', None)
                tempo = features.get('tempo', None)
                pitch_distribution = features.get('pitch_distribution', None)
                pitch_class_distribution = features.get('pitch_class_distribution', None)
                transposition = features.get('transposition', None)
                makam = features.get('makam', None)
                note_models = features.get('note_models', None)
                notes = summarized_features['joint'].get('notes', None)
                sections = summarized_features['joint'].get('sections', None)

                # convert feature objects to plain dicts for serialization
                if pitch_distribution:
                    pitch_distribution = pitch_distribution.to_dict()
                if pitch_class_distribution:
                    pitch_class_distribution = pitch_class_distribution.to_dict()
                if note_models:
                    note_models = to_dict(note_models)
                if melodic_progression:
                    AudioSeyirAnalyzer.serialize(melodic_progression)

                if notes:
                    # overall interval span covered by this work's notes
                    min_interval = 9999
                    max_interval = 0
                    for i in notes:
                        if i['interval'][0] < min_interval:
                            min_interval = i['interval'][0]
                        if i['interval'][1] > max_interval:
                            max_interval = i['interval'][1]

                    output["works_intervals"][w['mbid']] = {"from": min_interval, "to": max_interval}
                # recording-level features are overwritten by the last work;
                # per-work features are keyed by the work mbid
                output["pitch"] = pitch
                output["melodic_progression"] = melodic_progression
                output["pitch_distribution"] = pitch_distribution
                output["pitch_class_distribution"] = pitch_class_distribution
                output["tempo"][w['mbid']] = tempo
                output["tonic"][w['mbid']] = tonic
                output["transposition"][w['mbid']] = transposition
                output["makam"][w['mbid']] = makam
                output["note_models"][w['mbid']] = note_models
                output["notes"][w['mbid']] = notes
                output["sections"][w['mbid']] = sections

        return output
Example #16
0
    def run(self, musicbrainzid, fname):
        """Segment a score into phrases with the MATLAB phraseSeg binary.

        Fetches the pre-trained boundary statistics and FLD model from the
        docserver, writes a one-entry file list as JSON, runs the
        ``segmentWrapper`` command and returns its parsed JSON output under
        the "segments" key.

        Raises:
            Exception: if the training files are missing on the docserver.
        """
        server_name = socket.gethostname()
        subprocess_env = os.environ.copy()
        # The MATLAB Compiler Runtime needs a writable cache directory.
        subprocess_env["MCR_CACHE_ROOT"] = "/tmp/emptydir"
        subprocess_env[
            "LD_LIBRARY_PATH"] = "/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/runtime/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/bin/glnxa64:/mnt/compmusic/%s/MATLAB/MATLAB_Compiler_Runtime/v85/sys/os/glnxa64" % (
                (server_name, ) * 3)
        #subprocess_env["LD_LIBRARY_PATH"] = "/usr/local/MATLAB/MATLAB_Runtime/v85/runtime/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/bin/glnxa64:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/os/glnxa64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/java/jre/glnxa64/jre/lib/amd64/:/usr/local/MATLAB/MATLAB_Runtime/v85/sys/java/jre/glnxa64/jre/lib/amd64/server"

        boundstat, fldmodel = (None, None)
        try:
            # Pre-trained model files stored under a fixed training mbid.
            boundstat = util.docserver_get_filename(
                'd2f729b8-cdc5-4019-ae0d-41695b78ee5b',
                "trainphraseseg",
                "boundstat",
                version="0.1")
            fldmodel = util.docserver_get_filename(
                'd2f729b8-cdc5-4019-ae0d-41695b78ee5b',
                "trainphraseseg",
                "fldmodel",
                version="0.1")

            #boundstat = util.docserver_get_filename('31b52b29-be39-4ccb-98f2-2154140920f9', "trainphraseseg", "boundstat", version="0.1")
            #fldmodel = util.docserver_get_filename('31b52b29-be39-4ccb-98f2-2154140920f9', "trainphraseseg", "fldmodel", version="0.1")
        except util.NoFileException:
            raise Exception('No training files found for recording %s' %
                            musicbrainzid)

        # The binary expects a JSON file listing the scores to segment.
        files = []
        symbtr = compmusic.dunya.makam.get_symbtr(musicbrainzid)
        files.append({'path': fname, 'name': symbtr['name']})

        fp, files_json = tempfile.mkstemp(".json")
        f = open(files_json, 'w')
        json.dump(files, f)
        f.close()
        os.close(fp)

        fp, out_json = tempfile.mkstemp(".json")
        os.close(fp)

        proc = subprocess.Popen([
            "/srv/dunya/phraseSeg segmentWrapper %s %s %s %s" %
            (boundstat, fldmodel, files_json, out_json)
        ],
                                stdout=subprocess.PIPE,
                                shell=True,
                                env=subprocess_env)

        (out, err) = proc.communicate()
        ret = {"segments": []}

        segments_file = open(out_json, 'r')
        segments = segments_file.read()
        # An empty output file means the binary produced no segments.
        if segments == "":
            segments = "[]"
        ret["segments"].append(json.loads(segments))

        os.unlink(files_json)
        os.unlink(out_json)

        return ret