# app_2.plot_tf()
# plt.show()

sp_vec_1 = app_1.to_array()[0]
sp_vec_2 = app_2.to_array()[0]
print "%1.5f, %1.5f" % (euclid_dist(sp_vec_1, sp_vec_2),
                        hamming_dist(sp_vec_1, sp_vec_2))

# Now the information distance
from PyMP.mp_coder import joint_coding_distortion

# Measure the distortion of joint coding using the approx of the first pattern as the reference
max_rate = 1000       # maximum bitrate allowed (in bits)
search_width = 1024   # maximum time shift allowed (in samples)
info_dist = joint_coding_distortion(sig_occ2, app_1, max_rate, search_width)
info_dist_rev = joint_coding_distortion(sig_occ1, app_2, max_rate, search_width)
print "%1.5f - %1.5f" % (info_dist / target_srr, info_dist_rev / target_srr)

# Building the similarity matrix
# Now load the long version
from PyMP.signals import LongSignal
seg_size = 5 * 8192
long_signal = LongSignal(op.join(os.environ['PYMP_PATH'], 'data/Bach_prelude_40s.wav'),
                         seg_size, mono=True, Noverlap=0.5)
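# Optional sanity check (not strictly needed): LongSignal exposes the number of
# segments as n_seg (used below), so we can verify the segmentation before
# launching the long decomposition.
print "Long signal split into %d segments of %d samples" % (long_signal.n_seg, seg_size)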
# Decomposing the long signal
apps, decays = mp.mp_long(long_signal, dico, target_srr, max_atom_num)

mp._initialize_fftw(apps[0].dico, max_thread_num=1)
dists = np.zeros((long_signal.n_seg, len(apps)))
for idx in range(long_signal.n_seg):
    # print idx
    target_sig = long_signal.get_sub_signal(idx, 1, mono=True,
                                            pad=dico.get_pad() + 1024,
                                            fast_create=True)
    for jdx in range(idx + 1):  # test all preceding segments only
        dists[idx, jdx] = joint_coding_distortion(target_sig, apps[jdx],
                                                  max_rate, 1024,
                                                  debug=0, precut=15,
                                                  initfftw=False)
mp._clean_fftw()

# Remove everything that is under zero
cutscores = np.zeros((long_signal.n_seg, len(apps)))
# Normalize by the reference SRR
cutscores[dists > 0] = dists[dists > 0] / float(target_srr)

plt.figure()
plt.imshow(cutscores, origin='lower')
plt.colorbar()
plt.xlabel('Reference Segment index')
plt.ylabel('Target Segment index')
plt.show()
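# A minimal follow-up sketch (assuming, as above, that a zero entry of cutscores
# means no positive joint-coding gain was found): for each target segment, pick
# the most similar preceding segment according to the normalized scores.
best_ref = np.argmax(cutscores, axis=1)    # best reference column per target row
has_match = cutscores.max(axis=1) > 0      # targets with at least one positive score
for idx in np.nonzero(has_match)[0]:
    print "Segment %d is best explained by segment %d (score %1.3f)" % (
        idx, best_ref[idx], cutscores[idx, best_ref[idx]])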