# NOTE(review): this chunk starts inside the command-line flag-parsing loop;
# the enclosing while/if statements are above this view, so the indentation of
# the first three statements is a best-effort reconstruction — verify upstream.
        print 'do_resample =', do_resample
    else:
        break
    sys.argv.pop(1)

# load song: positional args left after flag parsing are
# argv[1] = song matfile, argv[2] = codebook matfile
songpath = sys.argv[1]
print 'song file:',songpath
dictpath = sys.argv[2]
print 'codebook file:',dictpath

# feats: beat-chroma patterns extracted from the song matfile,
# using the flags parsed above
feats = FEATS.features_from_matfile(songpath,pSize=pSize,
                                    usebars=usebars,keyInv=keyInv,
                                    songKeyInv=songKeyInv,
                                    positive=positive,
                                    do_resample=do_resample)

# model: load the precomputed codebook and wrap it in a Model
mat = scipy.io.loadmat(dictpath)
codebook = mat['codebook']
model = MODEL.Model(codebook)

# predict: best codeword index per pattern + per-pattern distortion
best_code_per_pattern, avg_dist = model.predicts(feats)

# report distortion (per... pixel? patch point?)
print 'average distortion:', np.average(avg_dist)

# build original and encoding
# presumably 12 chroma bins per beat, so this is the patch length in beats;
# note: Python 2 integer division — TODO confirm
patch_len = codebook.shape[1]/12
def next_track(self, auto_bar=None):
    """
    Return a feature matrix for the next matlab file, or None on error.

    If auto_bar is None, features are computed at a single offset
    (random in [0,4) when self._randoffset is set, otherwise 0).
    If auto_bar is given, it must expose predicts(); features are then
    computed for every offset in [0, min(patternsize, 4)) and the offset
    with the smallest average encoding distortion is returned.

    When iterating the data exactly once (self._oneFullIter), raises
    StopIteration after the last file and resets the index for reuse.
    """
    # pick the next matfile: random draw, or sequential for a single pass
    if not self._oneFullIter:
        matfile = self._matfiles[np.random.randint(len(self._matfiles))]
    else:
        if self._fileidx >= len(self._matfiles):
            self._fileidx = 0  # reset so the iterator can be reused
            raise StopIteration
        matfile = self._matfiles[self._fileidx]
        self._fileidx += 1
    # offset into the bar
    offset = 0
    if self._randoffset:
        offset = np.random.randint(4)
    # BUGFIX: use 'is None' instead of '== None' (PEP 8; also robust
    # against objects that overload __eq__)
    if auto_bar is None:
        return features.features_from_matfile(
            matfile, pSize=self._pSize, usebars=self._usebars,
            keyInv=self._keyInv, songKeyInv=self._songKeyInv,
            positive=self._positive, do_resample=self._do_resample,
            partialbar=self._partialbar, offset=offset)
    # auto_bar is assumed to contain a model: predict on every offset
    # up to 4 and keep the features with the lowest average distortion
    best_dist = np.inf
    best_feats = None
    realSize = self._pSize
    if self._partialbar > 0:
        realSize = self._partialbar
    # we go only until 4
    for offset in range(min(realSize, 4)):
        feats = features.features_from_matfile(
            matfile, pSize=self._pSize, usebars=self._usebars,
            keyInv=self._keyInv, songKeyInv=self._songKeyInv,
            positive=self._positive, do_resample=self._do_resample,
            partialbar=self._partialbar, offset=offset)
        # BUGFIX: 'feats == None' on a NumPy array is elementwise and
        # makes the 'if' raise ValueError; identity test is correct
        if feats is None:
            continue
        # per-pattern distortion from the model (codes themselves unused)
        _, avg_dist = auto_bar.predicts(feats)
        d = np.average(avg_dist)
        if d < best_dist:
            best_dist = d
            best_feats = feats
    # done, return best features
    return best_feats
# analysis_dict2 analysis_dict2 = {'segstart':seg_start,'chromas':pitches, 'beatstart':beat_start,'barstart':bar_start, 'duration':duration} tzan_feats = features.get_features(analysis_dict2,pSize=8,usebars=2, keyInv=True,songKeyInv=False, positive=False,do_resample=True, btchroma_barbts=None) tzan_feats = tzan_feats[np.nonzero(np.sum(tzan_feats,axis=1))] print 'features from tzan data computed, shape =',tzan_feats.shape # feature from matfile mat_feats = features.features_from_matfile(tmpfilemat,pSize=8,usebars=2, keyInv=True,songKeyInv=False, positive=False,do_resample=True) mat_feats = mat_feats[np.nonzero(np.sum(mat_feats,axis=1))] print 'features from matfile computed, shape =',mat_feats.shape # features from matfile old school import data_iterator import feats_utils as FU data_iter = data_iterator.DataIterator() data_iter.setMatfiles([tmpfilemat]) data_iter.useBars(2) data_iter.stopAfterOnePass(True) featsNorm = [FU.normalize_pattern_maxenergy(p,8,True,False).flatten() for p in data_iter] featsNorm = np.array(featsNorm) res = [np.sum(r) > 0 for r in featsNorm] res2 = np.where(res)
def next_track(self, auto_bar=None):
    """
    Return a feature matrix for the next matlab file, or None on error.

    If auto_bar is None, features are computed at a single offset
    (random in [0,4) when self._randoffset is set, otherwise 0).
    If auto_bar is given, it must expose predicts(); features are then
    computed for every offset in [0, min(patternsize, 4)) and the offset
    with the smallest average encoding distortion is returned.

    When iterating the data exactly once (self._oneFullIter), raises
    StopIteration after the last file and resets the index for reuse.
    """
    # pick the next matfile: random draw, or sequential for a single pass
    if not self._oneFullIter:
        matfile = self._matfiles[np.random.randint(len(self._matfiles))]
    else:
        if self._fileidx >= len(self._matfiles):
            self._fileidx = 0  # reset so the iterator can be reused
            raise StopIteration
        matfile = self._matfiles[self._fileidx]
        self._fileidx += 1
    # offset into the bar
    offset = 0
    if self._randoffset:
        offset = np.random.randint(4)
    # BUGFIX: use 'is None' instead of '== None' (PEP 8; also robust
    # against objects that overload __eq__)
    if auto_bar is None:
        return features.features_from_matfile(
            matfile, pSize=self._pSize, usebars=self._usebars,
            keyInv=self._keyInv, songKeyInv=self._songKeyInv,
            positive=self._positive, do_resample=self._do_resample,
            partialbar=self._partialbar, offset=offset,
        )
    # auto_bar is assumed to contain a model: predict on every offset
    # up to 4 and keep the features with the lowest average distortion
    best_dist = np.inf
    best_feats = None
    realSize = self._pSize
    if self._partialbar > 0:
        realSize = self._partialbar
    # we go only until 4
    for offset in range(min(realSize, 4)):
        feats = features.features_from_matfile(
            matfile, pSize=self._pSize, usebars=self._usebars,
            keyInv=self._keyInv, songKeyInv=self._songKeyInv,
            positive=self._positive, do_resample=self._do_resample,
            partialbar=self._partialbar, offset=offset,
        )
        # BUGFIX: 'feats == None' on a NumPy array is elementwise and
        # makes the 'if' raise ValueError; identity test is correct
        if feats is None:
            continue
        # per-pattern distortion from the model (codes themselves unused)
        _, avg_dist = auto_bar.predicts(feats)
        d = np.average(avg_dist)
        if d < best_dist:
            best_dist = d
            best_feats = feats
    # done, return best features
    return best_feats
# NOTE(review): this chunk starts inside the command-line flag-parsing loop;
# the enclosing while/if statements are above this view, so the indentation of
# the first three statements is a best-effort reconstruction — verify upstream.
        print 'do_resample =', do_resample
    else:
        break
    sys.argv.pop(1)

# load song: positional args left after flag parsing are
# argv[1] = song matfile, argv[2] = codebook matfile
songpath = sys.argv[1]
print 'song file:', songpath
dictpath = sys.argv[2]
print 'codebook file:', dictpath

# feats: beat-chroma patterns extracted from the song matfile,
# using the flags parsed above
feats = FEATS.features_from_matfile(songpath, pSize=pSize,
                                    usebars=usebars, keyInv=keyInv,
                                    songKeyInv=songKeyInv,
                                    positive=positive,
                                    do_resample=do_resample)

# model: load the precomputed codebook and wrap it in a Model
mat = scipy.io.loadmat(dictpath)
codebook = mat['codebook']
model = MODEL.Model(codebook)

# predict: best codeword index per pattern + per-pattern distortion
best_code_per_pattern, avg_dist = model.predicts(feats)

# report distortion (per... pixel? patch point?)
print 'average distortion:', np.average(avg_dist)

# build original and encoding