Example #1
def plot(input_filename, output_filename, scale=None, dim=0, size=(3.45, 2.0)):
    """Plots the a spectrogram to an output file
    """
    s = feature_track.FeatureTrack().load(input_filename)

    if s.data.ndim > 1:
        d = s.data[:, dim]
    else:
        d = s.data

    min_y = numpy.min(d)
    max_y = numpy.max(d)
    min_time = 0
    max_time = float(len(d)) / s.metadata.sampling_configuration.ofs

    ylabel = s.metadata.feature.split()[dim]
    if scale is not None:
        ylabel += ' ('
        ylabel += str(scale)
        ylabel += ')'

    x_axis = numpy.array(range(len(d))) / \
            float(s.metadata.sampling_configuration.ofs)

    im = plt.plot(x_axis, d)
    plt.xlabel('Time (s)')
    plt.ylabel(ylabel)
    fig = plt.gcf()
    width_inches = size[0]  #/80.0
    height_inches = size[1]  #/80.0
    fig.set_size_inches((width_inches, height_inches))
    plt.savefig(output_filename, bbox_inches='tight')
Example #2
    def run(self, args):
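        # Dump a feature track as plain text, one line per analysis frame;
        # optionally list the stored filenames, prefix each line with its
        # frame time, or normalize each feature to zero mean / unit variance.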
        a = track.FeatureTrack().load(args.infile)
        t = 0

        if args.filename is True:
            print "Stored ", len(a.metadata.filename), " files"
            print a.metadata.filename
            exit()

        b = a.data

        if b.ndim == 1:
            b.shape = (b.size, 1)

        if args.norm:
            b = (b - b.mean(axis=0)) / numpy.maximum(0.00001, b.std(axis=0))

        for n in xrange(b.shape[0]):
            # TODO: update to use metadata
            if args.time:
                print t,
                t += (1.0 / a.metadata.sampling_configuration.ofs)
            for k in xrange(b.shape[1]):
                print b[n, k],
            print ""

        print b.shape
        print a.metadata.feature
Example #3
    def calc_track(self, data, frame_length, window_size, rate=44100):
        t = track.FeatureTrack()
        t.data = td_feats.zero_crossings(data, frame_length, window_size)
        t.metadata.sampling_configuration.fs = rate
        t.metadata.sampling_configuration.ofs = rate / window_size
        t.metadata.sampling_configuration.window_length = frame_length
        t.metadata.feature = "TDZeroCrossings"
        return t
Example #4
    def calc_track_band(self, spectrum, min_freq_bin, max_freq_bin):
        t = track.FeatureTrack()
        t.data = feats.centroid(spectrum.data[min_freq_bin:max_freq_bin])
        t.metadata.sampling_configuration = spectrum.metadata.sampling_configuration
        t.metadata.feature = "Centroid_" + str(min_freq_bin) + "_" +\
            str(max_freq_bin)
        t.metadata.filename = spectrum.metadata.input.name
        t.metadata.input_metadata = copy.deepcopy(spectrum.metadata)

        return t
Example #5
def load_feature_files(feature_files):
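    # Load each file in feature_files as a FeatureTrack and return the
    # tracks as a list.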
    features = []

    for i in feature_files:
        with open(i, "r") as f:
            t = track.FeatureTrack().load(f)
        features.append(t)

    return features
Example #6
    def calc_track(self, spectrum):
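        # Convert a spectrogram into a feature track: each DFT bin becomes a
        # feature column labelled with its frequency.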
        t = track.FeatureTrack()
        t.data = spectrum.data.T

        t.metadata.sampling_configuration = spectrum.metadata.sampling_configuration
        t.metadata.feature = ""
        for f in xrange(spectrum.data.shape[0]):
            t.metadata.feature += "DFT_" + str(spectrum.freq_bin(f)) + "Hz "

        t.metadata.filename = spectrum.metadata.input.name
        return t
Example #7
    def run(self, args):
        s = spectrogram.Spectrogram().load(args.infile)

        t = track.FeatureTrack()
        t.data = feats.centroid(s.data) * \
            s.metadata.sampling_configuration.ofs

        t.metadata.sampling_configuration = s.metadata.sampling_configuration
        t.metadata.feature = "Centroid"
        t.metadata.filename = s.metadata.input.name
        t.save(args.outfile)
Example #8
    def run(self, args):
        print "Joining features"
        features = []

        for a in args.input:
            t = track.FeatureTrack()
            t = t.load(a)
            features.append(t)
            print a.name, t.metadata.feature, t.data.shape

        o = self.join(features)

        o.save(args.output)
Example #9
    def run(self, args):
        print "Calculating stats..."
        feature_tracks = []

        for a in args.infiles:
            f = track.FeatureTrack()
            f = f.load(a)
            feature_tracks.append(f)

        p = self.stats(feature_tracks, args.mean, args.variance, args.slope,
                       args.limits, args.csv, args.normalize)

        p.save(args.outfile)
Example #10
    def calc_track_band(self, spectrum, texture_length, min_freq_bin,
                        max_freq_bin):
        t = track.FeatureTrack()
        t.data = feats.low_energy(spectrum.data[min_freq_bin:max_freq_bin],
                                  texture_length)
        t.metadata.sampling_configuration = spectrum.metadata.sampling_configuration
        t.metadata.feature = "LowEnergy_" + str(min_freq_bin) + "_" +\
            str(max_freq_bin)

        t.metadata.filename = spectrum.metadata.input.name
        t.metadata.input_metadata = copy.deepcopy(spectrum.metadata)

        return t
Example #11
    def run(self, args):
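        # Load a spectrogram, project it onto a filterbank built around the
        # requested central frequencies, and save the result as a feature track.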
        s = spectrogram.Spectrogram().load(args.infile)

        t = track.FeatureTrack()

        H = self.build_filterbank(args.central_frequencies, s)

        t.data = numpy.dot(H, s.data).T

        t.metadata.sampling_configuration = s.metadata.sampling_configuration
        t.metadata.feature = ""
        for f in args.central_frequencies:
            t.metadata.feature += "FB_" + str(f) + "Hz "

        t.metadata.filename = s.metadata.input.name
        t.save(args.outfile)
Example #12
    def run(self, args):
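        # Build a Euclidean self-similarity matrix from a feature track,
        # optionally normalizing each feature to zero mean / unit variance first.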
        o = track.FeatureTrack()

        o = o.load(args.infile)

        p = self_similarity_matrix.SelfSimilarityMatrix()

        data = o.data

        if args.normalize:
            std_p = data.std(axis=0)
            data = (data - data.mean(axis=0))/\
                    numpy.maximum(10**(-6), std_p)

        p.data = self_similarity.self_similarity_euclidean(data)

        p.metadata.feature = o.metadata.feature
        p.metadata.sampling_configuration = o.metadata.sampling_configuration
        p.metadata.filename = o.metadata.filename

        p.save(args.outfile)
Example #13
File: diff.py Project: templeblock/pymir3
    def calc_track(self, t):
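        # First-order difference of the feature track along time, zero-padded
        # so the output keeps the same number of frames as the input.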
        o = track.FeatureTrack()

        if t.data.ndim == 1:
            t.data.shape = (t.data.size, 1)

        pad = numpy.zeros((1, t.data.shape[1]))

        o.data = numpy.vstack((pad, numpy.diff(t.data, axis=0)))

        # Dealing with feature metadata:
        o.metadata = copy.deepcopy(t.metadata)
        o.metadata.input_metadata = copy.deepcopy(t.metadata)
        my_features = t.metadata.feature.split()
        new_features = ""
        for feat in my_features:
            if (len(new_features) > 2) and (new_features[-1] != " "):
                new_features += " "
            new_features = new_features + "diff_" + feat
        o.metadata.feature = new_features

        return o
Example #14
    def join(self, feature_tracks):
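        # Concatenate several feature tracks column-wise into one track,
        # joining their feature-name strings and keeping each input's metadata.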
        data = []
        features = []

        o = track.FeatureTrack()
        o.metadata.input_metadata = []

        for t in feature_tracks:
            if t is None:
                continue

            if t.data.ndim == 1:
                t.data.shape = (t.data.size, 1)
            data.append(t.data)
            features.append(t.metadata.feature)
            o.metadata.input_metadata.append(copy.deepcopy(t.metadata))
            #print a.name, t.metadata.feature, t.data.shape

        o.data = numpy.hstack(data)
        o.metadata.feature = ' '.join(features)
        o.metadata.filename = t.metadata.filename
        o.metadata.sampling_configuration = t.metadata.sampling_configuration

        return o
Example #15
File: diff.py Project: templeblock/pymir3
    def run(self, args):
        t = track.FeatureTrack().load(args.infile)
        o = self.calc_track(t)
        o.save(args.outfile)
Example #16
    def to_texture(self, analysis_track, texture_size):
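        # Summarize analysis frames over a sliding texture window of length
        # texture_size, producing a running mean and variance for every
        # feature column.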

        window_track = ft.FeatureTrack()
        window_track.metadata.sampling_configuration = analysis_track.metadata.sampling_configuration
        feats = analysis_track.metadata.feature.strip().split()

        window_track.metadata.feature = ""
        # is it right to keep this name?
        window_track.metadata.filename = analysis_track.metadata.filename

        w = texture_size
        N = len(analysis_track.data)

        begin = 0

        ret = numpy.array(())
        ret.shape = (0,0)
        dT = analysis_track.data.T
        it = 1 if dT.ndim == 1 else dT.shape[0]

        #print feats, len(feats)

        ts = ""
        ts2 = ""
        for f in feats:
            ts += " tx_mean_" + f
            ts2 += " tx_var_" + f
        ts = (ts + ts2).strip()

        #print ts, len(ts.split(" "))

        window_track.metadata.feature = ts
        window_track.metadata.input_metadata =\
                copy.deepcopy(analysis_track.metadata)

        #print feats
        #print ts

        #print dT.shape, dT[0], N

        n = 0
        S = numpy.array([0.0] * it)
        m = numpy.array([0.0] * it)

        saida = numpy.zeros((analysis_track.data.shape[0], analysis_track.data.shape[1]*2))

        #print "shape da saida:", saida.shape

        #window_track.metadata.feature += " tx_mean_" + feats[i] +\
        #                                         " tx_var_" + feats[i]

        # print "dT[:,:w].shape: ", dT[:,:w].shape
        # print "dT[:,:w]: ", dT[:,:w]

        # print dT[:,:w][0]

        for x in dT[:,:w].T:

            n = n + 1

            n_m = m + (x - m)/n
            n_s = S + (x - m)*(x - n_m)

            m = n_m
            S = n_s

        #print m.shape, (S/n).shape
        y = numpy.concatenate((m, S/n), axis=0)
        saida[n] = y

        for i in range(w, N):

            m = n_m

            n_m = m + (dT[:,i]-m)/n - (dT[:,i-w]-m)/n

            S = S + ( (dT[:,i] - n_m) * (dT[:,i] - m) ) - ( ( dT[:,i-w] - m )*( dT[:,i-w] - n_m ) )

            # print i
            # print "average: ",  numpy.average(dT[:,i-w+1:i+1], axis=1), n_m
            # print "S:", S
            # print "variance: ", S/(n), numpy.var(dT[:,i-w+1:i+1], axis=1)

            y = numpy.concatenate((m, S/n), axis=0)
            saida[i] = y

        #print saida.shape

        ret = saida[w:,:]

        window_track.data = ret
        return window_track
Example #17
    def run(self, args):
        analysis_track = ft.FeatureTrack().load(args.infile)
        texture_size = args.texture_window_size
        window_track = self.to_texture(analysis_track, texture_size)
        window_track.save(args.outfile)
Example #18
    def run(self, args):
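        # Compute per-segment statistics (mean, variance, slope, extrema) of a
        # feature track over the labeled intervals in a label file, optionally
        # writing them as CSV and saving the result as a feature matrix.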
        o = track.FeatureTrack()
        o = o.load(args.trackfile)
        #print o.metadata
        #print o.data

        # Read label file
        onsets = []
        offsets = []
        labels = []
        with open(args.labelfile) as f:
            content = f.readlines()
            for line in content:
                L = line.split()
                #print L
                onsets.append(float(L[0]))
                offsets.append(float(L[1]))
                L[2] = L[2].replace('_', '-')
                L[2] = L[2].replace('-', '+')
                labels.append(str(L[2].split('+')[0]))
                #print onsets[-1], offsets[-1], labels[-1]
        #exit()

        final_output = None
        final_filenames = []
        final_filenames.append(o.metadata.filename)
        if o.data.ndim == 1:
            o.data.shape = (o.data.size, 1)
        feats = o.metadata.feature.split()
        a = numpy.array(feats)
        i = a.argsort()
        ofs = o.metadata.sampling_configuration.ofs
        #print offsets[-1], o.data.shape, o.data.shape[0] / float(ofs)
        #print i

        my_features = o.metadata.feature.split()
        my_features.sort()
        new_features = ""
        if args.mean is True:
            for feat in my_features:
                new_features = new_features + " " + "mean_" + feat
        if args.variance is True:
            for feat in my_features:
                new_features = new_features + " " + "var_" + feat
        if args.slope is True:
            for feat in my_features:
                new_features = new_features + " " + "slope_" + feat
        if args.limits is True:
            for feat in my_features:
                new_features = new_features + " " + "max_" + feat
            for feat in my_features:
                new_features = new_features + " " + "argmax_" + feat
            for feat in my_features:
                new_features = new_features + " " + "min_" + feat
            for feat in my_features:
                new_features = new_features + " " + "argmin_" + feat

        new_features = new_features.strip()

        if args.csv is True:
            sys.stdout.write(new_features.replace(' ', ','))
            sys.stdout.write(',LABEL')
            sys.stdout.write('\n')

        #exit()

        for d in xrange(len(onsets)):
            out = numpy.array([])
            i = a.argsort()

            minN = int(onsets[d] * float(ofs))
            maxN = int(offsets[d] * float(ofs))
            if maxN <= (minN + 1):
                maxN = minN + 2
            #print minN, maxN, onsets[d], offsets[d], o.data.shape[0], ofs

            if args.mean is True:
                out = numpy.hstack((out, o.data[minN:maxN, :].mean(axis=0)[i]))
                #print out.shape, o.data[minN:maxN,:].mean(axis=0).shape

            if args.variance is True:
                out = numpy.hstack((out, o.data[minN:maxN, :].var(axis=0)[i]))

            if args.slope is True:
                variance = o.data[minN:maxN, :].var(axis=0)[i]
                lindata = numpy.zeros(variance.shape)
                # Use a separate loop variable so the sort index 'i' is not
                # clobbered, and regress each feature column against the
                # frame index to get its slope over time.
                for k in xrange(o.data[minN:maxN, :].shape[1]):
                    lindata[k] = scipy.stats.linregress(
                        range(o.data[minN:maxN, :].shape[0]),
                        o.data[minN:maxN, i[k]])[0]

                out = numpy.hstack((out, lindata))

            if args.csv is True:
                for i in xrange(len(out)):
                    sys.stdout.write(str(out[i]))
                    sys.stdout.write(",")
                sys.stdout.write(labels[d])
                sys.stdout.write('\n')

            if final_output is None:
                final_output = out
            else:
                final_output = numpy.vstack((final_output, out))

        p = feature_matrix.FeatureMatrix()
        p.data = final_output.copy()

        if args.normalize:
            std_p = p.data.std(axis=0)
            p.data = (p.data - p.data.mean(axis=0))/\
                    numpy.maximum(10**(-6), std_p)

        p.metadata.sampling_configuration = o.metadata.sampling_configuration
        p.metadata.feature = new_features
        p.metadata.filename = final_filenames
        p.save(args.outfile)
Example #19
    def calculate_features_per_band(self,
                                    frequency_band,
                                    also_one_band=False,
                                    discard_bin_zero=False):
        """
        :param frequency_band: FrequencyBand
        :param also_one_band: boolean
        :param discard_bin_zero: boolean
        :return: list[FeatureTrack]
        """

        flatness = feat_flat.Flatness()
        energy = feat_energy.Energy()
        flux = feat_flux.Flux()
        centroid = feat_centroid.Centroid()
        rolloff = feat_rolloff.Rolloff()
        lowenergy = feat_lowenergy.LowEnergy()

        bands = [b for b in frequency_band.bands()]

        if also_one_band:
            bands.append((int(frequency_band.low), int(frequency_band.high)))

        for b in bands:
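            # Compute each spectral feature (flatness, energy, flux, centroid,
            # rolloff, low energy) over this band's bin range.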
            lowbin = self.spectrogram.freq_bin(b[0])
            if lowbin == 0 and discard_bin_zero:
                lowbin = 1
            highbin = self.spectrogram.freq_bin(b[1])
            #print "calculating features for band in bin range: ", lowbin, highbin

            features = []

            flatness_feature = flatness.calc_track_band(
                self.spectrogram, lowbin, highbin)
            flatness_feature.metadata.feature += ("_" + str(b[0])) + (
                "_" + str(b[1]))
            features.append(flatness_feature)

            energy_feature = energy.calc_track_band(self.spectrogram, lowbin,
                                                    highbin)
            energy_feature.metadata.feature += ("_" + str(b[0])) + ("_" +
                                                                    str(b[1]))
            features.append(energy_feature)

            flux_feature = flux.calc_track_band(self.spectrogram, lowbin,
                                                highbin)
            flux_feature.metadata.feature += ("_" + str(b[0])) + ("_" +
                                                                  str(b[1]))
            features.append(flux_feature)

            centroid_feature = centroid.calc_track_band(
                self.spectrogram, lowbin, highbin)
            centroid_feature.metadata.feature += ("_" + str(b[0])) + (
                "_" + str(b[1]))
            features.append(centroid_feature)

            rolloff_feature = rolloff.calc_track_band(self.spectrogram, lowbin,
                                                      highbin)
            rolloff_feature.metadata.feature += ("_" + str(b[0])) + ("_" +
                                                                     str(b[1]))
            features.append(rolloff_feature)

            lowenergy_feature = lowenergy.calc_track_band(
                self.spectrogram, 10, lowbin, highbin)
            lowenergy_feature.metadata.feature += ("_" + str(b[0])) + (
                "_" + str(b[1]))
            features.append(lowenergy_feature)

            self.features_per_band = len(features)

            self.band_features = np.hstack((self.band_features, features))

        #MFCC hack
        t = track.FeatureTrack()
        t.data = mfcc.mfcc(self.spectrogram, 13)
        t.metadata.sampling_configuration = self.spectrogram.metadata.sampling_configuration
        feature = ""
        for i in range(13):
            feature = feature + "MFCC_" + str(i) + " "
        t.metadata.feature = feature
        t.metadata.filename = self.spectrogram.metadata.input.name

        self.band_features = np.hstack((self.band_features, t))

        #Zero crossings
        t = track.FeatureTrack()
        t.data = tdomf.zero_crossings(self.audio_data, 1024, 512)
        t.metadata.sampling_configuration.fs = self.samplingrate
        t.metadata.sampling_configuration.ofs = self.samplingrate / 1024
        t.metadata.sampling_configuration.window_length = 512
        t.metadata.feature = "TDZeroCrossings"
        t.metadata.filename = self.spectrogram.metadata.input.name

        self.band_features = np.hstack((self.band_features, t))