Example #1
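# Assumed context: this example targets pymir3 (Python 2). The module
# aliases used below are assumptions about the elided imports, not
# verified paths: spec (wav2spectrogram tool), cent/roll/flat/flux/mfcc
# (spectral features), diff, join, tex (texture windows), stats.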
def features_gtzan(filename, directory=""):
    # Calculate spectrogram (normalizes wavfile)
    converter = spec.Wav2Spectrogram()
    s = converter.convert(open(directory + filename, 'rb'),
                          window_length=2048,
                          dft_length=2048,
                          window_step=1024,
                          spectrum_type='magnitude',
                          save_metadata=True)

    # Extract low-level features, derivatives, and run texture windows

    d = diff.Diff()
    features = (cent.Centroid(), roll.Rolloff(), flat.Flatness(), flux.Flux(),
                mfcc.Mfcc())

    all_feats = None
    for f in features:
        track = f.calc_track(s)  # Feature track
        all_feats = join.Join().join([all_feats, track])
        dtrack = d.calc_track(track)  # First derivative
        all_feats = join.Join().join([all_feats, dtrack])
        ddtrack = d.calc_track(dtrack)  # Second derivative
        all_feats = join.Join().join([all_feats, ddtrack])

    # Texture windows (width 40) over the fully joined feature set
    t = tex.ToTextureWindow().to_texture(all_feats, 40)

    # Mean and variance statistics over the texture windows
    stats_calc = stats.Stats()
    result = stats_calc.stats([t], mean=True, variance=True)
    return result
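
# Hypothetical usage (GTZAN-style filename; path is illustrative):
# feats = features_gtzan("blues.00000.wav", directory="gtzan/")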
Example #2
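# Assumed context: Pool comes from multiprocessing; BandJob, tza_bands,
# tza_calc_textures, and feat_stats are defined elsewhere in this project.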
def tza_bands_parallel(experiment, n_processes=1):
    """
    :type experiment: BandExperiment
    :type n_processes: int
    """
    jobs = []

    with open(experiment.mirex_list_file) as f:
        files = f.read().splitlines()

    for f in files:
        jobs.append(
            BandJob(f,
                    experiment.band_iterator,
                    experiment.band_step,
                    experiment.band_nbands,
                    also_one_band=experiment.also_one_band,
                    lnf_use=experiment.lnf_use,
                    lnf_compensation=experiment.lnf_compensation,
                    lnf_passes=experiment.lnf_passes))

    # Calculate features in parallel
    pool = Pool(processes=n_processes)
    features = pool.map(tza_bands, jobs)
    pool.close()
    pool.join()

    jobs = []
    for f in features:
        jobs.append((f, 100))  # (feature track, texture window length)

    # Calculate texture windows in parallel
    pool = Pool(processes=n_processes)
    textures = pool.map(tza_calc_textures, jobs)
    pool.close()
    pool.join()

    stats = feat_stats.Stats()
    m = stats.stats(textures,
                    mean=experiment.mean,
                    variance=experiment.variance,
                    slope=experiment.slope,
                    limits=experiment.limits,
                    csv=experiment.csv,
                    normalize=experiment.normalize)

    with open(experiment.mirex_scratch_folder + "/" + experiment.output_file,
              "wb") as f:
        m.save(f, restore_state=True)

    return m
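
# Hypothetical usage, given a configured BandExperiment instance:
# m = tza_bands_parallel(experiment, n_processes=4)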
Example #3
    def aggregate(self, feature_files):
        """
        This aggregator is a front-end to the pymir3 stats module. The statistics
        to compute are listed under the simple_aggregation key of the experiment file.

        :param feature_files: a list of FeatureTrack filenames
        :type feature_files: list[str]
        :return:
        :rtype: None

        .. note::
            These keys are expected to be set in the experiment file:
                * ['simple_aggregation']['mean']
                * ['simple_aggregation']['delta']
                * ['simple_aggregation']['variance']
                * ['simple_aggregation']['acceleration']
                * ['simple_aggregation']['slope']
                * ['simple_aggregation']['limits']
                * ['simple_aggregation']['csv']
                * ['simple_aggregation']['normalize']
                * ['simple_aggregation']['texture_windows']
                * ['simple_aggregation']['texture_window_length']
                * ['simple_aggregation']['tw_workers']
                * ['general']['scratch_directory']
                * ['feature_aggregation']['aggregated_output']

        """

        features = load_feature_files(feature_files)

        if self.params['simple_aggregation']['texture_windows']:

            # One job per track: (feature track, window length, output file)
            jobs = []
            for out_idx, f in enumerate(features):
                jobs.append((
                    f,
                    self.params['simple_aggregation']['texture_window_length'],
                    feature_files[out_idx]))

            pool = Pool(
                processes=self.params['simple_aggregation']['tw_workers'])

            pool.map(calc_textures, jobs)

            pool.close()
            pool.join()
            features = None

        if features is None:
            # Reload the feature files, which now contain texture windows
            features = load_feature_files(feature_files)

        stats = feat_stats.Stats()
        m = stats.stats(
            features,
            mean=self.params['simple_aggregation']['mean'],
            delta=self.params['simple_aggregation']['delta'],
            variance=self.params['simple_aggregation']['variance'],
            acceleration=self.params['simple_aggregation']['acceleration'],
            slope=self.params['simple_aggregation']['slope'],
            limits=self.params['simple_aggregation']['limits'],
            csv=self.params['simple_aggregation']['csv'],
            normalize=self.params['simple_aggregation']['normalize'])

        with open(self.params['general']['scratch_directory'] + "/" +
                  self.params['feature_aggregation']['aggregated_output'],
                  "w") as out:
            m.save(out)
Example #4
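    # Fragment from a larger script: assumes `feats` is a per-band feature
    # extraction object (with a .spectrogram attribute and the methods used
    # below) and that MelBand, OneBand, and feat_stats are already imported.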
    import time

    # A single band spanning the spectrogram's full frequency range
    b = OneBand(low=int(feats.spectrogram.metadata.min_freq),
                high=int(feats.spectrogram.metadata.max_freq))

    # Alternative: a mel-scaled filterbank with 10 bands over the same range
    # b = MelBand(low=int(feats.spectrogram.metadata.min_freq),
    #             high=int(feats.spectrogram.metadata.max_freq),
    #             nbands=10)

    for k in b.bands():
        print(k)

    t0 = time.time()
    feats.calculate_features_per_band(b, True)
    t1 = time.time()
    print("Feature extraction took %f seconds" % (t1 - t0))

    print(feats.join_bands(crop=True).data.shape)

    stats = feat_stats.Stats()
    m = stats.stats([feats.joined_features],
                    mean=True,
                    variance=True,
                    slope=False,
                    limits=False,
                    csv=False,
                    normalize=False)

    print(m.data)