# NOTE: these snippets assume module-level definitions from the surrounding
# test file: loadDataSet(), utils, nnclassifier, transform(), mergeDataSets(),
# join (os.path.join) and tempdir are all expected to be in scope.
def testMFCC():
    '''Runs a simple test comparing which performs better: MFCC single-Gaussian
    (SG) + Kullback-Leibler distance, or MFCC mean/var + euclidean distance.'''
    ds = loadDataSet()

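    # presumably derives a 'mfcc.var' descriptor from the dataset's stored
    # mfcc covariance, making the mean/var representation available below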
    ds = utils.addVarFromCov(ds, 'mfcc')

    groundTruth = utils.getGroundTruthFromLabel(ds, 'genre')

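    # baseline representation: each track as a flat vector of MFCC means and
    # variances, compared with plain euclidean distance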
    print 'Evaluating 1-NN genre classification using euclidean distance on mfcc.mean and mfcc.var:'
    confusion = nnclassifier.evaluate_1NN(
        ds, groundTruth, 'euclidean',
        {'descriptorNames': ['mfcc.mean', 'mfcc.var']})
    print confusion.results()

    cmfile = join(tempdir, 'confusion_meanvar.html')
    with open(cmfile, 'w') as f:
        f.write(confusion.toHtml())
    print '(wrote confusion matrix to %s)' % cmfile
    print

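    # single-gaussian (SG) representation: each track's MFCC frames are
    # modelled as one gaussian (mean + covariance), and tracks are compared
    # with a Kullback-Leibler-based distance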
    print 'Evaluating 1-NN genre classification using Kullback-Leibler distance on mfcc:'
    confusion = nnclassifier.evaluate_1NN(ds, groundTruth, 'kullbackleibler',
                                          {'descriptorName': 'mfcc'})
    print confusion.results()
    cmfile = join(tempdir, 'confusion_singlegaussian.html')
    with open(cmfile, 'w') as f:
        f.write(confusion.toHtml())
    print '(wrote confusion matrix to %s)' % cmfile
    print


def testClassify():
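    '''Evaluates 1-NN genre classification using a linear combination of a
    Kullback-Leibler distance on mfcc and a euclidean distance on RCA-reduced
    spectral descriptors, sweeping the mixing weight alpha from 0 to 1.'''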
    ds = loadDataSet()

    # get ground truth
    groundTruth = utils.getGroundTruthFromLabel(ds, 'genre')

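    # keep copies of the genre labels and the raw mfcc descriptors, to be
    # merged back in after the transform chain below strips them out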
    ds_genre = transform(ds, 'select', {'descriptorNames': 'genre'})
    ds_mfcc = transform(ds, 'select', {'descriptorNames': 'mfcc*'})

    # transform dataset: keep only the spectral descriptors plus the genre
    # label, make all descriptors fixed-length, gaussianize their
    # distributions, then project them onto 10 dimensions with RCA using
    # genre as the class label
    ds = transform(ds, 'select', {'descriptorNames': ['spectral*', 'genre']})
    ds_base = transform(ds, 'fixlength', {'descriptorNames': '*'})
    ds = transform(ds_base, 'gaussianize')
    ds = transform(ds, 'rca', {'dimension': 10, 'classLabel': 'genre'})
    # merge the previously kept genre and mfcc descriptors back in
    ds = mergeDataSets(ds, ds_genre)
    ds = mergeDataSets(ds, ds_mfcc)

    # launch classification/evaluation
    from nnclassifier import evaluate_1NN

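    # sweep the mixing weight: alpha weights the Kullback-Leibler distance on
    # mfcc, (1 - alpha) the euclidean distance on the rca* descriptors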
    results = []
    # iterate over integer steps to avoid floating-point accumulation drift
    for step in range(11):
        alpha = step / 10.0
        print 'alpha =', alpha
        confusion = evaluate_1NN(
            ds, groundTruth, 'linearcombination', {
                'mfcc_kl': {
                    'name': 'kullbackleibler',
                    'weight': alpha,
                    'params': {
                        'descriptorName': 'mfcc'
                    }
                },
                'spectral_euclidean': {
                    'name': 'euclidean',
                    'weight': 1 - alpha,
                    'params': {
                        'descriptorNames': 'rca*'
                    }
                }
            })
        good = confusion.correct()
        total = confusion.total()
        print 'correctly classified:', good, 'out of', total, \
            '(%d%%)' % (100 * good / total)
        results.append(good)

    # display results
    plotResults = False
    if plotResults:
        import pylab
        pylab.plot(results)
        pylab.show()
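

# A minimal, hypothetical entry point for running both tests; the original
# snippets do not define one.
if __name__ == '__main__':
    testMFCC()
    testClassify()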