Example #1
    def run(self, className, outfilename, param, dsname, gtname, evalconfig):
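        """Run every evaluation listed in evalconfig for the given dataset and
        ground-truth files, writing an outfilename_<i>.param and outfilename_<i>.result
        file for each evaluation."""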

        try:
            classifier = param['classifier']
            gt = GroundTruth(classifier)
            gt.load(gtname)

            # force the GroundTruth class name to be the one specified by our project file, not
            # the one in the original groundtruth file
            gt.className = className

            ds = DataSet()
            ds.load(dsname)

            # some points may have failed to be analyzed, remove those from the GroundTruth
            pnames = ds.pointNames()
            for pid in list(gt.keys()):
                if pid not in pnames:
                    log.warning(
                        'Removing %s from GroundTruth as it could not be found in the merged dataset'
                        % pid)
                    del gt[pid]

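            # get the training function and its parameters for the requested classifier
            # (the returned newds is not used below; evaluation runs on the dataset loaded above)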
            trainerFun, trainingparam, newds = getTrainer(
                classifier, param, ds)

            # run all the evaluations specified in the evaluation config
            for i, evalparam in enumerate(evalconfig):
                # if we already ran this evaluation, no need to run it again...
                resultFilename = outfilename + '_%d.result' % i
                if exists(resultFilename):
                    log.info('File %s already exists. Skipping evaluation...' %
                             resultFilename)
                    continue

                log.info(
                    'Running evaluation %d for: %s with classifier %s and dataset %s'
                    % (i, outfilename, param['classifier'],
                       param['preprocessing']))
                log.info('    PID: %d, parameters: %s' %
                         (os.getpid(), json.dumps(param)))

                # run evaluation
                confusion = evaluateNfold(evalparam['nfold'], ds, gt,
                                          trainerFun, **trainingparam)

                # write evaluation params & result
                with open(outfilename + '_%d.param' % i, 'w') as f:
                    yaml.dump({'model': param, 'evaluation': evalparam}, f)

                confusion.save(resultFilename)

        except Exception:
            log.error(
                'While doing evaluation with param = %s\nevaluation = %s' %
                (param, evalconfig))
            raise
Example #2
    featureSet = int(sys.argv[4])

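    # SIMCA: the covered-variance and alpha parameters are read from the command line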
    if classifier == 'simca':
        coveredVar = int(sys.argv[5])
        alpha = float(sys.argv[6])

        filename = resultsdir + basename + '_simca_%d_%d_%.1f' % (featureSet, coveredVar, alpha)

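        # skip this parameter combination if a result file already exists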
        if os.path.exists(filename + '.result'):
            print('File exists. Skipping Test.')
        else:
            ds, groundTruth = loadData(datasetName, featureSet)

            from classifier_SIMCA import train_SIMCA
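            # 10-fold cross-validation with the SIMCA trainer on all descriptors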
            confusion = evaluateNfold(10, ds, groundTruth, train_SIMCA,
                                      descriptorNames='*', exclude=[],
                                      alpha=alpha, coveredVariance=coveredVar,
                                      useBoundaryDistance=True)
            print(confusion.results())

            writeResult(filename, confusion)

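    # SVM: the SVM type, kernel, and C/gamma exponents are read from the command line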
    elif classifier == 'svm':
        svmtype = sys.argv[5]
        kernel = sys.argv[6]
        cexp = float(sys.argv[7])
        gammaexp = float(sys.argv[8])

        filename = resultsdir + basename + '_svm_%d_%s_%s_%.1f_%.1f' % (featureSet, svmtype, kernel, cexp, gammaexp)

        if os.path.exists(filename + '.result'):
            print('File exists. Skipping Test.')