Code example #1
def test_active_set_selection():
    params = ParameterContainer().load(
        filename=os.path.join('material', 'test.parameters.yaml'))
    params['active_set'] = 'set1'
    params.process(create_directories=False, create_parameter_hints=False)

    nose.tools.eq_(params.get_path('learner.parameters.value1'), 'learner2')

    params = ParameterContainer().load(
        filename=os.path.join('material', 'test.parameters.yaml'))
    params['active_set'] = 'set2'
    params.process(create_directories=False, create_parameter_hints=False)

    nose.tools.eq_(params.get_path('learner.parameters.value1'), 'learner3')
Code example #2
def test_processing_chain():
    FeatureExtractor(store=True, overwrite=True).extract(
        audio_file=os.path.join('material', 'test.wav'),
        extractor_name='mfcc',
        extractor_params={'mfcc': {
            'n_mfcc': 10
        }},
        storage_paths={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    # Test #1
    test_recipe = 'mfcc=0-5'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)

    feature_repository = FeatureRepository(
        filename_dict={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_container = feature_stacker.process(
        feature_data=feature_repository)

    feature_chain = ProcessingChain()
    feature_chain.append(feature_stacker)

    feature_container_chain = feature_chain.process(data=feature_repository)

    numpy.testing.assert_array_equal(feature_container.feat,
                                     feature_container_chain.feat)
Code example #3
def test_data_processor():
    FeatureExtractor(store=True, overwrite=True).extract(
        audio_file=os.path.join('material', 'test.wav'),
        extractor_name='mfcc',
        extractor_params={'mfcc': {
            'n_mfcc': 10
        }},
        storage_paths={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    # Test #1
    test_recipe = 'mfcc=0-5'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)

    feature_repository = FeatureRepository(
        filename_dict={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_container = feature_stacker.process(
        feature_data=feature_repository)

    ds = DataSequencer(frames=10, hop=10, padding=False)
    target_data = ds.process(data=feature_container.feat[0])

    dp = DataProcessor(feature_processing_chain=ProcessingChain(
        [feature_stacker]),
                       data_processing_chain=ProcessingChain([ds]))
    processed_data, feature_matrix_size = dp.process(
        feature_data=feature_repository)

    numpy.testing.assert_array_equal(target_data, processed_data[:, 0, :, :])
Code example #4
File: analyzer.py  Project: ufield/dcase2017work
    def _set_params(self, project_name, set_id):
        project_base = env['MYWORK_BASE'] + '/main'
        params = ParameterContainer(
            project_base=project_base,
            path_structure={
                'feature_extractor':
                ['dataset', 'feature_extractor.parameters.*'],
                'feature_normalizer':
                ['dataset', 'feature_extractor.parameters.*'],
                'learner': [
                    'dataset', 'feature_extractor', 'feature_stacker',
                    'feature_normalizer', 'feature_aggregator', 'learner'
                ],
                'recognizer': [
                    'dataset', 'feature_extractor', 'feature_stacker',
                    'feature_normalizer', 'feature_aggregator', 'learner',
                    'recognizer'
                ],
            })

        params.load(filename=project_base + '/parameters/' + project_name +
                    '.defaults.yaml')
        params['active_set'] = set_id  # e.g. 'crnn'
        params.process()  # Convert into the parameter structure used at run time?

        return params
Code example #5
def test_path():
    params = ParameterContainer().load(
        filename=os.path.join('material', 'test.parameters.yaml'))
    params.process(create_directories=False, create_parameter_hints=False)

    nose.tools.assert_list_equal(
        sorted(list(params.get_path('path.feature_extractor').keys())),
        ['featA', 'featB'])
    nose.tools.assert_list_equal(
        sorted(list(params.get_path('path.feature_normalizer').keys())),
        ['featA', 'featB'])

    nose.tools.eq_(
        len(
            params.get_path('path.learner').replace(
                params.get_path('path.system_base'), '').split('/')),
        len(params.path_structure['learner']) + 1)
    nose.tools.eq_(
        len(
            params.get_path('path.recognizer').replace(
                params.get_path('path.system_base'), '').split('/')),
        len(params.path_structure['recognizer']) + 1)
Code example #6
def get_learner_params(parameters_filename, parameter_set):

    # Initialize ParameterContainer
    params = ParameterContainer(
        project_base=os.path.dirname(os.path.realpath(__file__)))

    # Load default parameters from a file
    params.load(filename=parameters_filename)

    params['active_set'] = parameter_set

    # Process parameters
    params.process()

    return DottedDict(params.get_path('learner')['parameters'])
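A minimal usage sketch for the helper above; the parameters file name and set id are hypothetical placeholders, and ParameterContainer/DottedDict are assumed to be imported as in the surrounding snippets.

# Hypothetical call to get_learner_params(); 'task1.defaults.yaml' and 'set1' are placeholder values.
learner_params = get_learner_params(
    parameters_filename='task1.defaults.yaml',
    parameter_set='set1')
print(learner_params)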
Code example #7
def learner_batcher_params(parameter_set):
    # Load default parameters from a file
    default_parameters_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'parameters',
                                               'task1' + '.defaults.yaml')

    # Initialize ParameterContainer
    params = ParameterContainer(project_base=os.path.dirname(os.path.realpath(__file__)))

    # Load default parameters from a file
    params.load(filename=default_parameters_filename)

    params['active_set'] = parameter_set

    # Process parameters
    params.process()

    learner_params = DottedDict(params.get_path('learner')['parameters'])

    return learner_params
Code example #8
def main(argv):
    numpy.random.seed(123456)  # let's make randomization predictable

    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            DCASE 2017
            Task 4: Large-scale weakly supervised sound event detection for smart cars
            ---------------------------------------------
                Carnegie Mellon University
                Author:  Rohan Badlani ( [email protected] )

            System description
            The baseline system for task 4 in DCASE 2017 Challenge.
                Features: log mel-band energies
                Classifier: MLP
        '''))

    # Setup argument handling
    parser.add_argument('-m',
                        '--mode',
                        choices=('dev', 'challenge'),
                        default=None,
                        help="Selector for system mode",
                        required=False,
                        dest='mode',
                        type=str)

    parser.add_argument('-p',
                        '--parameters',
                        help='parameter file override',
                        dest='parameter_override',
                        required=False,
                        metavar='FILE',
                        type=argument_file_exists)

    parser.add_argument('-s',
                        '--parameter_set',
                        help='Parameter set id',
                        dest='parameter_set',
                        required=False,
                        type=str)

    parser.add_argument("-n",
                        "--node",
                        help="Node mode",
                        dest="node_mode",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_sets",
                        help="List of available parameter sets",
                        dest="show_set_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_datasets",
                        help="List of available datasets",
                        dest="show_dataset_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_parameters",
                        help="Show parameters",
                        dest="show_parameters",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_eval",
                        help="Show evaluated setups",
                        dest="show_eval",
                        action='store_true',
                        required=False)

    parser.add_argument("-o",
                        "--overwrite",
                        help="Overwrite mode",
                        dest="overwrite",
                        action='store_true',
                        required=False)

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    # Parse arguments
    args = parser.parse_args()

    # Load default parameters from a file
    default_parameters_filename = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        os.path.splitext(os.path.basename(__file__))[0] + '.defaults.yaml')
    if args.parameter_set:
        parameters_sets = args.parameter_set.split(',')
    else:
        parameters_sets = [None]

    for parameter_set in parameters_sets:
        # Initialize ParameterContainer
        params = ParameterContainer(
            project_base=os.path.dirname(os.path.realpath(__file__)))

        # Load default parameters from a file
        params.load(filename=default_parameters_filename)

        if args.parameter_override:
            # Override parameters from a file
            params.override(override=args.parameter_override)

        if parameter_set:
            # Override active_set
            params['active_set'] = parameter_set

        # Process parameters
        params.process()

        # Force overwrite
        if args.overwrite:
            params['general']['overwrite'] = True

        # Override dataset mode from arguments
        if args.mode == 'dev':
            # Set dataset to development
            params['dataset']['method'] = 'development'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        elif args.mode == 'challenge':
            # Set dataset to training set for challenge
            params['dataset']['method'] = 'challenge_train'
            params['general']['challenge_submission_mode'] = True
            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        if args.node_mode:
            params['general']['log_system_progress'] = True
            params['general']['print_system_progress'] = False

        # Force ascii progress bar under Windows console
        if platform.system() == 'Windows':
            params['general']['use_ascii_progress_bar'] = True

        # Setup logging
        setup_logging(parameter_container=params['logging'])

        app = CustomAppCore(
            name='DCASE 2017::Acoustic Scene Classification / Baseline System',
            params=params,
            system_desc=params.get('description'),
            system_parameter_set_id=params.get('active_set'),
            setup_label='Development setup',
            log_system_progress=params.get_path('general.log_system_progress'),
            show_progress_in_console=params.get_path(
                'general.print_system_progress'),
            use_ascii_progress_bar=params.get_path(
                'general.use_ascii_progress_bar'))

        # Show parameter set list and exit
        if args.show_set_list:
            params_ = ParameterContainer(
                project_base=os.path.dirname(os.path.realpath(__file__))).load(
                    filename=default_parameters_filename)

            if args.parameter_override:
                # Override parameters from a file
                params_.override(override=args.parameter_override)
            if 'sets' in params_:
                app.show_parameter_set_list(set_list=params_['sets'])

            return

        # Show dataset list and exit
        if args.show_dataset_list:
            app.show_dataset_list()
            return

        # Show system parameters
        if params.get_path(
                'general.log_system_parameters') or args.show_parameters:
            app.show_parameters()

        # Show evaluated systems
        if args.show_eval:
            app.show_eval()
            return

        # Initialize application
        # ==================================================
        if params['flow']['initialize']:
            app.initialize()

        # Extract features for all audio files in the dataset
        # ==================================================
        if params['flow']['extract_features']:
            app.feature_extraction()

        # Prepare feature normalizers
        # ==================================================
        if params['flow']['feature_normalizer']:
            app.feature_normalization()

        # System training
        # ==================================================
        if params['flow']['train_system']:
            app.system_training()

        # System evaluation
        if not args.mode or args.mode == 'dev':

            # System testing
            # ==================================================
            if params['flow']['test_system']:
                app.system_testing()

            # System evaluation
            # ==================================================
            if params['flow']['evaluate_system']:
                app.system_evaluation()

        # System evaluation in challenge mode
        elif args.mode == 'challenge':
            # Set dataset to testing set for challenge
            params['dataset']['method'] = 'challenge_test'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters('dataset')

            if params['general']['challenge_submission_mode']:
                # If in submission mode, save results in separate folder for easier access
                params['path']['recognizer'] = params.get_path(
                    'path.recognizer_challenge_output')

            challenge_app = CustomAppCore(
                name=
                'DCASE 2017::Acoustic Scene Classification / Baseline System',
                params=params,
                system_desc=params.get('description'),
                system_parameter_set_id=params.get('active_set'),
                setup_label='Evaluation setup',
                log_system_progress=params.get_path(
                    'general.log_system_progress'),
                show_progress_in_console=params.get_path(
                    'general.print_system_progress'),
                use_ascii_progress_bar=params.get_path(
                    'general.use_ascii_progress_bar'))
            # Initialize application
            if params['flow']['initialize']:
                challenge_app.initialize()

            # Extract features for all audio files in the dataset
            if params['flow']['extract_features']:
                challenge_app.feature_extraction()

            # System testing
            if params['flow']['test_system']:
                if params['general']['challenge_submission_mode']:
                    params['general']['overwrite'] = True

                challenge_app.system_testing()

                if params['general']['challenge_submission_mode']:
                    challenge_app.ui.line(" ")
                    challenge_app.ui.line(
                        "Results for the challenge are stored at [" +
                        params.get_path('path.recognizer_challenge_output') +
                        "]")
                    challenge_app.ui.line(" ")

            # System evaluation if not in challenge submission mode
            if params['flow']['evaluate_system']:
                challenge_app.system_evaluation()

    return 0
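The main() above is meant to be run as a script; a minimal, assumed entry point in the style of the DCASE 2017 baseline scripts (not part of the snippet above) would be:

import sys

# Assumed entry point; the try/except mirrors the usual DCASE baseline scripts.
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv))
    except (ValueError, IOError) as e:
        sys.exit(e)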
Code example #9
def test_process():
    FeatureExtractor(store=True, overwrite=True).extract(
        audio_file=os.path.join('material', 'test.wav'),
        extractor_name='mfcc',
        extractor_params={'mfcc': {
            'n_mfcc': 10
        }},
        storage_paths={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    # Test #1
    test_recipe = 'mfcc=0-5'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)

    feature_repository = FeatureRepository(
        filename_list={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_container = feature_stacker.process(
        feature_repository=feature_repository)

    nose.tools.assert_list_equal(sorted(list(feature_container.keys())),
                                 ['feat', 'meta', 'stat'])

    nose.tools.eq_(feature_container.channels, 1)
    nose.tools.eq_(feature_container.frames, 501)
    nose.tools.eq_(feature_container.vector_length, 6)

    nose.tools.eq_(feature_container.meta['audio_file'], 'material/test.wav')

    # Stat
    nose.tools.eq_(feature_container.stat[0]['N'], 501)
    nose.tools.assert_list_equal(
        sorted(list(feature_container.stat[0].keys())),
        ['N', 'S1', 'S2', 'mean', 'std'])

    # Feat
    # Shape
    nose.tools.eq_(feature_container.feat[0].shape[0], 501)
    nose.tools.eq_(feature_container.feat[0].shape[1], 6)

    nose.tools.eq_(feature_container.shape[0], 501)
    nose.tools.eq_(feature_container.shape[1], 6)

    # Test #2
    test_recipe = 'mfcc=1,2,3,4'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)

    feature_repository = FeatureRepository(
        filename_list={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_container = feature_stacker.process(
        feature_repository=feature_repository)

    nose.tools.assert_list_equal(sorted(list(feature_container.keys())),
                                 ['feat', 'meta', 'stat'])

    nose.tools.eq_(feature_container.channels, 1)
    nose.tools.eq_(feature_container.frames, 501)
    nose.tools.eq_(feature_container.vector_length, 4)

    nose.tools.eq_(feature_container.meta['audio_file'], 'material/test.wav')

    # Stat
    nose.tools.eq_(feature_container.stat[0]['N'], 501)
    nose.tools.assert_list_equal(
        sorted(list(feature_container.stat[0].keys())),
        ['N', 'S1', 'S2', 'mean', 'std'])

    # Feat
    # Shape
    nose.tools.eq_(feature_container.feat[0].shape[0], 501)
    nose.tools.eq_(feature_container.feat[0].shape[1], 4)

    nose.tools.eq_(feature_container.shape[0], 501)
    nose.tools.eq_(feature_container.shape[1], 4)

    # Test #3
    test_recipe = 'mfcc'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)

    feature_repository = FeatureRepository(
        filename_list={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_container = feature_stacker.process(
        feature_repository=feature_repository)

    nose.tools.assert_list_equal(sorted(list(feature_container.keys())),
                                 ['feat', 'meta', 'stat'])

    nose.tools.eq_(feature_container.channels, 1)
    nose.tools.eq_(feature_container.frames, 501)
    nose.tools.eq_(feature_container.vector_length, 10)

    nose.tools.eq_(feature_container.meta['audio_file'], 'material/test.wav')

    # Stat
    nose.tools.eq_(feature_container.stat[0]['N'], 501)
    nose.tools.assert_list_equal(
        sorted(list(feature_container.stat[0].keys())),
        ['N', 'S1', 'S2', 'mean', 'std'])

    # Feat
    # Shape
    nose.tools.eq_(feature_container.feat[0].shape[0], 501)
    nose.tools.eq_(feature_container.feat[0].shape[1], 10)

    nose.tools.eq_(feature_container.shape[0], 501)
    nose.tools.eq_(feature_container.shape[1], 10)
Code example #10
def test_normalizer():
    FeatureExtractor(store=True, overwrite=True).extract(
        audio_file=os.path.join('material', 'test.wav'),
        extractor_name='mfcc',
        extractor_params={'mfcc': {
            'n_mfcc': 10
        }},
        storage_paths={'mfcc': os.path.join('material', 'test.mfcc.cpickle')})

    # Test 1
    test_recipe = 'mfcc=0-5'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)
    feature_container = FeatureContainer().load(
        filename=os.path.join('material', 'test.mfcc.cpickle'))
    feature_normalizer = FeatureNormalizer().accumulate(
        feature_container=feature_container).finalize()

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_normalizer = feature_stacker.normalizer(
        normalizer_list={'mfcc': feature_normalizer})

    nose.tools.eq_(feature_normalizer['N'][0][0], 501)
    nose.tools.eq_(feature_normalizer['mean'][0].shape[0], 1)
    nose.tools.eq_(feature_normalizer['mean'][0].shape[1], 6)

    nose.tools.eq_(feature_normalizer['std'][0].shape[0], 1)
    nose.tools.eq_(feature_normalizer['std'][0].shape[1], 6)

    # Test 2
    test_recipe = 'mfcc=1,2,3,4'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)
    feature_container = FeatureContainer().load(
        filename=os.path.join('material', 'test.mfcc.cpickle'))
    feature_normalizer = FeatureNormalizer().accumulate(
        feature_container=feature_container).finalize()

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_normalizer = feature_stacker.normalizer(
        normalizer_list={'mfcc': feature_normalizer})

    nose.tools.eq_(feature_normalizer['N'][0][0], 501)
    nose.tools.eq_(feature_normalizer['mean'][0].shape[0], 1)
    nose.tools.eq_(feature_normalizer['mean'][0].shape[1], 4)

    nose.tools.eq_(feature_normalizer['std'][0].shape[0], 1)
    nose.tools.eq_(feature_normalizer['std'][0].shape[1], 4)

    # Test 3
    test_recipe = 'mfcc'
    test_recipe_parsed = ParameterContainer()._parse_recipe(recipe=test_recipe)
    feature_container = FeatureContainer().load(
        filename=os.path.join('material', 'test.mfcc.cpickle'))
    feature_normalizer = FeatureNormalizer().accumulate(
        feature_container=feature_container).finalize()

    feature_stacker = FeatureStacker(recipe=test_recipe_parsed)
    feature_normalizer = feature_stacker.normalizer(
        normalizer_list={'mfcc': feature_normalizer})

    nose.tools.eq_(feature_normalizer['N'][0][0], 501)
    nose.tools.eq_(feature_normalizer['mean'][0].shape[0], 1)
    nose.tools.eq_(feature_normalizer['mean'][0].shape[1], 10)

    nose.tools.eq_(feature_normalizer['std'][0].shape[0], 1)
    nose.tools.eq_(feature_normalizer['std'][0].shape[1], 10)
Code example #11
def test_recipe_parse():
    # Test #1
    test_recipe = 'mel'
    params = ParameterContainer()
    parsed_recipe = params._parse_recipe(recipe=test_recipe)

    # correct amount of items
    nose.tools.eq_(len(parsed_recipe), 1)

    # method is correct
    nose.tools.eq_(parsed_recipe[0]['method'], 'mel')

    # Test #2
    test_recipe = 'mel=0;mfcc=1'
    parsed_recipe = params._parse_recipe(recipe=test_recipe)

    # correct amount of items
    nose.tools.eq_(len(parsed_recipe), 2)

    # methods are correct
    nose.tools.eq_(parsed_recipe[0]['method'], 'mel')
    nose.tools.eq_(parsed_recipe[1]['method'], 'mfcc')

    # vector-index is correct / channel
    nose.tools.eq_(parsed_recipe[0]['vector-index']['channel'], 0)
    nose.tools.eq_(parsed_recipe[1]['vector-index']['channel'], 1)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['full'], True)
    nose.tools.eq_(parsed_recipe[1]['vector-index']['full'], True)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['selection'], False)
    nose.tools.eq_(parsed_recipe[1]['vector-index']['selection'], False)

    # Test #3
    test_recipe = 'mel=1-20'
    parsed_recipe = params._parse_recipe(recipe=test_recipe)

    # correct amount of items
    nose.tools.eq_(len(parsed_recipe), 1)

    # method is correct
    nose.tools.eq_(parsed_recipe[0]['method'], 'mel')

    # vector-index is correct / channel
    nose.tools.eq_(parsed_recipe[0]['vector-index']['channel'], 0)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['full'], False)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['selection'], False)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['start'], 1)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['end'], 21)

    # Test #4
    test_recipe = 'mel=1,2,4,5'
    parsed_recipe = params._parse_recipe(recipe=test_recipe)

    # correct amount of items
    nose.tools.eq_(len(parsed_recipe), 1)

    # extractor is correct
    nose.tools.eq_(parsed_recipe[0]['method'], 'mel')

    # vector-index is correct / channel
    nose.tools.eq_(parsed_recipe[0]['vector-index']['channel'], 0)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['full'], False)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['selection'], True)
    nose.tools.assert_list_equal(parsed_recipe[0]['vector-index']['vector'],
                                 [1, 2, 4, 5])

    # Test #5
    test_recipe = 'mel=1:1-20'
    parsed_recipe = params._parse_recipe(recipe=test_recipe)

    # correct amount of items
    nose.tools.eq_(len(parsed_recipe), 1)

    # method is correct
    nose.tools.eq_(parsed_recipe[0]['method'], 'mel')

    # vector-index is correct / channel
    nose.tools.eq_(parsed_recipe[0]['vector-index']['channel'], 1)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['full'], False)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['selection'], False)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['start'], 1)
    nose.tools.eq_(parsed_recipe[0]['vector-index']['end'], 21)
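Judging from the assertions above, a recipe string is a semicolon-separated list of items of the form '<method>[=[<channel>:]<range or list>]'. A small sketch combining the tested cases (same ParameterContainer as in the test; output keys taken from the assertions above):

# Sketch only; field names ('method', 'vector-index', ...) are those asserted in test_recipe_parse().
recipe = 'mel=1:1-20;mfcc=1,2,4,5'
parsed = ParameterContainer()._parse_recipe(recipe=recipe)
for item in parsed:
    # 'mel' -> channel 1 with start=1, end=21; 'mfcc' -> explicit selection [1, 2, 4, 5]
    print(item['method'], item['vector-index'])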
Code example #12
def main(argv):
    numpy.random.seed(123456)  # let's make randomization predictable

    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            DCASE 2017
            Task 1: Acoustic Scene Classification
            Example of how to customize applications
            ---------------------------------------------
                Tampere University of Technology / Audio Research Group
                Author:  Toni Heittola ( [email protected] )

            System description
                A system for acoustic scene classification, using the DCASE 2013 Challenge evaluation dataset.
                Features: mean and std of centroid + zero crossing rate inside 1 second non-overlapping segments
                Classifier: SVM

        '''))

    # Setup argument handling
    parser.add_argument('-m', '--mode',
                        choices=('dev', 'challenge'),
                        default=None,
                        help="Selector for system mode",
                        required=False,
                        dest='mode',
                        type=str)

    parser.add_argument('-p', '--parameters',
                        help='parameter file override',
                        dest='parameter_override',
                        required=False,
                        metavar='FILE',
                        type=argument_file_exists)

    parser.add_argument('-s', '--parameter_set',
                        help='Parameter set id',
                        dest='parameter_set',
                        required=False,
                        type=str)

    parser.add_argument("-n", "--node",
                        help="Node mode",
                        dest="node_mode",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_sets",
                        help="List of available parameter sets",
                        dest="show_set_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_datasets",
                        help="List of available datasets",
                        dest="show_dataset_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_parameters",
                        help="Show parameters",
                        dest="show_parameters",
                        action='store_true',
                        required=False)

    parser.add_argument("-eval_path",
                        help="path to save evaluation results",
                        dest="eval_path",
                        required=False,
                        metavar="DIR")

    parser.add_argument("-o", "--overwrite",
                        help="Overwrite mode",
                        dest="overwrite",
                        action='store_true',
                        required=False)

    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)

    # Parse arguments
    args = parser.parse_args()

    # Load default parameters from a file
    default_parameters_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                               os.path.splitext(os.path.basename(__file__))[0]+'.defaults.yaml')
    if args.parameter_set:
        parameters_sets = args.parameter_set.split(',')
    else:
        parameters_sets = [None]

    for parameter_set in parameters_sets:
        # Initialize ParameterContainer
        params = ParameterContainer(project_base=os.path.dirname(os.path.realpath(__file__)))

        # Load default parameters from a file
        params.load(filename=default_parameters_filename)

        if args.parameter_override:
            # Override parameters from a file
            params.override(override=args.parameter_override)

        if parameter_set:
            # Override active_set
            params['active_set'] = parameter_set

        # Process parameters
        params.process()

        # Force overwrite
        if args.overwrite:
            params['general']['overwrite'] = True

        # Override dataset mode from arguments
        if args.mode == 'dev':
            params['dataset']['method'] = 'development'               # Set dataset to development
            params.process_method_parameters(section='dataset')       # Process dataset again, move correct parameters from dataset_parameters

        elif args.mode == 'challenge':
            params['dataset']['method'] = 'challenge_train'           # Set dataset to training set for challenge
            params.process_method_parameters(section='dataset')       # Process dataset again, move correct parameters from dataset_parameters

        if args.node_mode:
            params['general']['log_system_progress'] = True
            params['general']['print_system_progress'] = False

        # Setup logging
        setup_logging(parameter_container=params['logging'])

        app = CustomAppCore(name='DCASE 2017::Acoustic Scene Classification / Baseline System',
                            params=params,
                            system_desc=params.get('description'),
                            system_parameter_set_id=params.get('active_set'),
                            setup_label='Development setup',
                            log_system_progress=params.get_path('general.log_system_progress'),
                            show_progress_in_console=params.get_path('general.print_system_progress'),
                            )

        # Show parameter set list and exit
        if args.show_set_list:
            params_ = ParameterContainer(project_base=os.path.dirname(os.path.realpath(__file__))).load(filename=default_parameters_filename)
            if args.parameter_override:
                # Override parameters from a file
                params_.override(override=args.parameter_override)
            if 'sets' in params_:
                app.show_parameter_set_list(set_list=params_['sets'])

            return

        # Show dataset list and exit
        if args.show_dataset_list:
            app.show_dataset_list()
            return

        # Show system parameters
        if params.get_path('general.log_system_parameters') or args.show_parameters:
            app.show_parameters()

        # Initialize application
        # ==================================================
        if params['flow']['initialize']:
            app.initialize()

        # Extract features for all audio files in the dataset
        # ==================================================
        if params['flow']['extract_features']:
            app.feature_extraction()

        # Prepare feature normalizers
        # ==================================================
        if params['flow']['feature_normalizer']:
            app.feature_normalization()

        # System training
        # ==================================================
        if params['flow']['train_system']:
            app.system_training()

        # System evaluation
        if not args.mode or args.mode == 'dev':

            # System testing
            # ==================================================
            if params['flow']['test_system']:
                app.system_testing()

            # System evaluation
            # ==================================================
            if params['flow']['evaluate_system']:
                app.system_evaluation()

        # System evaluation in challenge mode
        elif args.mode == 'challenge':
            # Set dataset to testing set for challenge
            params['dataset']['method'] = 'challenge_test'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters('dataset')

            if params['general']['challenge_submission_mode']:
                # If in submission mode, save results in separate folder for easier access
                params['path']['recognizer'] = params.get_path('path.recognizer_challenge_output')

            challenge_app = CustomAppCore(name='DCASE 2017::Acoustic Scene Classification / Baseline System',
                                          params=params,
                                          setup_label='Evaluation setup'
                                          )
            # Initialize application
            # ==================================================
            if params['flow']['initialize']:
                challenge_app.initialize()

            # Extract features for all audio files in the dataset
            if params['flow']['extract_features']:
                challenge_app.feature_extraction()

            # System testing
            if params['flow']['test_system']:
                if params['general']['challenge_submission_mode']:
                    params['general']['overwrite'] = True

                challenge_app.system_testing()

                if params['general']['challenge_submission_mode']:
                    challenge_app.ui.line(" ")
                    challenge_app.ui.line("Results for the challenge are stored at ["+params.get_path('path.recognizer_challenge_output')+"]")
                    challenge_app.ui.line(" ")

            # System evaluation if not in challenge submission mode
            if params['flow']['evaluate_system']:
                challenge_app.system_evaluation()

    return 0
Code example #13
def test_hash():
    data_hash_target = 'be2ebe4cb85c65e7e679c55595c62446'
    data1 = {
        'field1': {
            'enable': True,
            '1': [1, 2, 3],
            '2': 1234,
        },
        'field2': {
            'sub_field1': 1234
        },
        'field3': {
            'enable': False,
            'sub_field1': 1234
        }
    }
    nose.tools.eq_(ParameterContainer(data1).get_hash(), data_hash_target)

    # Same content, different key order
    data2 = {
        'field2': {
            'sub_field1': 1234
        },
        'field1': {
            '2': 1234,
            '1': [1, 2, 3],
            'enable': True,
        },
        'field3': {
            'enable': False,
            'sub_field1': 1234
        }
    }
    nose.tools.eq_(ParameterContainer(data2).get_hash(), data_hash_target)

    # False valued field
    data3 = {
        'field1': {
            'enable': True,
            '1': [1, 2, 3],
            '2': 1234,
        },
        'field2': {
            'sub_field1': 1234,
            'field': False
        },
        'field3': {
            'enable': False,
            'sub_field1': 1234
        }
    }
    nose.tools.eq_(ParameterContainer(data3).get_hash(), data_hash_target)

    # Change value in disabled section
    data4 = {
        'field1': {
            'enable': True,
            '1': [1, 2, 3],
            '2': 1234,
        },
        'field2': {
            'sub_field1': 1234,
            'field': False
        },
        'field3': {
            'enable': False,
            'sub_field1': 4321
        }
    }
    nose.tools.eq_(ParameterContainer(data4).get_hash(), data_hash_target)
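Per the assertions above, get_hash() apparently ignores dictionary key order, fields whose value is False, and the contents of sections whose 'enable' flag is False. A minimal sketch of that behaviour (an inference from the test, not a specification):

# Sketch based only on the behaviour asserted in test_hash().
base = {'block': {'enable': True, 'value': 1}, 'disabled': {'enable': False, 'value': 1}}
variant = {'disabled': {'enable': False, 'value': 999}, 'block': {'value': 1, 'enable': True}}
assert ParameterContainer(base).get_hash() == ParameterContainer(variant).get_hash()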
Code example #14
File: task2.py  Project: zhang201882/requirement.txt
def main(argv):
    numpy.random.seed(123456)  # let's make randomization predictable

    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(
            ''' DCASE 2017 Task 2: Detection of rare sound events Baseline System '''
        ))

    # Setup argument handling
    parser.add_argument('-m',
                        '--mode',
                        choices=('dev', 'challenge'),
                        default='challenge',
                        help="Selector for system mode",
                        required=False,
                        dest='mode',
                        type=str)
    parser.add_argument("-o",
                        "--overwrite",
                        choices=('true', 'false'),
                        default='false',
                        help="Overwrite exsiting files",
                        dest="overwrite",
                        required=False)
    parser.add_argument("-gpu",
                        "--gpu",
                        default=0,
                        help="choose which gpu to use",
                        required=True)

    # Parse arguments
    args = parser.parse_args()

    # set GPU number
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)  # cast to str; the argument default 0 is an int

    # Load default parameters from a file
    default_parameters_filename = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'parameters/task2.defaults.yaml')

    # Initialize ParameterContainer
    params = ParameterContainer(
        project_base=os.path.dirname(os.path.realpath(__file__)),
        path_structure={
            'feature_extractor': ['dataset', 'feature_extractor.parameters.*'],
            'feature_normalizer':
            ['dataset', 'feature_extractor.parameters.*'],
            'learner': [
                'dataset', 'feature_extractor', 'feature_stacker',
                'feature_normalizer', 'feature_aggregator', 'learner'
            ],
            'recognizer': [
                'dataset', 'feature_extractor', 'feature_stacker',
                'feature_normalizer', 'feature_aggregator', 'learner',
                'recognizer'
            ],
        })

    # Load default parameters from a file
    params.load(filename=default_parameters_filename)

    # Process parameters
    params.process()

    # Force overwrite
    if args.overwrite == 'true':
        params['general']['overwrite'] = True

    # Override dataset mode from arguments
    if args.mode == 'dev':
        # Set dataset to development
        params['dataset']['method'] = 'development'

        # Process dataset again, move correct parameters from dataset_parameters
        params.process_method_parameters(section='dataset')

    elif args.mode == 'challenge':
        # Set dataset to training set for challenge
        params['dataset']['method'] = 'challenge_train'
        params['general']['challenge_submission_mode'] = True

        # Process dataset again, move correct parameters from dataset_parameters
        params.process_method_parameters(section='dataset')

    # Setup logging
    setup_logging(parameter_container=params['logging'])

    app = Task2AppCore(
        name='DCASE 2017::Detection of rare sound events / Baseline System',
        params=params,
        system_desc=params.get('description'),
        system_parameter_set_id=params.get('active_set'),
        setup_label='Development setup',
        log_system_progress=params.get_path('general.log_system_progress'),
        show_progress_in_console=params.get_path(
            'general.print_system_progress'),
        use_ascii_progress_bar=params.get_path(
            'general.use_ascii_progress_bar'))

    # Initialize application
    # ==================================================
    if params['flow']['initialize']:
        app.initialize()

    # Extract features for all audio files in the dataset
    # ==================================================
    if params['flow']['extract_features']:
        app.feature_extraction()

    # Prepare feature normalizers
    # ==================================================
    if params['flow']['feature_normalizer']:
        app.feature_normalization()

    # System training
    # ==================================================
    if params['flow']['train_system']:
        app.system_training()

    # System evaluation in development mode
    if not args.mode or args.mode == 'dev':

        # System testing
        # ==================================================
        if params['flow']['test_system']:
            app.system_testing()

        # System evaluation
        # ==================================================
        if params['flow']['evaluate_system']:
            app.system_evaluation()

    # System evaluation with challenge data
    elif args.mode == 'challenge':
        # Set dataset to testing set for challenge
        params['dataset']['method'] = 'challenge_test'

        # Process dataset again, move correct parameters from dataset_parameters
        params.process_method_parameters('dataset')

        if params['general']['challenge_submission_mode']:
            # If in submission mode, save results in separate folder for easier access
            params['path']['recognizer'] = params.get_path(
                'path.recognizer_challenge_output')

        challenge_app = Task2AppCore(
            name='DCASE 2017::Detection of rare sound events / Baseline System',
            params=params,
            system_desc=params.get('description'),
            system_parameter_set_id=params.get('active_set'),
            setup_label='Evaluation setup',
            log_system_progress=params.get_path('general.log_system_progress'),
            show_progress_in_console=params.get_path(
                'general.print_system_progress'),
            use_ascii_progress_bar=params.get_path(
                'general.use_ascii_progress_bar'))

        # Initialize application
        if params['flow']['initialize']:
            challenge_app.initialize()

        # Extract features for all audio files in the dataset
        if params['flow']['extract_features']:
            challenge_app.feature_extraction()

        # System testing
        if params['flow']['test_system']:
            if params['general']['challenge_submission_mode']:
                params['general']['overwrite'] = True

            challenge_app.system_testing(single_file_per_fold=True)

            if params['general']['challenge_submission_mode']:
                challenge_app.ui.line(" ")
                challenge_app.ui.line(
                    "Results for the challenge data are stored at [" +
                    params['path']['recognizer_challenge_output'] + "]")
                challenge_app.ui.line(" ")

        # System evaluation if not in challenge submission mode
        if params['flow']['evaluate_system']:
            challenge_app.system_evaluation(single_file_per_fold=True)

    return 0
Code example #15
def main(argv):
    numpy.random.seed(123456)  # let's make randomization predictable

    args = parse_args(argv)

    # Load default parameters from a file
    default_parameters_filename = 'parameters.yaml'

    if args.parameter_set:
        parameters_sets = args.parameter_set.split(',')
    else:
        parameters_sets = [None]

    for parameter_set in parameters_sets:
        # Initialize ParameterContainer
        params = ParameterContainer(
            project_base=os.path.dirname(os.path.realpath(__file__)))

        # Load default parameters from a file
        params.load(filename=default_parameters_filename)

        if args.parameter_override:
            # Override parameters from a file
            params.override(override=args.parameter_override)

        if parameter_set:
            # Override active_set
            params['active_set'] = parameter_set

        # Process parameters
        params.process()

        # Force overwrite
        if args.overwrite:
            params['general']['overwrite'] = True

        # Override dataset mode from arguments
        if args.mode == 'dev':
            # Set dataset to development
            params['dataset']['method'] = 'development'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        elif args.mode == 'challenge':
            # Set dataset to training set for challenge
            params['dataset']['method'] = 'challenge_train'
            params['general']['challenge_submission_mode'] = True
            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        if args.node_mode:
            params['general']['log_system_progress'] = True
            params['general']['print_system_progress'] = False

        # Force ascii progress bar under Windows console
        if platform.system() == 'Windows':
            params['general']['use_ascii_progress_bar'] = True

        # Setup logging
        setup_logging(parameter_container=params['logging'])

        # app = CustomAppCore(name='DCASE 2017::Acoustic Scene Classification / Baseline System',
        #                     params=params,
        #                     system_desc=params.get('description'),
        #                     system_parameter_set_id=params.get('active_set'),
        #                     setup_label='Development setup',
        #                     log_system_progress=params.get_path('general.log_system_progress'),
        #                     show_progress_in_console=params.get_path('general.print_system_progress'),
        #                     use_ascii_progress_bar=params.get_path('general.use_ascii_progress_bar')
        #                     )

        # Show parameter set list and exit
        # if args.show_set_list:
        #     params_ = ParameterContainer(
        #         project_base=os.path.dirname(os.path.realpath(__file__))
        #     ).load(filename=default_parameters_filename)
        #
        #     if args.parameter_override:
        #         # Override parameters from a file
        #         params_.override(override=args.parameter_override)
        #     if 'sets' in params_:
        #         app.show_parameter_set_list(set_list=params_['sets'])
        #
        #     return

        # # Show dataset list and exit
        # if args.show_dataset_list:
        #     app.show_dataset_list()
        #     return

        # Show system parameters
        # if params.get_path('general.log_system_parameters') or args.show_parameters:
        #     app.show_parameters()

        # Show evaluated systems
        # if args.show_eval:
        #     app.show_eval()
        #     return

        # Initialize application
        # ==================================================
        # if params['flow']['initialize']:
        #     app.initialize()

        # Extract features for all audio files in the dataset
        # ==================================================
        # if params['flow']['extract_features']:
        #     app.feature_extraction()

        # Prepare feature normalizers
        # ==================================================
        # if params['flow']['feature_normalizer']:
        #     app.feature_normalization()

        # System training
        # ==================================================
        # if params['flow']['train_system']:
        #     app.system_training()

        # System evaluation
        # if not args.mode or args.mode == 'dev':

        # System testing
        # ==================================================
        # if params['flow']['test_system']:
        #     app.system_testing()

        # System evaluation
        # ==================================================
        # if params['flow']['evaluate_system']:
        #     app.system_evaluation()

        # System evaluation in challenge mode

    return 0
Code example #16
def main(argv):
    numpy.random.seed(123456)  # let's make randomization predictable

    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            DCASE 2017
            Task 2: Rare Sound Event Detection
            A custom learner where you can create FNNs, CNNs, RNNs and CRNNs of any size and depth
            Code used in the paper:
            Convolutional Recurrent Neural Networks for Rare Sound Event Detection (Emre Cakir, Tuomas Virtanen)
            http://www.cs.tut.fi/sgn/arg/dcase2017/documents/challenge_technical_reports/DCASE2017_Cakir_104.pdf

            ---------------------------------------------
                Tampere University of Technology / Audio Research Group
                Author:  Emre Cakir ( [email protected] )


        '''))

    # Setup argument handling
    parser.add_argument('-m',
                        '--mode',
                        choices=('dev', 'challenge'),
                        default=None,
                        help="Selector for system mode",
                        required=False,
                        dest='mode',
                        type=str)

    parser.add_argument('-p',
                        '--parameters',
                        help='parameter file override',
                        dest='parameter_override',
                        required=False,
                        metavar='FILE',
                        type=argument_file_exists)

    parser.add_argument('-s',
                        '--parameter_set',
                        help='Parameter set id',
                        dest='parameter_set',
                        required=False,
                        type=str)

    parser.add_argument("-n",
                        "--node",
                        help="Node mode",
                        dest="node_mode",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_sets",
                        help="List of available parameter sets",
                        dest="show_set_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_datasets",
                        help="List of available datasets",
                        dest="show_dataset_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_parameters",
                        help="Show parameters",
                        dest="show_parameters",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_eval",
                        help="Show evaluated setups",
                        dest="show_eval",
                        action='store_true',
                        required=False)

    parser.add_argument("-o",
                        "--overwrite",
                        help="Overwrite mode",
                        dest="overwrite",
                        action='store_true',
                        required=False)

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    # Parse arguments
    args = parser.parse_args()

    # Load default parameters from a file
    default_parameters_filename = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        os.path.splitext(os.path.basename(__file__))[0] + '.defaults.yaml')
    if args.parameter_set:
        parameters_sets = args.parameter_set.split(',')
    else:
        parameters_sets = [None]

    for parameter_set in parameters_sets:
        # Initialize ParameterContainer
        params = ParameterContainer(
            project_base=os.path.dirname(os.path.realpath(__file__)),
            path_structure={
                'feature_extractor':
                ['dataset', 'feature_extractor.parameters.*'],
                'feature_normalizer':
                ['dataset', 'feature_extractor.parameters.*'],
                'learner': [
                    'dataset', 'feature_extractor', 'feature_normalizer',
                    'feature_aggregator', 'learner'
                ],
                'recognizer': [
                    'dataset', 'feature_extractor', 'feature_normalizer',
                    'feature_aggregator', 'learner', 'recognizer'
                ],
            })

        # Load default parameters from a file
        params.load(filename=default_parameters_filename)

        if args.parameter_override:
            # Override parameters from a file
            params.override(override=args.parameter_override)

        if parameter_set:
            # Override active_set
            params['active_set'] = parameter_set

        # Process parameters
        params.process()

        # Force overwrite
        if args.overwrite:
            params['general']['overwrite'] = True

        # Override dataset mode from arguments
        if args.mode == 'dev':
            # Set dataset to development
            params['dataset']['method'] = 'development'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        elif args.mode == 'challenge':
            # Set dataset to training set for challenge
            params['dataset']['method'] = 'challenge_train'
            params['general']['challenge_submission_mode'] = True
            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        if args.node_mode:
            params['general']['log_system_progress'] = True
            params['general']['print_system_progress'] = False

        # Force ascii progress bar under Windows console
        if platform.system() == 'Windows':
            params['general']['use_ascii_progress_bar'] = True

        # Setup logging
        setup_logging(parameter_container=params['logging'])

        app = CustomAppCore(
            name=
            'DCASE 2017::Rare Sound Event Detection / Custom Multifunctional Deep Learning',
            params=params,
            system_desc=params.get('description'),
            system_parameter_set_id=params.get('active_set'),
            setup_label='Development setup',
            log_system_progress=params.get_path('general.log_system_progress'),
            show_progress_in_console=params.get_path(
                'general.print_system_progress'),
            use_ascii_progress_bar=params.get_path(
                'general.use_ascii_progress_bar'))

        # Show parameter set list and exit
        if args.show_set_list:
            params_ = ParameterContainer(
                project_base=os.path.dirname(os.path.realpath(__file__))).load(
                    filename=default_parameters_filename)

            if args.parameter_override:
                # Override parameters from a file
                params_.override(override=args.parameter_override)
            if 'sets' in params_:
                app.show_parameter_set_list(set_list=params_['sets'])

            return

        # Show dataset list and exit
        if args.show_dataset_list:
            app.show_dataset_list()
            return

        # Show system parameters
        if params.get_path(
                'general.log_system_parameters') or args.show_parameters:
            app.show_parameters()

        # Show evaluated systems
        if args.show_eval:
            app.show_eval()
            return

        # Initialize application
        # ==================================================
        if params['flow']['initialize']:
            app.initialize()

        # Extract features for all audio files in the dataset
        # ==================================================
        if params['flow']['extract_features']:
            app.feature_extraction()

        # Prepare feature normalizers
        # ==================================================
        if params['flow']['feature_normalizer']:
            app.feature_normalization()

        # System training
        # ==================================================
        if params['flow']['train_system']:
            app.system_training()

        # System evaluation in development mode
        if not args.mode or args.mode == 'dev':

            # System testing
            # ==================================================
            if params['flow']['test_system']:
                app.system_testing()

            # System evaluation
            # ==================================================
            if params['flow']['evaluate_system']:
                app.system_evaluation()

        # System evaluation in challenge mode
        elif args.mode == 'challenge':
            # Set dataset to testing set for challenge
            params['dataset']['method'] = 'challenge_test'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters('dataset')

            if params['general']['challenge_submission_mode']:
                # If in submission mode, save results in separate folder for easier access
                params['path']['recognizer'] = params.get_path(
                    'path.recognizer_challenge_output')

            challenge_app = CustomAppCore(
                name=
                'DCASE 2017::Rare Sound Event Detection / Custom Multifunctional Deep Learning',
                params=params,
                system_desc=params.get('description'),
                system_parameter_set_id=params.get('active_set'),
                setup_label='Evaluation setup',
                log_system_progress=params.get_path(
                    'general.log_system_progress'),
                show_progress_in_console=params.get_path(
                    'general.print_system_progress'),
                use_ascii_progress_bar=params.get_path(
                    'general.use_ascii_progress_bar'))
            # Initialize application
            if params['flow']['initialize']:
                challenge_app.initialize()

            # Extract features for all audio files in the dataset
            if params['flow']['extract_features']:
                challenge_app.feature_extraction()

            # System testing
            if params['flow']['test_system']:
                if params['general']['challenge_submission_mode']:
                    params['general']['overwrite'] = True

                challenge_app.system_testing()

                if params['general']['challenge_submission_mode']:
                    challenge_app.ui.line(" ")
                    challenge_app.ui.line(
                        "Results for the challenge are stored at [" +
                        params.get_path('path.recognizer_challenge_output') +
                        "]")
                    challenge_app.ui.line(" ")

            # System evaluation
            if params['flow']['evaluate_system']:
                challenge_app.system_evaluation()

    return 0
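
For quick reference, the per-set loop above always resolves parameters in the same order: load the defaults file, apply the optional override file, select the active set, then process. A condensed sketch of just that order follows; the filenames are placeholders rather than files shipped with these examples, and ParameterContainer is assumed to come from dcase_framework.parameters as in the rest of this collection.

import os

from dcase_framework.parameters import ParameterContainer  # assumed import, as used elsewhere

params = ParameterContainer(
    project_base=os.path.dirname(os.path.realpath(__file__)))

# 1) Defaults shipped next to the script (placeholder filename).
params.load(filename='system.defaults.yaml')

# 2) Optional override file given on the command line.
params.override(override='overrides.yaml')

# 3) Optional parameter set id given on the command line.
params['active_set'] = 'set1'

# 4) Resolve the active set, expand paths and create directories.
params.process()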
Code Example #17
def main(argv):
    numpy.random.seed(123456)  # let's make randomization predictable

    parser = argparse.ArgumentParser(
        prefix_chars='-+',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent('''\
            DCASE 2017
            Task 3: Sound Event Detection in Real-life Audio
            Baseline System
            ---------------------------------------------
                Tampere University of Technology / Audio Research Group
                Author:  Toni Heittola ( [email protected] )

            System description
                This is a baseline implementation for DCASE 2017, Task 3 - Sound event detection in real-life audio.
                The system has a binary classifier for each included sound event class. The GMM classifier is trained with
                the positive and negative examples from the mixture signals, and classification is done between these
                two models as a likelihood ratio. Acoustic features are MFCC+Delta+Acceleration (MFCC0 omitted).

        '''))

    # Setup argument handling
    parser.add_argument('-m',
                        '--mode',
                        choices=('dev', 'challenge'),
                        default=None,
                        help="Selector for system mode",
                        required=False,
                        dest='mode',
                        type=str)

    parser.add_argument('-p',
                        '--parameters',
                        help='parameter file override',
                        dest='parameter_override',
                        required=False,
                        metavar='FILE',
                        type=argument_file_exists)

    parser.add_argument('-s',
                        '--parameter_set',
                        help='Parameter set id',
                        dest='parameter_set',
                        required=False,
                        type=str)

    parser.add_argument("-n",
                        "--node",
                        help="Node mode",
                        dest="node_mode",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_sets",
                        help="List of available parameter sets",
                        dest="show_set_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_datasets",
                        help="List of available datasets",
                        dest="show_dataset_list",
                        action='store_true',
                        required=False)

    parser.add_argument("-show_parameters",
                        help="Show parameters",
                        dest="show_parameters",
                        action='store_true',
                        required=False)

    parser.add_argument("-o",
                        "--overwrite",
                        help="Overwrite mode",
                        dest="overwrite",
                        action='store_true',
                        required=False)

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + __version__)

    # Parse arguments
    args = parser.parse_args()

    # Load default parameters from a file
    default_parameters_filename = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'parameters',
        os.path.splitext(os.path.basename(__file__))[0] + '.defaults.yaml')
    if args.parameter_set:
        parameters_sets = args.parameter_set.split(',')
    else:
        parameters_sets = [None]

    for parameter_set in parameters_sets:
        # Initialize ParameterContainer
        params = ParameterContainer(
            project_base=os.path.dirname(os.path.realpath(__file__)))

        # Load default parameters from a file
        params.load(filename=default_parameters_filename)

        if args.parameter_override:
            # Override parameters from a file
            params.override(override=args.parameter_override)

        if parameter_set:
            # Override active_set
            params['active_set'] = parameter_set

        # Process parameters
        params.process()

        # Force overwrite
        if args.overwrite:
            params['general']['overwrite'] = True

        # Override dataset mode from arguments
        if args.mode == 'dev':
            # Set dataset to development
            params['dataset']['method'] = 'development'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        elif args.mode == 'challenge':
            # Set dataset to training set for challenge
            params['dataset']['method'] = 'challenge_train'
            params['general']['challenge_submission_mode'] = True

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters(section='dataset')

        if args.node_mode:
            params['general']['log_system_progress'] = True
            params['general']['print_system_progress'] = False

        # Setup logging
        setup_logging(parameter_container=params['logging'])

        app = Task3AppCore(
            name=
            'DCASE 2017::Sound Event Detection in Real-life Audio / Baseline System',
            params=params,
            system_desc=params.get('description'),
            system_parameter_set_id=params.get('active_set'),
            setup_label='Development setup',
            log_system_progress=params.get_path('general.log_system_progress'),
            show_progress_in_console=params.get_path(
                'general.print_system_progress'),
        )

        # Show parameter set list and exit
        if args.show_set_list:
            params_ = ParameterContainer(
                project_base=os.path.dirname(os.path.realpath(__file__))).load(
                    filename=default_parameters_filename)

            if args.parameter_override:
                # Override parameters from a file
                params_.override(override=args.parameter_override)
            if 'sets' in params_:
                app.show_parameter_set_list(set_list=params_['sets'])

            return

        # Show dataset list and exit
        if args.show_dataset_list:
            app.show_dataset_list()
            return

        # Show system parameters
        if params.get_path(
                'general.log_system_parameters') or args.show_parameters:
            app.show_parameters()

        # Initialize application
        # ==================================================
        if params['flow']['initialize']:
            app.initialize()

        # Extract features for all audio files in the dataset
        # ==================================================
        if params['flow']['extract_features']:
            app.feature_extraction()

        # Prepare feature normalizers
        # ==================================================
        if params['flow']['feature_normalizer']:
            app.feature_normalization()

        # System training
        # ==================================================
        if params['flow']['train_system']:
            app.system_training()

        # System evaluation in development mode
        if not args.mode or args.mode == 'dev':

            # System testing
            # ==================================================
            if params['flow']['test_system']:
                app.system_testing()

            # System evaluation
            # ==================================================
            if params['flow']['evaluate_system']:
                app.system_evaluation()

        # System evaluation with challenge data
        elif args.mode == 'challenge':
            # Set dataset to testing set for challenge
            params['dataset']['method'] = 'challenge_test'

            # Process dataset again, move correct parameters from dataset_parameters
            params.process_method_parameters('dataset')

            if params['general']['challenge_submission_mode']:
                # If in submission mode, save results in separate folder for easier access
                params['path']['recognizer'] = params.get_path(
                    'path.recognizer_challenge_output')

            challenge_app = Task3AppCore(
                name=
                'DCASE 2017::Sound Event Detection in Real-life Audio / Baseline System',
                params=params,
                system_desc=params.get('description'),
                system_parameter_set_id=params.get('active_set'),
                setup_label='Evaluation setup')
            # Initialize application
            if params['flow']['initialize']:
                challenge_app.initialize()

            # Extract features for all audio files in the dataset
            if params['flow']['extract_features']:
                challenge_app.feature_extraction()

            # System testing
            if params['flow']['test_system']:
                if params['general']['challenge_submission_mode']:
                    params['general']['overwrite'] = True

                challenge_app.system_testing()

                if params['general']['challenge_submission_mode']:
                    challenge_app.ui.line(" ")
                    challenge_app.ui.line(
                        "Results for the challenge data are stored at [" +
                        params['path']['recognizer_challenge_output'] + "]")
                    challenge_app.ui.line(" ")

            # System evaluation
            if params['flow']['evaluate_system']:
                challenge_app.system_evaluation()

    return 0
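
The listing ends with main() itself; the entry-point wiring is not shown, so the following is an assumption about how such a script is typically run (the file name task3.py and the example flags are illustrative only).

import sys

if __name__ == '__main__':
    # Typical shell invocation, assuming the file is saved as task3.py:
    #   python task3.py --mode dev --parameter_set set1 --overwrite
    # parse_args() inside main() reads sys.argv, so those flags end up in args.
    sys.exit(main(sys.argv))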
Code Example #18
def test_override():
    # Test #1
    params = ParameterContainer({
        'field1': 1,
        'field2': 2,
        'field3': 3,
        'field4': 4,
        'subdict': {
            'field1': [1, 2, 3, 4],
            'field2': 100,
        }
    })
    params.override({
        'field1': 11,
        'field3': 13,
        'subdict': {
            'field1': [2, 4],
            'field2': 300,
        }
    })
    nose.tools.eq_(params['field1'], 11)
    nose.tools.eq_(params['field2'], 2)
    nose.tools.eq_(params['field3'], 13)
    nose.tools.eq_(params['field4'], 4)
    nose.tools.eq_(params['subdict']['field1'], [2, 4])
    nose.tools.eq_(params['subdict']['field2'], 300)

    # Test #2
    params = ParameterContainer({
        'field1': 1,
        'field2': 2,
        'field3': 3,
        'field4': 4,
        'subdict': {
            'field1': [1, 2, 3, 4],
            'field2': 100,
        }
    })
    params.override(
        json.dumps({
            'field1': 11,
            'field3': 13,
            'subdict': {
                'field1': [2, 4],
                'field2': 300,
            }
        }))

    nose.tools.eq_(params['field1'], 11)
    nose.tools.eq_(params['field2'], 2)
    nose.tools.eq_(params['field3'], 13)
    nose.tools.eq_(params['field4'], 4)
    nose.tools.eq_(params['subdict']['field1'], [2, 4])
    nose.tools.eq_(params['subdict']['field2'], 300)

    # Test #3
    params = ParameterContainer({
        'field1': 1,
        'field2': 2,
        'field3': 3,
        'field4': 4,
        'subdict': {
            'field1': [1, 2, 3, 4],
            'field2': 100,
        }
    })
    tmp = tempfile.NamedTemporaryFile('r+', suffix='.yaml', dir='/tmp')
    try:
        tmp.write('field1: 10\n')
        tmp.write('field2: 20\n')
        tmp.write('field3: 30\n')
        tmp.write('field4: 40\n')
        tmp.seek(0)

        params.override(tmp.name)

        nose.tools.eq_(params['field1'], 10)
        nose.tools.eq_(params['field2'], 20)
        nose.tools.eq_(params['field3'], 30)
        nose.tools.eq_(params['field4'], 40)
    finally:
        tmp.close()
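
Test #3 above pins the temporary override file to /tmp and re-opens a NamedTemporaryFile by name, which is only reliable on POSIX systems. A more portable sketch of the same check is given below; the field values are arbitrary and ParameterContainer is assumed to come from dcase_framework.parameters, as in the rest of this collection.

import os
import tempfile

import nose.tools
from dcase_framework.parameters import ParameterContainer


def test_override_from_yaml_file():
    params = ParameterContainer({'field1': 1, 'field2': 2})

    # mkstemp gives both an open OS-level handle and a path that can be
    # re-read by name on any platform.
    fd, path = tempfile.mkstemp(suffix='.yaml')
    try:
        with os.fdopen(fd, 'w') as f:
            f.write('field1: 10\n')
            f.write('field2: 20\n')

        # override() accepts a filename, as in Test #3 above.
        params.override(path)

        nose.tools.eq_(params['field1'], 10)
        nose.tools.eq_(params['field2'], 20)
    finally:
        os.remove(path)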