    # (excerpt begins mid-function: `f` is the audio-representation
    # config JSON opened just above this excerpt)
    params = json.load(f)
    config['audio_rep'] = params

    # set patch parameters
    config['xInput'] = config['n_frames']
    config['yInput'] = config['audio_rep']['n_mels']
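    # For illustration (assumed values, not from this excerpt): with
    # n_frames=187 and n_mels=96, each training patch fed to the network
    # is a 187x96 (time x mel-bands) slice of the spectrogram.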

    # load audio representation paths
    file_index = config_file.DATA_FOLDER + config[
        'audio_representation_folder'] + 'index.tsv'
    [audio_repr_paths,
     id2audio_repr_path] = shared.load_id2audioReprPath(file_index)
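    # A minimal sketch of what such a loader might look like, assuming
    # index.tsv holds one "id<TAB>path" row per track (the real
    # shared.load_id2audioReprPath may differ):
    #
    #     def load_id2audioReprPath(index_file):
    #         paths, id2path = [], {}
    #         with open(index_file) as f:
    #             for line in f:
    #                 id_, path = line.rstrip('\n').split('\t')[:2]
    #                 paths.append(path)
    #                 id2path[id_] = path
    #         return paths, id2path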

    # load training ground truth
    file_ground_truth_train = config_file.DATA_FOLDER + config['gt_train']
    [all_ids_train, id2gt_train] = shared.load_id2gt(file_ground_truth_train)
    [_, id2label_train] = shared.load_id2label(file_ground_truth_train)
    label2ids_train = shared.load_label2ids(id2label_train)
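    # Sketch of the inversion this step performs, assuming
    # shared.load_label2ids simply groups track ids by label
    # (the real helper may differ):
    #
    #     from collections import defaultdict
    #
    #     def load_label2ids(id2label):
    #         label2ids = defaultdict(list)
    #         for id_, label in id2label.items():
    #             label2ids[label].append(id_)
    #         return dict(label2ids)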

    # load test ground truth
    file_ground_truth_test = config_file.DATA_FOLDER + config['gt_test']
    [all_ids_test, id2gt_test] = shared.load_id2gt(file_ground_truth_test)
    [_, id2label_test] = shared.load_id2label(file_ground_truth_test)
    label2ids_test = shared.load_label2ids(id2label_test)

    # set output according to the experimental setup
    classes_vector = list(range(config['num_classes_dataset']))

    # tensorflow: define the model
    with tf.name_scope('model'):
        ...  # model definition continues beyond this excerpt
Example #2
    # Which experiment do we want to evaluate?
    # Use the -l flag to ensemble models: python arg.py -l 1234 2345 3456 4567
    parser = argparse.ArgumentParser()
    parser.add_argument('-l',
                        '--list',
                        nargs='+',
                        help='List of models to evaluate',
                        required=True)
    args = parser.parse_args()
    models = args.list
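    # e.g. `python arg.py -l 1234 2345 3456 4567` yields
    # args.list == ['1234', '2345', '3456', '4567']: argparse's
    # nargs='+' collects one or more values into a list of strings,
    # and each entry names one experiment folder to ensemble.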

    # load all audio representation paths
    [audio_repr_paths, id2audio_repr_path] = shared.load_id2path(FILE_INDEX)

    # load ground truth
    [ids, id2gt] = shared.load_id2gt(FILE_GROUND_TRUTH_TEST)
    print('# Test set', len(ids))

    array_cost, pred_array, id_array = [], None, None
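    # pred_array and id_array start as None so the first model can
    # initialize them; later models are accumulated on top. A sketch of
    # the assumed ensembling pattern (not shown in this excerpt):
    #
    #     # per model:
    #     pred_array = pred if pred_array is None else pred_array + pred
    #     # after the loop, average the ensemble:
    #     pred_array = pred_array / len(models)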

    for model in models:

        experiment_folder = config_file.DATA_FOLDER + 'experiments/' + str(
            model) + '/'
        config = json.load(open(experiment_folder + 'config.json'))
        print('Experiment: ' + str(model))
        print('\n' + str(config))

        # pescador: define (finite, batched & parallel) streamer
        pack = [config, 'overlap_sampling', config['n_frames'], False]
        streams = [
            # NOTE: list completed from a truncated excerpt; assumes a
            # `data_gen` generator with this signature exists elsewhere
            pescador.Streamer(data_gen, id, id2audio_repr_path[id],
                              id2gt[id], pack) for id in ids
        ]
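        # The per-track streamers are typically chained and batched; a
        # sketch of the usual pescador pattern (TEST_BATCH_SIZE is an
        # assumption, not defined in this excerpt):
        #
        #     mux_stream = pescador.ChainMux(streams, mode='exhaustive')
        #     batch_streamer = pescador.Streamer(
        #         pescador.buffer_stream, mux_stream,
        #         buffer_size=TEST_BATCH_SIZE, partial=True)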
Example #3
    # (excerpt begins mid-function: `f` is the audio-representation
    # config JSON opened just above this excerpt)
    params = json.load(f)
    config['audio_rep'] = params

    # set patch parameters
    config['xInput'] = config['n_frames']
    config['yInput'] = config['audio_rep']['n_mels']

    # load audio representation paths
    file_index = config_file.DATA_FOLDER + config[
        'audio_representation_folder'] + 'index.tsv'
    [audio_repr_paths,
     id2audio_repr_path] = shared.load_id2audioReprPath(file_index)

    # load training ground truth
    file_ground_truth_train = config_file.DATA_FOLDER + config['gt_train']
    [all_ids_train, id2gt_train] = shared.load_id2gt(file_ground_truth_train)
    [_, id2label_train] = shared.load_id2label(file_ground_truth_train)
    label2ids_train = shared.load_label2ids(id2label_train)

    # load validation ground truth
    file_ground_truth_val = config_file.DATA_FOLDER + config['gt_val']
    [all_ids_val, id2gt_val] = shared.load_id2gt(file_ground_truth_val)
    [_, id2label_val] = shared.load_id2label(file_ground_truth_val)
    label2ids_val = shared.load_label2ids(id2label_val)

    # set output according to the experimental setup
    config['classes_vector'] = list(range(config['num_classes_dataset']))

    # save experimental settings
    experiment_id = 'fold_' + str(config_file.FOLD) + '_' + str(
        shared.get_epoch_time())
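    # e.g. with FOLD = 0 this builds an id like 'fold_0_1612345678',
    # assuming shared.get_epoch_time() returns the current Unix epoch
    # time (a guess from the name), so every run gets a unique folder.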