Esempio n. 1
0
def test_get_data_from_file():
    """Verify DataGenerator.get_data_from_file returns the known fixtures.

    Builds a non-shuffled generator over all folds and checks that the
    first three files yield features/targets matching the module-level
    ground-truth arrays ``X_gt``/``Y_gt``.
    """
    generator = DataGenerator(
        dataset,
        feature_extractor,
        folds=['all'],
        batch_size=2,
        shuffle=False,
    )

    for file_ix in range(3):
        features, targets = generator.get_data_from_file(file_ix)
        assert np.allclose(features, X_gt[file_ix])
        assert np.allclose(targets, Y_gt[file_ix])
Esempio n. 2
0
def generate_demo(n_clicks, list_of_contents, fold_ix, model_path,
                  list_of_names, list_of_dates, sr):
    """Dash callback that renders the demo figure + audio player.

    Three paths, selected by which input triggered the callback:
      * 'btn_run_demo' — pick a random file from the test fold, run the
        model on its features and show prediction vs. ground truth.
      * 'upload-data'  — decode the uploaded base64 audio, extract and
        scale features, and show the model's prediction.
      * anything else  — return an empty placeholder figure.

    Returns a 3-element list: [figure, audio-player props dict, label text].
    """
    print('generate demo')
    ctx = dash.callback_context
    button_id = ctx.triggered[0]['prop_id'].split('.')[0]
    print(button_id, n_clicks)

    # Use logical `and` (short-circuits) rather than bitwise `&`, which
    # only worked here because both operands were parenthesized booleans.
    if (n_clicks is not None) and (button_id == 'btn_run_demo'):
        fold_name = dataset.fold_list[fold_ix]
        exp_folder_fold = conv_path(os.path.join(model_path, fold_name))
        scaler_path = os.path.join(exp_folder_fold, 'scaler.pickle')
        scaler = load_pickle(scaler_path)

        data_generator_test = DataGenerator(
            dataset,
            feature_extractor,
            folds=[fold_name],
            batch_size=params['train']['batch_size'],
            shuffle=True,
            train=False,
            scaler=scaler)

        # Pick one file at random from the test fold.
        n_files = len(data_generator_test.audio_file_list)
        ix = np.random.randint(n_files)

        X_features, Y_file = data_generator_test.get_data_from_file(ix)

        with graph.as_default():
            model_container.load_model_weights(exp_folder_fold)
            Y_features = model_container.model.predict(X_features)

        fig_demo = generate_figure_features(X_features, Y_features,
                                            dataset.label_list)

        audio_file = data_generator_test.audio_file_list[ix]
        audio_data, sr = sf.read(audio_file['file_original'])

        # File-level ground truth: label of the first frame's one-hot target.
        class_ix = np.argmax(Y_file[0])
        file_label = dataset.label_list[class_ix]

        return [
            fig_demo, {
                'autoPlay': False,
                'src': encode_audio(audio_data, sr)
            },
            'ground-truth: %s' % file_label
        ]

    if button_id == 'upload-data':
        fold_name = dataset.fold_list[fold_ix]
        exp_folder_fold = conv_path(os.path.join(model_path, fold_name))
        scaler_path = os.path.join(exp_folder_fold, 'scaler.pickle')
        scaler = load_pickle(scaler_path)

        # The upload arrives as a data URI; keep only the base64 payload.
        filename = conv_path('upload.wav')
        data = list_of_contents.encode("utf8").split(b";base64,")[1]
        with open(filename, "wb") as fp:
            fp.write(base64.decodebytes(data))

        X_feat = feature_extractor.calculate(filename)
        X_feat = scaler.transform(X_feat)
        with graph.as_default():
            Y_t = model_container.model.predict(X_feat)

        label_list = dataset.label_list
        figure_features = generate_figure_features(X_feat, Y_t, label_list)
        return [
            figure_features, {
                'autoPlay': False,
                'src': list_of_contents
            }, ""
        ]

    # Fallback: empty placeholder figure with blank labels.
    X_feat = np.zeros((10, 128, 64))
    Y_t = np.zeros((10, 10))
    # BUG FIX: `[] * 10` evaluates to `[]`; use 10 empty-string labels to
    # match the 10 classes of the dummy Y_t.
    label_list = [''] * 10
    figure_features = generate_figure_features(X_feat, Y_t, label_list)

    return [figure_features, {'autoPlay': False, 'src': ""}, ""]