Example #1
0
                     verbose=5)

# Fit the (grid-search) model and record train/test performance.
# NOTE(review): `model`, X_train/y_train/X_test/y_test are defined in a part
# of the script not visible here; `best_params_` below implies `model` is a
# GridSearchCV-style wrapper — confirm upstream.
model.fit(X_train, y_train)

best_params = model.best_params_

score_test = model.score(X_test, y_test)

score_train = model.score(X_train, y_train)

y_hat = model.predict(X_test)

# Build the output path for the serialized model-architecture JSON.
filename_model = Directory.model_filename(method,
                                          language,
                                          library,
                                          normalization,
                                          score_test,
                                          augmentation=augment,
                                          json=False) + 'model.json'

# Persist the model architecture as JSON; the builder function is resolved
# dynamically by name (e.g. method='cnn' -> build_cnn()).
JSON.create_json_file(file=filename_model,
                      data=globals()['build_' + method]().to_json())

# Save accuracies and parameters (original comment: "SALVA ACURÁCIAS E PARAMETROS").
# NOTE(review): this Model.dump_grid(...) call is truncated in this extract —
# its remaining keyword arguments and closing parenthesis are not visible here,
# so the statement below is syntactically incomplete as shown.
Model.dump_grid(Directory.model_filename(method,
                                         language,
                                         library,
                                         normalization,
                                         score_test,
                                         augmentation=augment),
                model=model,
# Parse CLI arguments and fixed experiment configuration for inference.
args = Terminal.get_args()

language = 'portuguese'
method = 'svm'
library = 'psf'
people = args['people']
segments = args['segments']
normalization = args['normalization']
augment = args['augmentation']
sampling_rate = 24000  # Hz — target rate for librosa resampling below
random_state = 42

# Directory holding the persisted artifacts (info.json, scaler.pkl, model.h5).
filename_holder = Directory.model_filename(method=method,
                                           language=language,
                                           library=library,
                                           normalization=normalization,
                                           augmentation=augment,
                                           json=False,
                                           models=True)

# Load artifacts inside `with` blocks so the file handles are closed
# deterministically (the original passed open(...) directly to the loaders,
# leaking the handles until garbage collection).
with open(filename_holder + 'info.json', 'r') as fp:
    info = json.load(fp)
with open(filename_holder + 'scaler.pkl', 'rb') as fp:
    scaler = load(fp)
# NOTE(review): despite the .h5 extension this is read via the same binary
# `load` as the scaler (presumably pickle/joblib) — confirm with the writer.
with open(filename_holder + 'model.h5', 'rb') as fp:
    model = load(fp)

# Load and resample the audio to be classified.
signal, rate = librosa.load(args['inferencia'], sr=sampling_rate)

# signal = Audio.trim(signal)

# Truncate the signal to a whole number of fixed-length segments.
segment_time = 5  # seconds per segment
signal = signal[:len(signal) - len(signal) % (rate * segment_time)]
Example #3
0
# Fit the grid-search model and collect train/test scores.
# NOTE(review): `model` and the X_*/y_* splits come from a part of the script
# not visible in this extract.
model.fit(X_train, y_train)

best_params = model.best_params_

score_test = model.score(X_test, y_test)

score_train = model.score(X_train, y_train)

y_hat = model.predict(X_test)

# Presumably validates/creates the people/segments path component — confirm
# against Directory.verify_people_segments. The result is not used again in
# this visible extract.
filename_ps = Directory.verify_people_segments(
    people=people, segments=segments)

# Save accuracies and parameters (original comment: "SALVA ACURÁCIAS E PARAMETROS").
Model.dump_grid(
    Directory.model_filename(
        'svm', language, library, normalization, score_test, augmentation=augment),
    model=model,
    language=language,
    method='Support Vector Machines',
    normalization=normalization,
    sampling_rate=sampling_rate,
    augmentation=augment,
    shape=X_train.shape,
    seed=random_state,
    library=library,
    sizes=[len(X_train), len(X_valid), len(X_test)],
    score_train=score_train,
    score_test=score_test,
)
Example #4
0
# Grid-search over param_grid with 5-fold CV, using all CPU cores.
# NOTE(review): `kc` (the estimator, presumably a Keras wrapper) and
# `param_grid` are defined outside this extract.
model = GridSearchCV(
    estimator=kc, param_grid=param_grid, n_jobs=-1, cv=5)


model.fit(X_train, y_train)

best_params = model.best_params_

score_test = model.score(X_test, y_test)

score_train = model.score(X_train, y_train)

y_hat = model.predict(X_test)

# Output path for the serialized CNN architecture JSON.
filename_model = Directory.model_filename(
    'cnn', language, library, normalization, score_test, json=False)+'model.json'

# Persist the model architecture as JSON.
JSON.create_json_file(
    file=filename_model,
    data=build_model().to_json()
)

# SALVA ACURÁCIAS E PARAMETROS
Model.dump_grid(
    Directory.model_filename(
        'cnn', language, library, normalization, score_test),
    model=model,
    language=language,
    method='CNN',
    normalization=normalization,
    sampling_rate=sampling_rate,