def evaluate(model, data_input, gold_output):
    """Score *model* on *data_input* against *gold_output* and print metrics.

    Handles both sequence-labelling output (3-D predictions, one class
    distribution per token) and plain classification output (2-D).
    Returns a tuple of (predicted class indices, raw prediction scores).
    """
    predictions = model.predict(
        data_input,
        batch_size=keras_models.model_params['batch_size'],
        verbose=1)

    if predictions.ndim == 3:
        # Per-token labelling: report per-sentence accuracy first.
        predicted_labels = np.argmax(predictions, axis=2)
        per_sentence = metrics.accuracy_per_sentence(predicted_labels, gold_output)
        print("Results (per sentence): ", per_sentence)
        # Flatten to a single token stream and drop positions whose
        # gold label is 0 (padding / empty class) before global scoring.
        gold_stream = gold_output.reshape(-1)
        predicted_labels = predicted_labels.reshape(-1)
        keep = gold_stream != 0
        gold_stream = gold_stream[keep]
        predicted_labels = predicted_labels[keep]
    else:
        predicted_labels = np.argmax(predictions, axis=1)
        gold_stream = gold_output

    accuracy = metrics.accuracy(predicted_labels, gold_stream)
    micro_scores = metrics.compute_micro_PRF(
        predicted_labels, gold_stream, empty_label=keras_models.p0_index)
    print("Results: Accuracy: ", accuracy)
    print("Results: Micro-Average F1: ", micro_scores)
    return predicted_labels, predictions
def f_train(params):
    """Hyperopt objective: train a model with *params*, return negated val accuracy.

    Builds the model named by the module-level ``model_name`` via
    ``keras_models``, fits it on the training split with early stopping on
    validation loss, then computes micro accuracy on the validation split.

    Returns a hyperopt result dict: ``{'loss': -acc, 'status': hy.STATUS_OK}``
    (loss is negated so that minimizing loss maximizes accuracy).
    """
    model = getattr(keras_models, model_name)(
        params, embedding_matrix, max_sent_len, n_out)
    model.fit(
        train_as_indices[:-1],
        [train_y_properties_one_hot],
        epochs=20,
        batch_size=keras_models.model_params['batch_size'],
        verbose=1,
        validation_data=(val_as_indices[:-1], val_y_properties_one_hot),
        callbacks=[callbacks.EarlyStopping(
            monitor="val_loss", patience=1, verbose=1)])
    # Use the shared batch size instead of a hard-coded 16 so prediction
    # batching stays consistent with training and with evaluate().
    predictions = model.predict(
        val_as_indices[:-1],
        batch_size=keras_models.model_params['batch_size'],
        verbose=1)
    predictions_classes = np.argmax(predictions, axis=1)
    # NOTE(review): unlike evaluate(), no empty_label is passed here, so the
    # empty/"no relation" class is included in the micro average — confirm
    # this is intentional before tuning against it.
    _, _, acc = metrics.compute_micro_PRF(predictions_classes, val_as_indices[-1])
    return {'loss': -acc, 'status': hy.STATUS_OK}