from typing import Any, Dict, List, Mapping, MutableMapping, Sequence

import numpy as np
import tensorflow as tf

# NOTE: HeapItem, recall_from_confusion_matrix, prefix_all_keys, plot_utils,
# fig_to_img and log_images_with_confidence are helpers defined elsewhere in
# the project this example comes from.


def log_run(split: str, epoch: int, writer: tf.summary.SummaryWriter,
            label_names: Sequence[str], metrics: MutableMapping[str, float],
            heaps: Mapping[str, Mapping[int, List[HeapItem]]],
            cm: np.ndarray) -> None:
    """Logs the outputs (metrics, confusion matrix, tp/fp/fn images) from a
    single epoch run to Tensorboard.

    Args:
        metrics: dict, keys already prefixed with {split}/
    """
    per_class_recall = recall_from_confusion_matrix(cm, label_names)
    metrics.update(prefix_all_keys(per_class_recall, f'{split}/label_recall/'))

    # write all summaries to the given writer (the original excerpt never
    # entered the writer's context, so the summaries went to whatever
    # default writer happened to be active)
    with writer.as_default():
        # log metrics
        for metric, value in metrics.items():
            tf.summary.scalar(metric, value, step=epoch)

        # log confusion matrix
        cm_fig = plot_utils.plot_confusion_matrix(cm,
                                                  classes=label_names,
                                                  normalize=True)
        cm_fig_img = tf.convert_to_tensor(fig_to_img(cm_fig)[np.newaxis, ...])
        tf.summary.image(f'confusion_matrix/{split}', cm_fig_img, step=epoch)

        # log tp/fp/fn images
        for heap_type, heap_dict in heaps.items():
            log_images_with_confidence(heap_dict,
                                       label_names,
                                       epoch=epoch,
                                       tag=f'{split}/{heap_type}')
        writer.flush()
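
For orientation, here is a minimal, hypothetical call site for log_run (illustrative values only; it still requires the project helpers listed above to be importable):

val_writer = tf.summary.create_file_writer('logs/val')  # standard TF2 writer
label_names = ['cat', 'dog']
metrics = {'val/loss': 0.42, 'val/acc': 0.87}  # keys already prefixed with split
cm = np.array([[40, 2],
               [5, 53]])  # rows = true labels, cols = predictions
# an empty heaps dict skips the tp/fp/fn image-logging loop
log_run('val', epoch=3, writer=val_writer, label_names=label_names,
        metrics=metrics, heaps={}, cm=cm)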
Example #2
def create_summary(writer: tf.summary.SummaryWriter,
                   optimizer_name: str,
                   nb_img_utilisees: int,
                   optimizer_parameters: Dict[str, Any],
                   loss: str,
                   metriques_utilisees: List[str],
                   but_essai: str,
                   informations_additionnelles: str,
                   id: str,
                   dataset_name: str = "",
                   taille_x_img: int = 1600,
                   taille_y_img: int = 900,
                   taille_x_img_redim: int = 400,
                   taille_y_img_redim: int = 225,
                   batch_size: int = 10,
                   nb_img_tot: int = 173959,
                   nb_epochs: int = 1):
    markdown = f"""# Résumé de l'entrainement du {id}

Entrainement sur {dataset_name} ({nb_img_tot} images ; {min(nb_img_utilisees,nb_img_tot)} utilisées) avec des images de taille {taille_x_img} px par {taille_y_img} px redimensionnées à {taille_x_img_redim} px x {taille_y_img_redim} px
Batch size de {batch_size}


## Paramètres d'entrainement

Entrainement sur {nb_epochs} epochs

Optimisateur {optimizer_name} avec les paramètres :\n"""
    for k, v in optimizer_parameters.items():
        markdown += f"{k} : {v}"
    markdown += f"""\nLoss : {loss}

Métriques : """
    markdown += ", ".join([f"{metrique}" for metrique in metriques_utilisees])
    markdown += f"""\n## Description de l'essai\n\n{but_essai}\n\n{informations_additionnelles}"""

    with writer.as_default():
        tf.summary.text("Summary", markdown, step=0)
        writer.flush()
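
A hypothetical call with made-up run parameters; the rendered Markdown shows up under TensorBoard's Text tab:

writer = tf.summary.create_file_writer('logs/run_01')
create_summary(writer,
               optimizer_name='Adam',
               nb_img_utilisees=10000,
               optimizer_parameters={'learning_rate': 1e-3, 'beta_1': 0.9},
               loss='categorical_crossentropy',
               metriques_utilisees=['accuracy'],
               but_essai='Baseline run',
               informations_additionnelles='No data augmentation.',
               id='run_01',
               dataset_name='my_dataset')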
Example #3
def save_dict_to_tensorboard(event_writer: tf.summary.SummaryWriter,
                             scalars: Dict[str, float], step: int):
    # enter the writer's context once (not once per key) and avoid shadowing
    # the built-in name `dict`
    with event_writer.as_default():
        for key, val in scalars.items():
            tf.summary.scalar(name=key, data=val, step=step)
    event_writer.flush()
Example #4
def write_scalars(writer: tf.summary.SummaryWriter,
                  scalar_dict: Dict[str, float], step: int):
    with writer.as_default():
        for k, v in scalar_dict.items():
            tf.summary.scalar(k, v, step=step)
    writer.flush()
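
Examples #3 and #4 implement the same pattern: open the writer's context once, write a dict of scalars, then flush. A hypothetical training loop using either one:

writer = tf.summary.create_file_writer('logs/train')
for step in range(100):
    write_scalars(writer, {'train/loss': 1.0 / (step + 1)}, step)
# inspect with: tensorboard --logdir logs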