Example #1
def make_session_end_summary(status: str, end_time_secs: Optional[int] = None):
    """

    Args:
        status: outcome of this run, one of of 'UNKNOWN', 'SUCCESS', 'FAILURE', 'RUNNING'
        end_time_secs: optional ending time in seconds

    Returns:

    """
    status = Status.DESCRIPTOR.values_by_name[
        f"STATUS_{status.upper()}"].number
    if end_time_secs is None:
        import time

        end_time_secs = int(time.time())

    session_end_summary = SessionEndInfo(status=status,
                                         end_time_secs=end_time_secs)
    session_end_content = HParamsPluginData(
        session_end_info=session_end_summary, version=PLUGIN_DATA_VERSION)
    session_end_summary_metadata = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME,
            content=session_end_content.SerializeToString()))
    session_end_summary = Summary(value=[
        Summary.Value(tag=SESSION_END_INFO_TAG,
                      metadata=session_end_summary_metadata)
    ])

    return session_end_summary
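A minimal usage sketch, assuming a tensorboardX SummaryWriter and the proto imports used by the snippet above (the logdir name is hypothetical):

from tensorboardX import SummaryWriter

writer = SummaryWriter("./runs/hparams_demo")
# Mark the session as finished; end_time_secs defaults to the current time.
writer.file_writer.add_summary(make_session_end_summary("SUCCESS"))
writer.close()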
Example #2
def make_session_start_summary(
    hparam_values,
    group_name: Optional[str] = None,
    start_time_secs: Optional[int] = None,
):
    """Assign values to the hyperparameters in the context of this session.

    Args:
        hparam_values: a dict of ``hp_name`` -> ``hp_value`` mappings
        group_name: optional group name for this session
        start_time_secs: optional starting time in seconds

    Returns:
        A `Summary` protocol buffer carrying the session-start info.
    """
    if start_time_secs is None:
        import time

        start_time_secs = int(time.time())
    session_start_info = SessionStartInfo(group_name=group_name,
                                          start_time_secs=start_time_secs)

    for hp_name, hp_value in hparam_values.items():
        # Logging a None would raise an exception when setting
        # session_start_info.hparams[hp_name].number_value = None.
        # Logging float('nan') instead would work, but the run would then not show up at
        # all in the TensorBoard hparams plugin. The least bad option is to skip the value:
        # the run shows a blank cell in the table view, but is omitted from the parallel
        # coordinates and scatter plot matrix views.
        if hp_value is None:
            loguru.warning(
                f"Hyperparameter {hp_name} is `None`: the TensorBoard hparams plugin "
                f"will show this run in the table view, but not in the parallel "
                f"coordinates or scatter plot matrix views")
            continue

        if isinstance(hp_value, string_types):
            session_start_info.hparams[hp_name].string_value = hp_value
            continue

        if isinstance(hp_value, bool):
            session_start_info.hparams[hp_name].bool_value = hp_value
            continue

        if not isinstance(hp_value, (int, float)):
            hp_value = make_np(hp_value)[0]

        session_start_info.hparams[hp_name].number_value = hp_value

    session_start_content = HParamsPluginData(
        session_start_info=session_start_info, version=PLUGIN_DATA_VERSION)
    session_start_summary_metadata = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME,
            content=session_start_content.SerializeToString()))
    session_start_summary = Summary(value=[
        Summary.Value(tag=SESSION_START_INFO_TAG,
                      metadata=session_start_summary_metadata)
    ])

    return session_start_summary
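A short end-to-end sketch pairing the session-start and session-end summaries (hyperparameter names and logdir are hypothetical; assumes a tensorboardX SummaryWriter):

from tensorboardX import SummaryWriter

writer = SummaryWriter("./runs/hparams_demo")
start = make_session_start_summary({"lr": 1e-3, "optimizer": "adam"}, group_name="trial_0")
writer.file_writer.add_summary(start)
# ... training loop: writer.add_scalar("val/accuracy", acc, step) ...
writer.file_writer.add_summary(make_session_end_summary("SUCCESS"))
writer.close()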
Example #3
def add_video(name, imgs, epoch, tb_writer):
    # Make a video from the given imgs and output to tensorboard.
    strt = time.time()
    vid_imgs = np.array(imgs)  # TxHxWxC, C=RGBA
    video = tensorboardX.summary.make_video(vid_imgs, fps=12)  # slower: fps<24
    vs = Summary(value=[Summary.Value(tag=name+'_video', image=video)])
    tb_writer.file_writer.add_summary(vs, global_step=epoch)
    # add_video takes NxTxHxWxC and fails on RGBA
    #tb_writer.add_video(name+'_video', vid_tensor=vid_imgs, fps=4)
    print('{:s} video took {:.2f}s'.format(name, time.time() - strt))
Example #4
def add_video(name, imgs, epoch, tb_writer):
    strt = time.time()
    from tensorboardX import summary
    from tensorboardX.proto.summary_pb2 import Summary
    vid_imgs = np.array(imgs)  # TxHxWxC, C=RGBA
    video = summary.make_video(vid_imgs, fps=24)
    vs = Summary(value=[Summary.Value(tag=name + '_video', image=video)])
    tb_writer.file_writer.add_summary(vs, global_step=epoch)
    # add_video takes NxTxHxWxC and fails on RGBA
    #tb_writer.add_video(name+'_video', vid_tensor=vid_imgs, fps=4)
    print('{:s} video took {:.2f}s'.format(name, time.time() - strt))
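A usage sketch with synthetic frames (assumes numpy, moviepy installed for tensorboardX's make_video, and a SummaryWriter; the frame size is arbitrary):

import numpy as np
from tensorboardX import SummaryWriter

tb_writer = SummaryWriter("./runs/video_demo")
# Sixteen random 64x64 RGBA frames; add_video stacks them to TxHxWxC.
frames = [np.random.randint(0, 255, size=(64, 64, 4), dtype=np.uint8) for _ in range(16)]
add_video("rollout", frames, epoch=0, tb_writer=tb_writer)
tb_writer.close()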
Example #5
def add_video(writer,
              tag,
              tensor_thwc,
              global_step=None,
              fps=30,
              walltime=None):
    """found that add_video from tbX is buggy"""
    tag = _clean_tag(tag)
    video = make_video(tensor_thwc, fps)
    summary = Summary(value=[Summary.Value(tag=tag, image=video)])
    writer.file_writer.add_summary(summary, global_step, walltime)
Example #6
 def add_image(self, tag, img_np, global_step=None):
     """Add image data to summary.
         Note that this requires the ``pillow`` package.
         Args:
             tag (string): Data identifier
             img_np (np.ndarray): Image data, uint8
             global_step (int): Global step value to record
         Shape:
             img_tensor: :math:`(H, W, 3)`.
         """
     tag = tensorboardX.summary._clean_tag(tag)
     image = tensorboardX.summary.make_image(img_np)
     self.file_writer.add_summary(Summary(value=[Summary.Value(tag=tag, image=image)]), global_step)
Example #7
def clips_to_video(clips, h, w, c):
    # encode sequence of images into gif string
    clip = concatenate_videoclips(clips)

    filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name

    # For moviepy >= 1.0.0, pass logger=None to suppress the progress output.
    try:
        clip.write_gif(filename, verbose=False, logger=None)
    except TypeError:
        get_logger().warning(
            "Upgrade to moviepy >= 1.0.0 to suppress the progress bar.")
        clip.write_gif(filename, verbose=False)

    with open(filename, "rb") as f:
        tensor_string = f.read()

    try:
        os.remove(filename)
    except OSError:
        get_logger().warning(
            "The temporary file used by moviepy cannot be deleted.")

    return TBXSummary.Image(height=h,
                            width=w,
                            colorspace=c,
                            encoded_image_string=tensor_string)
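A hedged usage sketch, assuming moviepy 1.x (matching the write_gif call above) and numpy; the frame size and fps are arbitrary:

import numpy as np
from moviepy.editor import ImageClip

# Two one-second clips of random RGB frames; set_fps so write_gif can pick a frame rate.
clips = [ImageClip(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8), duration=1).set_fps(12)
         for _ in range(2)]
gif_image = clips_to_video(clips, h=64, w=64, c=3)
# gif_image is a TBXSummary.Image; wrap it in a Summary value to write it (see Examples #15/#19).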
Example #8
def image(tag, tensor, rescale=1, dataformats="CHW"):
    """Outputs a `Summary` protocol buffer with images. The summary has up to
    `max_images` summary values containing images. The images are built from
    `tensor` which must be 3-D with shape `[height, width, channels]` and where
    `channels` can be:

    *  1: `tensor` is interpreted as Grayscale.
    *  3: `tensor` is interpreted as RGB.
    *  4: `tensor` is interpreted as RGBA.

    Args:
      tag: A name for the generated node. Will also serve as a series name in
        TensorBoard.
      tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,
        channels]` where `channels` is 1, 3, or 4.
        'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8).
        The image() function will scale the image values to [0, 255] by applying
        a scale factor of either 1 (uint8) or 255 (float32).
    Returns:
      A scalar `Tensor` of type `string`. The serialized `Summary` protocol
      buffer.
    """
    tag = tbxsummary._clean_tag(tag)
    tensor = tbxmake_np(tensor)
    tensor = convert_to_HWC(tensor, dataformats)
    # Do not assume that user passes in values in [0, 255], use data type to detect
    if tensor.dtype != np.uint8:
        tensor = (tensor * 255.0).astype(np.uint8)

    image = tbxsummary.make_image(tensor, rescale=rescale)
    return TBXSummary(value=[TBXSummary.Value(tag=tag, image=image)])
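A short usage sketch with a random float image in the default CHW layout (assumes numpy; the tag is hypothetical):

import numpy as np

# Random 3x32x32 float image in [0, 1]; image() converts it to HWC and rescales to uint8.
summary = image("samples/random", np.random.rand(3, 32, 32).astype(np.float32))
# The returned TBXSummary can be written with writer.file_writer.add_summary(summary, step).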
Example #9
def text(tag, text):
    plugin_data = SummaryMetadata.PluginData(
        plugin_name="text",
        content=TextPluginData(version=0).SerializeToString())
    smd = SummaryMetadata(plugin_data=plugin_data)
    string_val = []
    for item in text:
        string_val.append(item.encode(encoding="utf_8"))
    tensor = TensorProto(
        dtype="DT_STRING",
        string_val=string_val,
        tensor_shape=TensorShapeProto(
            dim=[TensorShapeProto.Dim(size=len(text))]),
    )

    return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
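A minimal usage sketch (tag and strings are hypothetical); the resulting Summary is written like any other:

notes = text("notes/run_config", ["lr=0.001", "optimizer=adam"])
# writer.file_writer.add_summary(notes, global_step=0)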
Example #10
 def test_event_file_writer_roundtrip(self):
     _TAGNAME = 'dummy'
     _DUMMY_VALUE = 42
     logdir = self.get_temp_dir()
     w = EventFileWriter(logdir)
     summary = Summary(
         value=[Summary.Value(tag=_TAGNAME, simple_value=_DUMMY_VALUE)])
     fakeevent = event_pb2.Event(summary=summary)
     w.add_event(fakeevent)
     w.close()
     event_files = sorted(glob.glob(os.path.join(logdir, '*')))
     self.assertEqual(len(event_files), 1)
     r = PyRecordReader_New(event_files[0])
     r.GetNext()  # meta data, so skip
     r.GetNext()
     self.assertEqual(fakeevent.SerializeToString(), r.record())
Example #11
def tf_summary_to_dict(tf_summary_str_or_pb, namespace=""):
    """Convert a Tensorboard Summary to a dictionary

    Accepts either a tensorflow.summary.Summary
    or one encoded as a string.
    """
    values = {}
    if isinstance(tf_summary_str_or_pb, Summary):
        summary_pb = tf_summary_str_or_pb
    elif isinstance(tf_summary_str_or_pb, Event):
        summary_pb = tf_summary_str_or_pb.summary
        values["global_step"] = tf_summary_str_or_pb.step
        values["_timestamp"] = tf_summary_str_or_pb.wall_time
    else:
        summary_pb = Summary()
        summary_pb.ParseFromString(tf_summary_str_or_pb)

    for value in summary_pb.value:
        kind = value.WhichOneof("value")
        if kind == "simple_value":
            values[namespaced_tag(value.tag, namespace)] = value.simple_value
        elif kind == "image":
            from PIL import Image
            image = wandb.Image(
                Image.open(six.BytesIO(value.image.encoded_image_string)))
            tag_idx = value.tag.rsplit('/', 1)
            if len(tag_idx) > 1 and tag_idx[1].isdigit():
                tag, idx = tag_idx
                values.setdefault(history_image_key(tag), []).append(image)
            else:
                values[history_image_key(value.tag)] = image
        # Coming soon...
        # elif kind == "audio":
        #    audio = wandb.Audio(six.BytesIO(value.audio.encoded_audio_string),
        #                        sample_rate=value.audio.sample_rate, content_type=value.audio.content_type)
        elif kind == "histo":
            first = value.histo.bucket_limit[0] + \
                value.histo.bucket_limit[0] - value.histo.bucket_limit[1]
            last = value.histo.bucket_limit[-2] + \
                value.histo.bucket_limit[-2] - value.histo.bucket_limit[-3]
            np_histogram = (list(value.histo.bucket),
                            [first] + value.histo.bucket_limit[:-1] + [last])
            values[namespaced_tag(
                value.tag,
                namespace)] = wandb.Histogram(np_histogram=np_histogram)

    return values
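A small round-trip sketch, assuming the Summary proto class imported by this module:

scalar_summary = Summary(value=[Summary.Value(tag="loss", simple_value=0.25)])
values = tf_summary_to_dict(scalar_summary)                      # -> {'loss': 0.25} with the default namespace
values = tf_summary_to_dict(scalar_summary.SerializeToString())  # same result from the encoded form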
Example #12
def summary_image(img):
    output = io.BytesIO()
    img.save(output, format='PNG')
    encoded = output.getvalue()
    output.close()
    return Summary.Image(height=img.height,
                         width=img.width,
                         colorspace=len(img.getbands()),
                         encoded_image_string=encoded)
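A minimal usage sketch with a Pillow image (assumes PIL and numpy; the image content is random):

import numpy as np
from PIL import Image

img = Image.fromarray(np.random.randint(0, 255, (48, 64, 3), dtype=np.uint8))
img_proto = summary_image(img)  # Summary.Image with height=48, width=64, colorspace=3
# add_image() in Example #18 shows how this proto is wrapped and written.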
Example #13
def make_video(tensor, fps):
    t, h, w, c = tensor.shape
    with tempfile.NamedTemporaryFile() as f:
        filename = f.name + '.gif'
    images = [tensor[i] for i in range(t)]
    # Honor the fps argument: imageio's GIF writer takes seconds per frame.
    imageio.mimwrite(filename, images, duration=1.0 / fps)
    with open(filename, 'rb') as f:
        tensor_string = f.read()
    try:
        os.remove(filename)
    except OSError:
        pass

    return Summary.Image(height=h,
                         width=w,
                         colorspace=c,
                         encoded_image_string=tensor_string)
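A short usage sketch with random frames (assumes numpy and imageio; frame count and size are arbitrary):

import numpy as np

frames = np.random.randint(0, 255, size=(16, 64, 64, 3), dtype=np.uint8)  # TxHxWxC
gif_proto = make_video(frames, fps=25)
# gif_proto is a Summary.Image; wrap it as Summary(value=[Summary.Value(tag=..., image=gif_proto)]).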
Example #14
def make_experiment_summary(hparam_infos, metric_infos, experiment):
    """Define hyperparameters and metrics.

    Args:
        hparam_infos: information about all hyperparameters (name, description, type etc.),
            list of dicts containing 'name' (required), 'type', 'description', 'display_name',
            'domain_discrete', 'domain_interval'
        metric_infos: information about all metrics (tag, description etc.),
            list of dicts containing 'tag' (required), 'dataset_type', 'description', 'display_name'
        experiment: dict containing 'name' (required), 'description', 'time_created_secs', 'user'

    Returns:
        A `Summary` protocol buffer carrying the experiment definition.
    """
    def make_hparam_info(hparam):
        data_type = hparam.get("type")
        if data_type is None:
            data_type = DataType.DATA_TYPE_UNSET
        elif data_type in string_types:
            data_type = DataType.DATA_TYPE_STRING
        elif data_type is bool:
            data_type = DataType.DATA_TYPE_BOOL
        elif data_type in (float, int):
            data_type = DataType.DATA_TYPE_FLOAT64
        return HParamInfo(
            name=hparam["name"],
            type=data_type,
            description=hparam.get("description"),
            display_name=hparam.get("display_name"),
            domain_discrete=hparam.get("domain_discrete"),
            domain_interval=hparam.get("domain_interval"),
        )

    def make_metric_info(metric):
        return MetricInfo(
            name=MetricName(tag=metric["tag"]),
            dataset_type=DatasetType.Value(
                f'DATASET_{metric.get("dataset_type", "UNKNOWN").upper()}'),
            description=metric.get("description"),
            display_name=metric.get("display_name"),
        )

    def make_experiment_info(experiment, metric_infos, hparam_infos):
        return Experiment(
            name=experiment["name"],
            description=experiment.get("description"),
            time_created_secs=experiment.get("time_created_secs"),
            user=experiment.get("user"),
            metric_infos=metric_infos,
            hparam_infos=hparam_infos,
        )

    metric_infos = [make_metric_info(m) for m in metric_infos]
    hparam_infos = [make_hparam_info(hp) for hp in hparam_infos]
    experiment = make_experiment_info(experiment, metric_infos, hparam_infos)

    experiment_content = HParamsPluginData(experiment=experiment,
                                           version=PLUGIN_DATA_VERSION)
    experiment_summary_metadata = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME,
            content=experiment_content.SerializeToString()))
    experiment_summary = Summary(value=[
        Summary.Value(tag=EXPERIMENT_TAG, metadata=experiment_summary_metadata)
    ])

    return experiment_summary
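A hedged usage sketch with hypothetical hyperparameter and metric definitions; the experiment summary is logged once, followed by per-run session summaries (Examples #1 and #2):

hparam_infos = [
    {"name": "lr", "type": float, "description": "learning rate"},
    {"name": "optimizer", "type": str, "display_name": "Optimizer"},
]
metric_infos = [{"tag": "val/accuracy", "dataset_type": "validation"}]
experiment = {"name": "hparam_search", "user": "demo"}
experiment_summary = make_experiment_summary(hparam_infos, metric_infos, experiment)
# writer.file_writer.add_summary(experiment_summary)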
Example #15
 def _video(tag, vid):
     # noinspection PyProtectedMember
     tag = tbxsummary._clean_tag(tag)
     return TBXSummary(value=[TBXSummary.Value(tag=tag, image=vid)])
Example #16
def tf_summary_to_dict(tf_summary_str_or_pb, namespace=""):
    """Convert a Tensorboard Summary to a dictionary

    Accepts either a tensorflow.summary.Summary
    or one encoded as a string.
    """
    values = {}
    if hasattr(tf_summary_str_or_pb, "summary"):
        summary_pb = tf_summary_str_or_pb.summary
        values[namespaced_tag("global_step",
                              namespace)] = tf_summary_str_or_pb.step
        values["_timestamp"] = tf_summary_str_or_pb.wall_time
    elif isinstance(tf_summary_str_or_pb, (str, bytes, bytearray)):
        summary_pb = Summary()
        summary_pb.ParseFromString(tf_summary_str_or_pb)
    else:
        if not hasattr(tf_summary_str_or_pb, "value"):
            raise ValueError(
                "Can't log %s, only Event, Summary, or Summary proto buffer "
                "strings are accepted" % type(tf_summary_str_or_pb))
        else:
            summary_pb = tf_summary_str_or_pb

    for value in summary_pb.value:
        kind = value.WhichOneof("value")
        if kind in IGNORE_KINDS:
            continue
        if kind == "simple_value":
            values[namespaced_tag(value.tag, namespace)] = value.simple_value
        elif kind == "tensor":
            values[namespaced_tag(value.tag,
                                  namespace)] = make_ndarray(value.tensor)
        elif kind == "image":
            from PIL import Image
            img_str = value.image.encoded_image_string
            # Supports gifs from TboardX
            if img_str.startswith(b"GIF"):
                image = wandb.Video(six.BytesIO(img_str), format="gif")
            else:
                image = wandb.Image(Image.open(six.BytesIO(img_str)))
            tag_idx = value.tag.rsplit('/', 1)
            if len(tag_idx) > 1 and tag_idx[1].isdigit():
                tag, idx = tag_idx
                values.setdefault(history_image_key(tag, namespace),
                                  []).append(image)
            else:
                values[history_image_key(value.tag, namespace)] = [image]
        # Coming soon...
        # elif kind == "audio":
        #    audio = wandb.Audio(six.BytesIO(value.audio.encoded_audio_string),
        #                        sample_rate=value.audio.sample_rate, content_type=value.audio.content_type)
        elif kind == "histo":
            tag = namespaced_tag(value.tag, namespace)
            if len(value.histo.bucket_limit) >= 3:
                first = value.histo.bucket_limit[0] + \
                    value.histo.bucket_limit[0] - value.histo.bucket_limit[1]
                last = value.histo.bucket_limit[-2] + \
                    value.histo.bucket_limit[-2] - value.histo.bucket_limit[-3]
                np_histogram = (list(value.histo.bucket), [first] +
                                value.histo.bucket_limit[:-1] + [last])
                try:
                    #TODO: we should just re-bin if there are too many buckets
                    values[tag] = wandb.Histogram(np_histogram=np_histogram)
                except ValueError:
                    wandb.termwarn(
                        "Not logging key \"{}\".  Histograms must have fewer than {} bins"
                        .format(tag, wandb.Histogram.MAX_LENGTH),
                        repeat=False)
            else:
                #TODO: is there a case where we can render this?
                wandb.termwarn(
                    "Not logging key \"{}\".  Found a histogram with only 2 bins."
                    .format(tag),
                    repeat=False)
        elif value.tag == "_hparams_/session_start_info":
            if wandb.util.get_module("tensorboard.plugins.hparams"):
                from tensorboard.plugins.hparams import plugin_data_pb2
                plugin_data = plugin_data_pb2.HParamsPluginData()
                plugin_data.ParseFromString(value.metadata.plugin_data.content)
                for key, param in six.iteritems(
                        plugin_data.session_start_info.hparams):
                    if not wandb.run.config.get(key):
                        wandb.run.config[
                            key] = param.number_value or param.string_value or param.bool_value
            else:
                wandb.termerror(
                    "Received hparams tf.summary, but could not import the hparams plugin from tensorboard"
                )
    return values
Example #17
def tf_summary_to_dict(tf_summary_str_or_pb, namespace=""):
    """Convert a Tensorboard Summary to a dictionary

    Accepts either a tensorflow.summary.Summary
    or one encoded as a string.
    """
    values = {}
    if hasattr(tf_summary_str_or_pb, "summary"):
        summary_pb = tf_summary_str_or_pb.summary
        values[namespaced_tag("global_step",
                              namespace)] = tf_summary_str_or_pb.step
        values["_timestamp"] = tf_summary_str_or_pb.wall_time
    elif isinstance(tf_summary_str_or_pb, (str, bytes, bytearray)):
        summary_pb = Summary()
        summary_pb.ParseFromString(tf_summary_str_or_pb)
    else:
        if not hasattr(tf_summary_str_or_pb, "value"):
            raise ValueError(
                "Can't log %s, only Event, Summary, or Summary proto buffer "
                "strings are accepted" % type(tf_summary_str_or_pb))
        else:
            summary_pb = tf_summary_str_or_pb

    for value in summary_pb.value:
        kind = value.WhichOneof("value")
        if kind == "simple_value":
            values[namespaced_tag(value.tag, namespace)] = value.simple_value
        elif kind == "image":
            from PIL import Image
            image = wandb.Image(
                Image.open(six.BytesIO(value.image.encoded_image_string)))
            tag_idx = value.tag.rsplit('/', 1)
            if len(tag_idx) > 1 and tag_idx[1].isdigit():
                tag, idx = tag_idx
                values.setdefault(history_image_key(tag, namespace),
                                  []).append(image)
            else:
                values[history_image_key(value.tag, namespace)] = image
        # Coming soon...
        # elif kind == "audio":
        #    audio = wandb.Audio(six.BytesIO(value.audio.encoded_audio_string),
        #                        sample_rate=value.audio.sample_rate, content_type=value.audio.content_type)
        elif kind == "histo":
            first = value.histo.bucket_limit[0] + \
                value.histo.bucket_limit[0] - value.histo.bucket_limit[1]
            last = value.histo.bucket_limit[-2] + \
                value.histo.bucket_limit[-2] - value.histo.bucket_limit[-3]
            np_histogram = (list(value.histo.bucket),
                            [first] + value.histo.bucket_limit[:-1] + [last])
            values[namespaced_tag(
                value.tag,
                namespace)] = wandb.Histogram(np_histogram=np_histogram)
        elif value.tag == "_hparams_/session_start_info":
            if wandb.util.get_module("tensorboard.plugins.hparams"):
                from tensorboard.plugins.hparams import plugin_data_pb2
                plugin_data = plugin_data_pb2.HParamsPluginData()
                plugin_data.ParseFromString(value.metadata.plugin_data.content)
                for key, param in six.iteritems(
                        plugin_data.session_start_info.hparams):
                    if not wandb.run.config.get(key):
                        wandb.run.config[
                            key] = param.number_value or param.string_value or param.bool_value
            else:
                wandb.termerror(
                    "Received hparams tf.summary, but could not import the hparams plugin from tensorboard"
                )

    return values
Example #18
def add_image(writer, tag, img):
    summary = Summary(value=[Summary.Value(tag=tag, image=summary_image(img))])
    writer.file_writer.add_summary(summary)
Example #19
 def _video(self, tag, vid):
     tag = tbxsummary._clean_tag(tag)
     return TBXSummary(value=[TBXSummary.Value(tag=tag, image=vid)])