Example 1
0
def experiment_pb(hparam_infos,
                  metric_infos,
                  user="",
                  description="",
                  time_created_secs=None):
    """Build a summary that defines a hyperparameter-tuning experiment.

    Args:
      hparam_infos: Array of api_pb2.HParamInfo messages describing the
          hyperparameters used in the experiment.
      metric_infos: Array of api_pb2.MetricInfo messages describing the
          metrics used in the experiment. See the documentation at the top
          of this file for how to populate this.
      user: String. An id for the user running the experiment.
      description: String. A description for the experiment; may contain
          markdown.
      time_created_secs: float. Creation time of the experiment, in seconds
          since the UNIX epoch. When None, the current time is used.

    Returns:
      A summary protobuffer containing the experiment definition.
    """
    created_secs = (
        time.time() if time_created_secs is None else time_created_secs)
    experiment_proto = api_pb2.Experiment(
        description=description,
        user=user,
        time_created_secs=created_secs,
        hparam_infos=hparam_infos,
        metric_infos=metric_infos,
    )
    return _summary(
        metadata.EXPERIMENT_TAG,
        plugin_data_pb2.HParamsPluginData(experiment=experiment_proto),
    )
Example 2
0
def experiment_pb(
    hparam_infos,
    metric_infos,
    user="",
    description="",
    time_created=None):
  """Creates a summary that defines a hyperparameter-tuning experiment.

  Arguments:
    hparam_infos: Array of api_pb2.HParamInfo messages. Describes the
        hyperparameters used in the experiment.
    metric_infos: Array of api_pb2.MetricInfo messages. Describes the metrics
        used in the experiment. See the documentation at the top of this file
        for how to populate this.
    user: String. An id for the user running the experiment
    description: String. A description for the experiment. May contain markdown.
    time_created: datetime. The time the experiment is created. Defaults to
        the current time when None.

  Returns:
    A summary protobuffer containing the experiment definition.
  """
  # Resolve the default at call time: a `datetime.datetime.now()` default in
  # the signature would be evaluated once at import time and then frozen for
  # every subsequent call.
  if time_created is None:
    time_created = datetime.datetime.now()
  # Timestamp.FromDatetime mutates the message in place and returns None, so
  # its return value must not be passed as the `time_created` field directly.
  time_created_pb = timestamp_pb2.Timestamp()
  time_created_pb.FromDatetime(time_created)
  experiment = api_pb2.Experiment(
      description=description,
      user=user,
      time_created=time_created_pb,
      hparam_infos=hparam_infos,
      metric_infos=metric_infos)
  return _summary(metadata.EXPERIMENT_TAG,
                  plugin_data_pb2.HParamsPluginData(experiment=experiment))
Example 3
0
 def _compute_experiment_from_runs(self):
     """Scans the runs and builds a minimal Experiment protocol buffer.

     Returns None when no hparam infos could be computed.
     """
     hparams = self._compute_hparam_infos()
     if hparams:
         return api_pb2.Experiment(
             hparam_infos=hparams,
             metric_infos=self._compute_metric_infos())
     return None
Example 4
0
 def test_experiment_pb(self):
     """Checks that experiment_pb() emits the exact expected Summary proto."""
     # Two representative hparams: a discrete string-valued one and a
     # float-valued one constrained to an interval.
     hparam_infos = [
         api_pb2.HParamInfo(
             name="param1",
             display_name="display_name1",
             description="foo",
             type=api_pb2.DATA_TYPE_STRING,
             domain_discrete=struct_pb2.ListValue(values=[
                 struct_pb2.Value(string_value="a"),
                 struct_pb2.Value(string_value="b"),
             ]),
         ),
         api_pb2.HParamInfo(
             name="param2",
             display_name="display_name2",
             description="bar",
             type=api_pb2.DATA_TYPE_FLOAT64,
             domain_interval=api_pb2.Interval(min_value=-100.0,
                                              max_value=100.0),
         ),
     ]
     # One metric per dataset type; the second also exercises the `group`
     # component of the metric name.
     metric_infos = [
         api_pb2.MetricInfo(
             name=api_pb2.MetricName(tag="loss"),
             dataset_type=api_pb2.DATASET_VALIDATION,
         ),
         api_pb2.MetricInfo(
             name=api_pb2.MetricName(group="train/", tag="acc"),
             dataset_type=api_pb2.DATASET_TRAINING,
         ),
     ]
     # Fixed creation time so the expected proto below is deterministic.
     time_created_secs = 314159.0
     self.assertEqual(
         summary.experiment_pb(hparam_infos,
                               metric_infos,
                               time_created_secs=time_created_secs),
         # The expected summary: a single value tagged with the experiment
         # tag, a null tensor, and the experiment serialized into the
         # hparams plugin-data content.
         tf.compat.v1.Summary(value=[
             tf.compat.v1.Summary.Value(
                 tag="_hparams_/experiment",
                 tensor=summary._TF_NULL_TENSOR,
                 metadata=tf.compat.v1.SummaryMetadata(
                     plugin_data=tf.compat.v1.SummaryMetadata.PluginData(
                         plugin_name="hparams",
                         content=(plugin_data_pb2.HParamsPluginData(
                             version=0,
                             experiment=api_pb2.Experiment(
                                 time_created_secs=time_created_secs,
                                 hparam_infos=hparam_infos,
                                 metric_infos=metric_infos,
                             ),
                         ).SerializeToString()),
                     )),
             )
         ]),
     )
Example 5
0
 def _compute_experiment_from_runs(self, ctx, experiment_id,
                                   hparams_run_to_tag_to_content):
     """Builds a minimal Experiment protocol buffer by scanning the runs.

     Returns None when no hparam infos could be computed.
     """
     hparams = self._compute_hparam_infos(hparams_run_to_tag_to_content)
     if not hparams:
         return None
     return api_pb2.Experiment(
         hparam_infos=hparams,
         metric_infos=self._compute_metric_infos(
             ctx, experiment_id, hparams_run_to_tag_to_content))
Example 6
0
 def _run_handler(self, experiment, session_groups, response_format):
     """Parses the text protos, builds a download Handler, and runs it.

     Args:
       experiment: Text-format api_pb2.Experiment.
       session_groups: Text-format api_pb2.ListSessionGroupsResponse.
       response_format: The format to request from the handler.

     Returns:
       The result of Handler.run().
     """
     experiment_proto = text_format.Merge(experiment, api_pb2.Experiment())
     session_groups_proto = text_format.Merge(
         session_groups, api_pb2.ListSessionGroupsResponse())
     # One column per hparam plus one per metric; request all of them.
     column_count = (len(experiment_proto.hparam_infos)
                     + len(experiment_proto.metric_infos))
     return download_data.Handler(
         backend_context.Context(self._mock_tb_context),
         experiment_proto,
         session_groups_proto,
         response_format,
         [True] * column_count,
     ).run()
Example 7
0
    def _compute_experiment_from_runs(self, ctx, experiment_id,
                                      hparams_run_to_tag_to_content):
        """Computes a minimal Experiment protocol buffer by scanning the runs.

        Returns an empty Experiment if there are no hparam infos logged.
        """
        hparam_infos = self._compute_hparam_infos(
            hparams_run_to_tag_to_content)
        # Metric infos are only worth computing when at least one hparam
        # info was found; otherwise the experiment is empty.
        metric_infos = (
            self._compute_metric_infos(
                ctx, experiment_id, hparams_run_to_tag_to_content)
            if hparam_infos else [])
        return api_pb2.Experiment(
            hparam_infos=hparam_infos, metric_infos=metric_infos)
Example 8
0
def hparams_config_pb(hparams, metrics, time_created_secs=None):
    # NOTE: Keep docs in sync with `hparams_config` above.
    """Create a top-level experiment configuration.

    This configuration describes the hyperparameters and metrics that will
    be tracked in the experiment, but does not record any actual values of
    those hyperparameters and metrics. It can be created before any models
    are actually trained.

    Args:
      hparams: A list of `HParam` values.
      metrics: A list of `Metric` values.
      time_created_secs: The time that this experiment was created, as
        seconds since epoch. Defaults to the current time.

    Returns:
      A TensorBoard `summary_pb2.Summary` message.
    """

    def _hparam_info(hparam):
        # One HParamInfo per hparam; the domain (when present) fills in the
        # type and range information.
        info = api_pb2.HParamInfo(
            name=hparam.name,
            description=hparam.description,
            display_name=hparam.display_name,
        )
        if hparam.domain is not None:
            hparam.domain.update_hparam_info(info)
        return info

    experiment = api_pb2.Experiment(
        hparam_infos=[_hparam_info(hparam) for hparam in hparams],
        metric_infos=[metric.as_proto() for metric in metrics],
        time_created_secs=time_created_secs,
    )
    return _summary_pb(
        metadata.EXPERIMENT_TAG,
        plugin_data_pb2.HParamsPluginData(experiment=experiment),
    )
Example 9
0
    def setUp(self):
        """Builds the shared hparams/metrics fixture and its expected proto."""
        self.logdir = os.path.join(self.get_temp_dir(), "logs")

        # Hparams covering every domain kind: real interval, int interval,
        # string discrete, no domain at all, and bool discrete with display
        # metadata.
        self.hparams = [
            hp.HParam("learning_rate", hp.RealInterval(1e-2, 1e-1)),
            hp.HParam("dense_layers", hp.IntInterval(2, 7)),
            hp.HParam("optimizer", hp.Discrete(["adam", "sgd"])),
            hp.HParam("who_knows_what"),
            hp.HParam(
                "magic",
                hp.Discrete([False, True]),
                display_name="~*~ Magic ~*~",
                description="descriptive",
            ),
        ]
        # Metrics with increasing amounts of metadata: bare tag, group+tag
        # with a display name, and a fully described validation metric.
        self.metrics = [
            hp.Metric("samples_per_second"),
            hp.Metric(group="train",
                      tag="batch_loss",
                      display_name="loss (train)"),
            hp.Metric(
                group="validation",
                tag="epoch_accuracy",
                display_name="accuracy (val.)",
                description="Accuracy on the _validation_ dataset.",
                dataset_type=hp.Metric.VALIDATION,
            ),
        ]
        # Fixed creation time so the expected proto below is deterministic.
        self.time_created_secs = 1555624767.0

        # The Experiment proto the fixture above is expected to produce;
        # parsed from text format so tests can compare against it directly.
        self.expected_experiment_pb = api_pb2.Experiment()
        text_format.Merge(
            """
            time_created_secs: 1555624767.0
            hparam_infos {
              name: "learning_rate"
              type: DATA_TYPE_FLOAT64
              domain_interval {
                min_value: 0.01
                max_value: 0.1
              }
            }
            hparam_infos {
              name: "dense_layers"
              type: DATA_TYPE_FLOAT64
              domain_interval {
                min_value: 2
                max_value: 7
              }
            }
            hparam_infos {
              name: "optimizer"
              type: DATA_TYPE_STRING
              domain_discrete {
                values {
                  string_value: "adam"
                }
                values {
                  string_value: "sgd"
                }
              }
            }
            hparam_infos {
              name: "who_knows_what"
            }
            hparam_infos {
              name: "magic"
              type: DATA_TYPE_BOOL
              display_name: "~*~ Magic ~*~"
              description: "descriptive"
              domain_discrete {
                values {
                  bool_value: false
                }
                values {
                  bool_value: true
                }
              }
            }
            metric_infos {
              name {
                tag: "samples_per_second"
              }
            }
            metric_infos {
              name {
                group: "train"
                tag: "batch_loss"
              }
              display_name: "loss (train)"
            }
            metric_infos {
              name {
                group: "validation"
                tag: "epoch_accuracy"
              }
              display_name: "accuracy (val.)"
              description: "Accuracy on the _validation_ dataset."
              dataset_type: DATASET_VALIDATION
            }
            """,
            self.expected_experiment_pb,
        )
Example 10
0
  def test_summary_pb(self):
    """Checks Experiment attribute round-tripping and its summary encoding."""
    hparams = [
        hp.HParam("learning_rate", hp.RealInterval(1e-2, 1e-1)),
        hp.HParam("dense_layers", hp.IntInterval(2, 7)),
        hp.HParam("optimizer", hp.Discrete(["adam", "sgd"])),
        hp.HParam("who_knows_what"),
        hp.HParam(
            "magic",
            hp.Discrete([False, True]),
            display_name="~*~ Magic ~*~",
            description="descriptive",
        ),
    ]
    metrics = [
        hp.Metric("samples_per_second"),
        hp.Metric(group="train", tag="batch_loss", display_name="loss (train)"),
        hp.Metric(
            group="validation",
            tag="epoch_accuracy",
            display_name="accuracy (val.)",
            description="Accuracy on the _validation_ dataset.",
            dataset_type=hp.Metric.VALIDATION,
        ),
    ]
    experiment = hp.Experiment(
        hparams=hparams,
        metrics=metrics,
        user="******",
        description="nothing to see here; move along",
        time_created_secs=1555624767,
    )

    self.assertEqual(experiment.hparams, hparams)
    self.assertEqual(experiment.metrics, metrics)
    # The expected user must match the value the Experiment was constructed
    # with (and the `user:` field in the text proto below); the previous
    # assertion compared against an unrelated string and also carried a stray
    # trailing comma that turned the statement into a discarded tuple.
    self.assertEqual(experiment.user, "******")
    self.assertEqual(experiment.description, "nothing to see here; move along")
    self.assertEqual(experiment.time_created_secs, 1555624767)

    expected_experiment_pb = api_pb2.Experiment()
    text_format.Merge(
        """
        description: "nothing to see here; move along"
        user: "******"
        time_created_secs: 1555624767.0
        hparam_infos {
          name: "learning_rate"
          type: DATA_TYPE_FLOAT64
          domain_interval {
            min_value: 0.01
            max_value: 0.1
          }
        }
        hparam_infos {
          name: "dense_layers"
          type: DATA_TYPE_FLOAT64
          domain_interval {
            min_value: 2
            max_value: 7
          }
        }
        hparam_infos {
          name: "optimizer"
          type: DATA_TYPE_STRING
          domain_discrete {
            values {
              string_value: "adam"
            }
            values {
              string_value: "sgd"
            }
          }
        }
        hparam_infos {
          name: "who_knows_what"
        }
        hparam_infos {
          name: "magic"
          type: DATA_TYPE_BOOL
          display_name: "~*~ Magic ~*~"
          description: "descriptive"
          domain_discrete {
            values {
              bool_value: false
            }
            values {
              bool_value: true
            }
          }
        }
        metric_infos {
          name {
            tag: "samples_per_second"
          }
        }
        metric_infos {
          name {
            group: "train"
            tag: "batch_loss"
          }
          display_name: "loss (train)"
        }
        metric_infos {
          name {
            group: "validation"
            tag: "epoch_accuracy"
          }
          display_name: "accuracy (val.)"
          description: "Accuracy on the _validation_ dataset."
          dataset_type: DATASET_VALIDATION
        }
        """,
        expected_experiment_pb,
    )
    actual_summary_pb = experiment.summary_pb()
    plugin_content = actual_summary_pb.value[0].metadata.plugin_data.content
    self.assertEqual(
        metadata.parse_experiment_plugin_data(plugin_content),
        expected_experiment_pb,
    )