Example #1
 def test_scalar(self):
     res = make_np(1.1)
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np((1 << 64) - 1)  # uint64_max
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np(np.float16(1.00000087))
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np(np.float128(1.00008 + 9))
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
     res = make_np(np.int64(100000000000))
     self.assertIsInstance(res, np.ndarray)
     self.assertEqual(res.shape, (1,))
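The assertions above hinge on make_np wrapping Python and NumPy scalars into a one-element ndarray. Below is a minimal sketch of that scalar branch, using a hypothetical helper rather than the actual torch.utils.tensorboard implementation:

import numpy as np

def scalar_to_np(value):
    # Hypothetical sketch: wrap any scalar into a shape-(1,) ndarray,
    # mirroring what test_scalar asserts about make_np's output.
    if np.isscalar(value):
        return np.array([value])
    raise NotImplementedError(f"unsupported type: {type(value)}")

assert scalar_to_np(1.1).shape == (1,)
assert scalar_to_np(np.int64(100000000000)).shape == (1,)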
Example #2
 def add_pr_curve(self,
                  tag,
                  labels,
                  predictions,
                  global_step=None,
                  num_thresholds=127,
                  weights=None,
                  walltime=None):
     torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve")
     labels, predictions = make_np(labels), make_np(predictions)
     self._get_file_writer().add_summary(
         pr_curve(tag, labels, predictions, num_thresholds, weights),
         global_step, walltime)
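For context, a hedged usage sketch of the add_pr_curve API wrapped above, assuming torch.utils.tensorboard's SummaryWriter; the tag and data are made up for illustration:

import numpy as np
from torch.utils.tensorboard import SummaryWriter

# Synthetic binary labels and predicted probabilities, for illustration only.
labels = np.random.randint(2, size=100)
predictions = np.random.rand(100)

with SummaryWriter() as writer:
    # One PR curve is logged per (tag, global_step) pair.
    writer.add_pr_curve('pr_curve/demo', labels, predictions, global_step=0)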
Example #3
 def test_convert_to_HWC_dtype_remains_same(self):
     # Ensure convert_to_HWC preserves the dtype of the input np array, so the
     # scale_factor calculated for the image is 1.
     test_image = torch.tensor([[[[1, 2, 3], [4, 5, 6]]]], dtype=torch.uint8)
     tensor = make_np(test_image)
     tensor = convert_to_HWC(tensor, 'NCHW')
     scale_factor = summary._calc_scale_factor(tensor)
     self.assertEqual(scale_factor, 1, msg='Values are already in [0, 255], scale factor should be 1')
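The comment refers to the scale-factor convention used by the image summaries: uint8 data is assumed to already be in [0, 255], while other dtypes are assumed to hold [0, 1] floats and are scaled by 255. A minimal sketch of that rule, using a hypothetical helper rather than the actual summary._calc_scale_factor (the non-uint8 branch is my assumption):

import numpy as np

def calc_scale_factor_sketch(img):
    # Hypothetical: leave uint8 images untouched, scale everything else by 255.
    return 1 if img.dtype == np.uint8 else 255

assert calc_scale_factor_sketch(np.zeros((2, 3, 3), dtype=np.uint8)) == 1
assert calc_scale_factor_sketch(np.zeros((2, 3, 3), dtype=np.float32)) == 255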
Example #4
def _make_session_start_summary(
    hparam_values,
    group_name: Optional[str] = None,
    start_time_secs: Optional[int] = None,
):
    """Assign values to the hyperparameters in the context of this session.

    Args:
        hparam_values: a dict of ``hp_name`` -> ``hp_value`` mappings
        group_name: optional group name for this session
        start_time_secs: optional starting time in seconds

    Returns:
        A ``Summary`` protobuf containing the ``SessionStartInfo`` for this session.
    """
    if start_time_secs is None:
        start_time_secs = int(time.time())
    session_start_info = SessionStartInfo(group_name=group_name,
                                          start_time_secs=start_time_secs)

    for hp_name, hp_value in hparam_values.items():
        # Logging None would raise an exception when setting
        # session_start_info.hparams[hp_name].number_value = None. Logging
        # float('nan') instead would work, but that run would not show up at all
        # in the tensorboard hparams plugin. The best option is to skip the
        # value: it shows as a blank cell in the table view, although the run
        # is then missing from the parallel coordinates and scatter plot views.
        if hp_value is None:
            logger.warning(
                f"Hyperparameter {hp_name} is `None`: the tensorboard hparams "
                "plugin will show this run in the table view, but not in the "
                "parallel coordinates or scatter plot matrix views")
            continue

        if isinstance(hp_value, (str, list, tuple)):
            session_start_info.hparams[hp_name].string_value = str(hp_value)
            continue

        if isinstance(hp_value, bool):
            session_start_info.hparams[hp_name].bool_value = hp_value
            continue

        if not isinstance(hp_value, (int, float)):
            hp_value = make_np(hp_value)[0]

        session_start_info.hparams[hp_name].number_value = hp_value

    session_start_content = HParamsPluginData(
        session_start_info=session_start_info, version=PLUGIN_DATA_VERSION)
    session_start_summary_metadata = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME,
            content=session_start_content.SerializeToString()))
    session_start_summary = Summary(value=[
        Summary.Value(tag=SESSION_START_INFO_TAG,
                      metadata=session_start_summary_metadata)
    ])

    return session_start_summary
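A hedged usage sketch of the helper above; the hyperparameter values are invented, and writing the returned Summary follows the file-writer add_summary pattern from Example #2:

# Invented hyperparameter values, one per supported branch of the loop above.
hparams = {
    'lr': 0.01,           # numeric -> number_value
    'optimizer': 'adam',  # string  -> string_value
    'use_bn': True,       # bool    -> bool_value
    'dropout': None,      # skipped with a warning, blank cell in the table view
}
summary = _make_session_start_summary(hparams, group_name='trial-0')
# The resulting Summary proto would then be written with a file writer,
# e.g. file_writer.add_summary(summary, global_step), as in Example #2.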
Example #5
    def add_embedding(self,
                      mat,
                      metadata=None,
                      label_img=None,
                      global_step=None,
                      tag='default',
                      metadata_header=None):
        torch._C._log_api_usage_once("tensorboard.logging.add_embedding")
        mat = make_np(mat)
        if global_step is None:
            global_step = 0
            # clear pbtxt?

        # Maybe we should encode the tag so slashes don't trip us up?
        # I don't think this will mess us up, but better safe than sorry.
        subdir = "%s/%s" % (str(global_step).zfill(5), self._encode(tag))
        save_path = os.path.join(self._get_file_writer().get_logdir(), subdir)

        fs = tf.io.gfile.get_filesystem(save_path)
        if fs.exists(save_path):
            if fs.isdir(save_path):
                print(
                    'warning: Embedding dir exists, did you set global_step for add_embedding()?'
                )
            else:
                raise Exception(
                    "Path: `%s` exists, but is a file. Cannot proceed." %
                    save_path)
        else:
            fs.makedirs(save_path)

        if metadata is not None:
            assert mat.shape[0] == len(metadata), \
                '#labels should equal #data points'
            make_tsv(metadata, save_path, metadata_header=metadata_header)

        if label_img is not None:
            assert mat.shape[0] == label_img.shape[0], \
                '#images should equal #data points'
            make_sprite(label_img, save_path)

        assert mat.ndim == 2, 'mat should be 2D, where mat.size(0) is the number of data points'
        make_mat(mat, save_path)

        # Filesystem doesn't necessarily have append semantics, so we store an
        # internal buffer to append to and re-write whole file after each
        # embedding is added
        if not hasattr(self, "_projector_config"):
            self._projector_config = ProjectorConfig()
        embedding_info = get_embedding_info(metadata, label_img, fs, subdir,
                                            global_step, tag)
        self._projector_config.embeddings.extend([embedding_info])

        from google.protobuf import text_format
        config_pbtxt = text_format.MessageToString(self._projector_config)
        write_pbtxt(self._get_file_writer().get_logdir(), config_pbtxt)
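And a hedged usage sketch of an add_embedding call like the one above, assuming torch.utils.tensorboard's SummaryWriter; the shapes, labels, and tag are illustrative:

import torch
from torch.utils.tensorboard import SummaryWriter

# 100 data points with 5 features each, plus a text label and a thumbnail per point.
mat = torch.randn(100, 5)
metadata = [str(i % 10) for i in range(100)]
label_img = torch.rand(100, 3, 32, 32)

with SummaryWriter() as writer:
    writer.add_embedding(mat, metadata=metadata, label_img=label_img,
                         global_step=0, tag='demo_embedding')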
Example #6
        def test_pytorch_histogram_raw(self):
            with SummaryWriter() as w:
                num = 50
                floats = make_np(torch.rand((num, )))
                bins = [0.0, 0.25, 0.5, 0.75, 1.0]
                counts, limits = np.histogram(floats, bins)
                sum_sq = floats.dot(floats).item()
                w.add_histogram_raw('float histogram raw',
                                    min=floats.min().item(),
                                    max=floats.max().item(),
                                    num=num,
                                    sum=floats.sum().item(),
                                    sum_squares=sum_sq,
                                    bucket_limits=limits[1:].tolist(),
                                    bucket_counts=counts.tolist())

                ints = make_np(torch.randint(0, 100, (num, )))
                bins = [0, 25, 50, 75, 100]
                counts, limits = np.histogram(ints, bins)
                sum_sq = ints.dot(ints).item()
                w.add_histogram_raw('int histogram raw',
                                    min=ints.min().item(),
                                    max=ints.max().item(),
                                    num=num,
                                    sum=ints.sum().item(),
                                    sum_squares=sum_sq,
                                    bucket_limits=limits[1:].tolist(),
                                    bucket_counts=counts.tolist())

                ints = torch.tensor(range(0, 100)).float()
                nbins = 100
                counts = torch.histc(ints, bins=nbins, min=0, max=99)
                limits = torch.tensor(range(nbins))
                sum_sq = ints.dot(ints).item()
                w.add_histogram_raw('histc histogram raw',
                                    min=ints.min().item(),
                                    max=ints.max().item(),
                                    num=len(ints),
                                    sum=ints.sum().item(),
                                    sum_squares=sum_sq,
                                    bucket_limits=limits.tolist(),
                                    bucket_counts=counts.tolist())
Example #7
        def test_pytorch_np(self):
            tensors = [
                torch.rand(3, 10, 10),
                torch.rand(1),
                torch.rand(1, 2, 3, 4, 5)
            ]
            for tensor in tensors:
                # regular tensor
                self.assertIsInstance(make_np(tensor), np.ndarray)

                # CUDA tensor
                if torch.cuda.device_count() > 0:
                    self.assertIsInstance(make_np(tensor.cuda()), np.ndarray)

                # regular variable
                self.assertIsInstance(make_np(torch.autograd.Variable(tensor)),
                                      np.ndarray)

                # CUDA variable
                if torch.cuda.device_count() > 0:
                    self.assertIsInstance(
                        make_np(torch.autograd.Variable(tensor).cuda()),
                        np.ndarray)

            # python primitive type
            self.assertIsInstance(make_np(0), np.ndarray)
            self.assertIsInstance(make_np(0.1), np.ndarray)
Example #8
 def test_pytorch_autograd_np(self):
     x = torch.autograd.Variable(torch.Tensor(1))
     self.assertIsInstance(make_np(x), np.ndarray)
Example #9
 def test_pytorch_np_expect_fail(self):
     with self.assertRaises(NotImplementedError):
         res = make_np({'pytorch': 1.0})
Example #10
 def test_caffe2_np_expect_fail(self):
     with self.assertRaises(RuntimeError):
         res = make_np('This_blob_does_not_exist')
Example #11
 def test_caffe2_np(self):
     workspace.FeedBlob("testBlob", tensor_N(shape=(1, 3, 64, 64)))
     self.assertIsInstance(make_np('testBlob'), np.ndarray)
Example #12
 def test_caffe2_np(self):
     workspace.FeedBlob("testBlob", np.random.randn(1, 3, 64, 64).astype(np.float32))
     self.assertIsInstance(make_np('testBlob'), np.ndarray)