Example #1
    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.::

            data_dict = self.get_sampler()[0].pop_batch_op()
            net_output = net_model(data_dict, is_training)

        .. caution::

            Note it squeezes the output tensor of 6 dims
            ``[batch, x, y, z, time, modality]``
            by removing all dims along which length is one.

        :return: a tensorflow graph op
        """
        assert all(thread.is_alive() for thread in self._threads), \
            "input sampling threads are not running"
        if self._window.has_dynamic_shapes:
            data_output = self._dequeue_func()
        else:
            data_output = self._dequeue_func(self._batch_size)
        for (name, shape) in self._window.shapes.items():
            # set first dim as the batch size
            data_output[name].set_shape([self._batch_size] + list(shape[1:]))
        for name in data_output:
            data_output[name] = squeeze_spatial_temporal_dim(data_output[name])

        # keep a copy of the sampler's output tensors
        self.output_tensor = data_output
        return data_output
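
All of the variants on this page delegate the actual dimension removal to
squeeze_spatial_temporal_dim, which is not shown here. Below is a minimal
sketch consistent with the docstrings, assuming the
[batch, x, y, z, time, modality] layout; it is an illustration, not
necessarily the library's exact implementation.

    import tensorflow as tf  # TF 1.x, matching the examples on this page

    def squeeze_spatial_temporal_dim(tf_tensor):
        """Remove singleton x, y, z and time dims from a 6-D window tensor.

        The layout is assumed to be [batch, x, y, z, time, modality];
        the batch and modality dims are always kept.
        """
        if tf_tensor.shape.ndims != 6:
            return tf_tensor  # not a window tensor, nothing to squeeze
        axes = [axis for axis in (1, 2, 3, 4)  # x, y, z, time
                if tf_tensor.shape.as_list()[axis] == 1]
        return tf.squeeze(tf_tensor, axis=axes) if axes else tf_tensor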
Example #2
    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.::

            data_dict = self.get_sampler()[0].pop_batch_op()
            net_output = net_model(data_dict['image'], is_training)

        .. caution::

            Note it squeezes the output tensor of 6 dims
            ``[batch, x, y, z, time, modality]``
            by removing all dims along which length is one.

        :return: a dictionary of image window tensors.
        """

        if self.dataset is None or self.iterator is None:
            # in case `run_threads` is not called,
            # here we initialise the dataset and iterator
            self.init_dataset()
            self.iterator = self.dataset.make_one_shot_iterator()
            # self.iterator = tf.data.Iterator.from_structure(
            #     self.dataset.output_types, self.dataset.output_shapes)

        window_output = self.iterator.get_next()
        for name in window_output:
            window_output[name] = squeeze_spatial_temporal_dim(
                window_output[name])
        return window_output
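
A minimal usage sketch for this one-shot-iterator variant in a TF 1.x
session; sampler is assumed to be an instance of the class above:

    import tensorflow as tf

    # Build the graph once: each call to pop_batch_op() adds a new
    # get_next() op, so call it a single time and reuse the tensors.
    data_dict = sampler.pop_batch_op()

    with tf.Session() as sess:
        try:
            while True:
                windows = sess.run(data_dict)  # e.g. {'image': ndarray, ...}
                # feed windows['image'] to the network here
        except tf.errors.OutOfRangeError:
            pass  # the one-shot iterator is exhausted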
Example #3
    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.::

            data_dict = self.get_sampler()[0].pop_batch_op()
            net_output = net_model(data_dict['image'], is_training)

        .. caution::

            Note it squeezes the output tensor of 6 dims
            ``[batch, x, y, z, time, modality]``
            by removing all dims along which length is one.

        :return: a dictionary of image window tensors.
        """

        if self.dataset is None or self.iterator is None:
            self.init_dataset()
            self.iterator = self.dataset.make_initializable_iterator()

        window_output = self.iterator.get_next()
        if not self.from_generator:
            window_output = window_output[0]
        # for name in list(self.shapes):
        #     window_output[name].set_shape(
        #         [self.batch_size] + list(self.shapes[name][1:]))
        for name in window_output:
            window_output[name] = squeeze_spatial_temporal_dim(
                window_output[name])
        return window_output
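
Unlike the one-shot variant above, make_initializable_iterator requires an
explicit initialization run before get_next() can yield values. A hedged
usage sketch (sampler again stands for an instance of the class above):

    import tensorflow as tf

    data_dict = sampler.pop_batch_op()  # builds the dataset and iterator

    with tf.Session() as sess:
        # required step for initializable iterators; one-shot iterators
        # do not have (or need) an initializer op
        sess.run(sampler.iterator.initializer)
        windows = sess.run(data_dict)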
Example #4
    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.::

            data_dict = self.get_sampler()[0].pop_batch_op()
            net_output = net_model(data_dict, is_training)

        .. caution::

            Note it squeezes the output tensor of 6 dims
            ``[batch, x, y, z, time, modality]``
            by removing all dims along which length is one.

        :return: a tensorflow graph op
        """
        assert all(thread.is_alive() for thread in self._threads), \
            "input sampling threads are not running"
        if self._window.has_dynamic_shapes:
            data_output = self._dequeue_func()
        else:
            data_output = self._dequeue_func(self._batch_size)
        for (name, shape) in self._window.shapes.items():
            data_output[name].set_shape([self._batch_size] + list(shape))
        for name in data_output:
            data_output[name] = squeeze_spatial_temporal_dim(data_output[name])

        # keep a copy of the sampler's output tensors
        self.output_tensor = data_output
        return data_output
Example #5
    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.,
            data_dict = self.get_sampler()[0].pop_batch_op()
            net_output = net_model(data_dict, is_training)
        Note it squeezes the output tensor of 6 dims
        [batch, x, y, z, time, modality]
        by removing all dims along which length is one.

        :return: a tensorflow graph op
        """
        assert all(thread.is_alive() for thread in self._threads), \
            "input sampling threads are not running"
        if self._window.has_dynamic_shapes:
            data_output = self._dequeue_func()
        else:
            data_output = self._dequeue_func(self._batch_size)
        for (name, shape) in self._window.shapes.items():
            data_output[name].set_shape([self._batch_size] + list(shape))
        for name in data_output:
            data_output[name] = squeeze_spatial_temporal_dim(data_output[name])

        # keep a copy of the sampler's output tensors
        self.output_tensor = data_output
        return data_output
Example #6
    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.::

            data_dict = self.get_sampler()[0].pop_batch_op()
            net_output = net_model(data_dict['image'], is_training)

        .. caution::

            Note it squeezes the output tensor of 6 dims
            ``[batch, x, y, z, time, modality]``
            by removing all dims along which length is one.

        :return: a dictionary of image window tensors.
        """

        if self.dataset is None or self.iterator is None:
            self.init_dataset()
            self.iterator = self.dataset.make_initializable_iterator()

        window_output = self.iterator.get_next()
        if not self.from_generator:
            window_output = window_output[0]
        # for name in list(self.shapes):
        #     window_output[name].set_shape(
        #         [self.batch_size] + list(self.shapes[name][1:]))
        for name in window_output:
            window_output[name] = squeeze_spatial_temporal_dim(
                window_output[name])
        return window_output
Example #7
    def pop_batch_op(self):
        """
        This function is used when connecting a sampler output
        to a network. e.g.,
            data_dict = self.get_sampler()[0].pop_batch_op()
            net_output = net_model(data_dict, is_training)
        Note it squeezes the output tensor of 6 dims
        [batch, x, y, z, time, modality]
        by removing all dims along which length is one.

        :return: a tensorflow graph op
        """
        if self._window.has_dynamic_shapes:
            data_output = self._dequeue_func()
        else:
            data_output = self._dequeue_func(self._batch_size)
        for (name, shape) in self._window.shapes.items():
            data_output[name].set_shape([self._batch_size] + list(shape))

        for name in data_output:
            data_output[name] = squeeze_spatial_temporal_dim(data_output[name])
        return data_output
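
The branch on has_dynamic_shapes in the queue-based variants mirrors
TensorFlow's queue API: dequeue_many is only available when element shapes
are fully defined, while dynamically shaped windows must be taken one at a
time and have their static shape restored with set_shape. A standalone
illustration (the queues here are hypothetical, not the sampler's own):

    import tensorflow as tf

    # Fixed shapes: dequeue_many produces a whole [batch, ...] tensor.
    fixed_queue = tf.FIFOQueue(capacity=8, dtypes=[tf.float32],
                               shapes=[[32, 32, 32, 1, 1]])
    batch = fixed_queue.dequeue_many(4)  # shape [4, 32, 32, 32, 1, 1]

    # Dynamic shapes: shapes=None, so only element-wise dequeue() works,
    # and the result has an unknown static shape until set_shape is called.
    dynamic_queue = tf.FIFOQueue(capacity=8, dtypes=[tf.float32])
    window = dynamic_queue.dequeue()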