Example #1
    def __init__(self,
                 names=('vector', ),
                 vector_size=(100, ),
                 batch_size=10,
                 n_interpolations=10,
                 mean=0.0,
                 stddev=1.0,
                 repeat=1,
                 queue_length=10,
                 name='random_vector_sampler'):
        # repeat=None for infinite loops
        Layer.__init__(self, name=name)

        self.n_interpolations = max(n_interpolations, 1)
        self.mean = mean
        self.stddev = stddev
        self.repeat = repeat
        InputBatchQueueRunner.__init__(self,
                                       capacity=queue_length,
                                       shuffle=False)
        tf.logging.info('reading size of preprocessed images')
        vector_shapes = {names[0]: vector_size}
        vector_dtypes = {names[0]: tf.float32}
        self.window = ImageWindow(shapes=vector_shapes, dtypes=vector_dtypes)
        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=self.n_interpolations,
                                   dequeue_size=batch_size)
        tf.logging.info("initialised sampler output %s ", self.window.shapes)
Example #2
    def __init__(self,
                 reader,
                 data_param,
                 batch_size,
                 spatial_window_size=(),
                 window_border=(),
                 queue_length=10):
        self.batch_size = batch_size
        self.reader = reader
        Layer.__init__(self, name='input_buffer')
        InputBatchQueueRunner.__init__(
            self,
            capacity=queue_length,
            shuffle=False)
        tf.logging.info('reading size of preprocessed inputs')
        self.window = ImageWindow.from_data_reader_properties(
            self.reader.input_sources,
            self.reader.shapes,
            self.reader.tf_dtypes,
            data_param)

        if spatial_window_size:
            # override all spatial windows defined in the input
            # modalities sections;
            # this is useful when doing inference with a spatial window
            # that differs from the training specifications
            self.window.set_spatial_shape(spatial_window_size)
        self.border_size = window_border
        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=1,
                                   dequeue_size=batch_size)
        tf.logging.info("initialised sampler output %s", self.window.shapes)
Example #3
    def __init__(self,
                 reader,
                 data_param,
                 batch_size,
                 spatial_window_size=(),
                 windows_per_image=1,
                 shuffle_buffer=True,
                 queue_length=10,
                 name='resize_sampler'):

        self.reader = reader
        self.windows_per_image = windows_per_image
        self.shuffle = bool(shuffle_buffer)

        Layer.__init__(self, name=name)
        InputBatchQueueRunner.__init__(self,
                                       capacity=queue_length,
                                       shuffle=self.shuffle)
        tf.logging.info('reading size of preprocessed images')
        self.window = ImageWindow.from_data_reader_properties(
            self.reader.input_sources, self.reader.shapes,
            self.reader.tf_dtypes, data_param)
        if spatial_window_size:
            # override all spatial windows defined in the input
            # modalities sections;
            # this is useful when doing inference with a spatial window
            # that differs from the training specifications
            self.window.set_spatial_shape(spatial_window_size)
        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=1,
                                   dequeue_size=batch_size)
        tf.logging.info("initialised sampler output %s ", self.window.shapes)
Example #4
    def __init__(self,
                 reader,
                 data_param,
                 batch_size=10,
                 n_interpolations=10,
                 queue_length=10,
                 name='linear_interpolation_sampler'):
        self.n_interpolations = n_interpolations
        self.reader = reader
        Layer.__init__(self, name=name)
        InputBatchQueueRunner.__init__(self,
                                       capacity=queue_length,
                                       shuffle=False)
        tf.logging.info('reading size of preprocessed images')
        self.window = ImageWindow.from_data_reader_properties(
            self.reader.input_sources, self.reader.shapes,
            self.reader.tf_dtypes, data_param)
        # only try to use the first spatial shape available
        image_spatial_shape = list(self.reader.shapes.values())[0][:3]
        self.window.set_spatial_shape(image_spatial_shape)

        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=self.n_interpolations,
                                   dequeue_size=batch_size)
        tf.logging.info("initialised sampler output %s ", self.window.shapes)

        assert not self.window.has_dynamic_shapes, \
            "dynamic shapes not supported, please specify " \
            "spatial_window_size = (1, 1, 1)"
Example #5
    def __init__(self,
                 reader,
                 data_param,
                 batch_size,
                 windows_per_image,
                 queue_length=10):
        self.reader = reader
        Layer.__init__(self, name='input_buffer')
        InputBatchQueueRunner.__init__(self,
                                       capacity=queue_length,
                                       shuffle=True)
        tf.logging.info('reading size of preprocessed images')
        self.window = ImageWindow.from_data_reader_properties(
            self.reader.input_sources, self.reader.shapes,
            self.reader.tf_dtypes, data_param)

        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=windows_per_image,
                                   dequeue_size=batch_size)
        tf.logging.info(
            "initialised sampler output %s "
            " [-1 for dynamic size]", self.window.shapes)

        self.spatial_coordinates_generator = rand_spatial_coordinates
Example #6
    def __init__(self,
                 names=('vector',),
                 vector_size=(100,),
                 batch_size=10,
                 n_interpolations=10,
                 mean=0.0,
                 stddev=1.0,
                 repeat=1,
                 queue_length=10):
        self.n_interpolations = max(n_interpolations, 1)
        self.mean = mean
        self.stddev = stddev
        self.repeat = repeat
        Layer.__init__(self, name='input_buffer')
        InputBatchQueueRunner.__init__(
            self,
            capacity=queue_length,
            shuffle=False)
        tf.logging.info('reading size of preprocessed images')
        self.names = names
        vector_shapes = {names[0]: vector_size}
        vector_dtypes = {names[0]: tf.float32}
        self.window = ImageWindow(names=tuple(vector_shapes),
                                  shapes=vector_shapes,
                                  dtypes=vector_dtypes)
        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=self.n_interpolations,
                                   dequeue_size=batch_size)
        tf.logging.info("initialised sampler output %s ", self.window.shapes)
Example #7
    def __init__(self,
                 reader,
                 data_param,
                 batch_size,
                 spatial_window_size=None,
                 window_border=None,
                 queue_length=10,
                 name='grid_sampler'):
        self.batch_size = batch_size
        self.border_size = window_border or (0, 0, 0)
        self.reader = reader
        Layer.__init__(self, name=name)
        InputBatchQueueRunner.__init__(self,
                                       capacity=queue_length,
                                       shuffle=False)
        tf.logging.info('reading size of preprocessed inputs')

        # override all spatial windows defined in the input
        # modalities sections;
        # this is useful when doing inference with a spatial window
        # that differs from the training specifications
        self.window = ImageWindow.from_data_reader_properties(
            self.reader.input_sources, self.reader.shapes,
            self.reader.tf_dtypes, spatial_window_size or data_param)

        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=1,
                                   dequeue_size=batch_size)
        tf.logging.info("initialised sampler output %s", self.window.shapes)
Example #8
    def __init__(self,
                 reader,
                 data_param,
                 batch_size=10,
                 n_interpolations=10,
                 queue_length=10):
        self.n_interpolations = n_interpolations
        self.reader = reader
        Layer.__init__(self, name='input_buffer')
        InputBatchQueueRunner.__init__(
            self,
            capacity=queue_length,
            shuffle=False)
        tf.logging.info('reading size of preprocessed images')
        self.window = ImageWindow.from_data_reader_properties(
            self.reader.input_sources,
            self.reader.shapes,
            self.reader.tf_dtypes,
            data_param)
        # only try to use the first spatial shape available
        image_spatial_shape = list(self.reader.shapes.values())[0][:3]
        self.window.set_spatial_shape(image_spatial_shape)

        tf.logging.info('initialised window instance')
        self._create_queue_and_ops(self.window,
                                   enqueue_size=self.n_interpolations,
                                   dequeue_size=batch_size)
        tf.logging.info("initialised sampler output %s ", self.window.shapes)

        assert not self.window.has_dynamic_shapes, \
            "dynamic shapes not supported, please specify " \
            "spatial_window_size = (1, 1, 1)"