def __init__(self,
             reader,
             window_sizes,
             batch_size=10,
             n_interpolations=10,
             queue_length=10,
             name='linear_interpolation_sampler'):
    """Sampler used for linear interpolation between samples.

    Runs a single deterministic pass over the reader's data
    (``shuffle=False``, ``epoch=1``), dropping any smaller final batch.

    :param reader: image reader providing ``shapes`` per subject
    :param window_sizes: spatial window specification
    :param batch_size: windows per output batch
    :param n_interpolations: number of interpolation steps to generate
    :param queue_length: prefetch queue size
    :param name: sampler name scope
    """
    ImageWindowDataset.__init__(
        self,
        reader,
        window_sizes=window_sizes,
        batch_size=batch_size,
        queue_length=queue_length,
        shuffle=False,
        epoch=1,
        smaller_final_batch_mode='drop',
        name=name)

    self.n_interpolations = n_interpolations
    # only try to use the first spatial shape available
    first_spatial_shape = list(self.reader.shapes.values())[0][:3]
    self.window.set_spatial_shape(first_spatial_shape)
    tf.logging.info("initialised linear interpolation sampler %s ",
                    self.window.shapes)
    assert not self.window.has_dynamic_shapes, \
        "dynamic shapes not supported, please specify " \
        "spatial_window_size = (1, 1, 1)"
def __init__(self,
             reader,
             window_sizes,
             batch_size=1,
             spatial_window_size=None,
             window_border=None,
             queue_length=10,
             smaller_final_batch_mode='pad',
             name='grid_sampler'):
    """Deterministic sliding-window (grid) sampler.

    Performs one non-shuffled pass over the data (``shuffle=False``,
    ``epoch=1``) with one window per image.

    :param reader: image reader
    :param window_sizes: spatial window specification from config
    :param batch_size: windows per output batch
    :param spatial_window_size: when given, overrides ``window_sizes``
    :param window_border: per-dimension border, list or tuple
    :param queue_length: prefetch queue size
    :param smaller_final_batch_mode: how to handle a partial last batch
    :param name: sampler name scope
    """
    # override all spatial window defined in input
    # modalities sections
    # this is useful when do inference with a spatial window
    # which is different from the training specifications
    ImageWindowDataset.__init__(
        self,
        reader=reader,
        window_sizes=spatial_window_size or window_sizes,
        batch_size=batch_size,
        windows_per_image=1,
        queue_length=queue_length,
        shuffle=False,
        epoch=1,
        smaller_final_batch_mode=smaller_final_batch_mode,
        name=name)

    self.border_size = window_border or (0, 0, 0)
    assert isinstance(self.border_size, (list, tuple)), \
        "window_border should be a list or tuple"
    # extend a short border spec by repeating its last element until it
    # covers every spatial dimension, then truncate to N_SPATIAL
    if len(self.border_size) < N_SPATIAL:
        self.border_size = tuple(self.border_size) + \
            (self.border_size[-1],) * (N_SPATIAL - len(self.border_size))
    self.border_size = self.border_size[:N_SPATIAL]
    tf.logging.info('initialised window instance')
    tf.logging.info("initialised grid sampler %s", self.window.shapes)
def __init__(self,
             names=('vector',),
             vector_size=(100,),
             batch_size=10,
             n_interpolations=10,
             mean=0.0,
             stddev=1.0,
             repeat=1,
             queue_length=10,
             name='random_vector_sampler'):
    """Sampler producing random vectors instead of image windows.

    No reader is used; the output window is a float32 vector of
    ``vector_size`` keyed by ``names[0]``.

    :param names: output names; only the first entry is used as the key
    :param vector_size: shape of the generated vector
    :param batch_size: vectors per output batch
    :param n_interpolations: clamped below at 1
    :param mean: Gaussian mean used by the sampler
    :param stddev: Gaussian standard deviation used by the sampler
    :param repeat: number of repeats
    :param queue_length: prefetch queue size
    :param name: sampler name scope
    """
    # repeat=None for infinite loops
    self.n_interpolations = max(n_interpolations, 1)
    self.mean = mean
    self.stddev = stddev
    self.repeat = repeat
    self.names = names

    output_key = names[0]
    ImageWindowDataset.__init__(
        self,
        reader=None,
        window_sizes={output_key: {'spatial_window_size': vector_size}},
        batch_size=batch_size,
        queue_length=queue_length,
        shuffle=False,
        epoch=1,
        smaller_final_batch_mode='drop',
        name=name)
    # replace the window created by the parent constructor with a
    # plain float32 vector window of the requested size
    self.window = ImageWindow(shapes={output_key: vector_size},
                              dtypes={output_key: tf.float32})
    tf.logging.info("initialised sampler output %s ", self.window.shapes)
def __init__(self,
             reader,
             window_sizes,
             batch_size=1,
             spatial_window_size=None,
             windows_per_image=1,
             shuffle=True,
             queue_length=10,
             smaller_final_batch_mode='pad',
             name='resize_sampler_v2'):
    """Sampler whose window covers the (resized) whole image.

    When ``shuffle`` is True the dataset loops forever (``epoch=-1``);
    otherwise it runs a single epoch.

    :param reader: image reader
    :param window_sizes: spatial window specification from config
    :param batch_size: windows per output batch
    :param spatial_window_size: when given, overrides the window shape
    :param windows_per_image: windows sampled from each image
    :param shuffle: whether to shuffle and repeat indefinitely
    :param queue_length: prefetch queue size
    :param smaller_final_batch_mode: how to handle a partial last batch
    :param name: sampler name scope
    """
    tf.logging.info('reading size of preprocessed images')
    n_epochs = -1 if shuffle else 1
    ImageWindowDataset.__init__(
        self,
        reader=reader,
        window_sizes=window_sizes,
        batch_size=batch_size,
        windows_per_image=windows_per_image,
        queue_length=queue_length,
        shuffle=shuffle,
        epoch=n_epochs,
        smaller_final_batch_mode=smaller_final_batch_mode,
        name=name)
    if spatial_window_size:
        # override all spatial window defined in input
        # modalities sections
        # this is useful when do inference with a spatial window
        # which is different from the training specifications
        self.window.set_spatial_shape(spatial_window_size)
    tf.logging.info("initialised resize sampler %s ", self.window.shapes)
def __init__(self,
             reader,
             csv_reader=None,
             window_sizes=None,
             batch_size=10,
             windows_per_image=1,
             shuffle=True,
             queue_length=10,
             num_threads=4,
             epoch=-1,
             smaller_final_batch_mode='pad',
             name='random_vector_sampler'):
    """Image-window sampler with an optional CSV reader side input.

    :param reader: image reader
    :param csv_reader: optional reader for per-subject CSV data
    :param window_sizes: spatial window specification from config
    :param batch_size: windows per output batch
    :param windows_per_image: windows sampled from each image
    :param shuffle: whether to shuffle the sampling order
    :param queue_length: prefetch queue size
    :param num_threads: number of preprocessing threads
    :param epoch: number of epochs (-1 loops forever)
    :param smaller_final_batch_mode: how to handle a partial last batch
    :param name: sampler name scope
    """
    self.csv_reader = csv_reader
    ImageWindowDataset.__init__(
        self,
        reader=reader,
        window_sizes=window_sizes,
        batch_size=batch_size,
        windows_per_image=windows_per_image,
        shuffle=shuffle,
        queue_length=queue_length,
        epoch=epoch,
        smaller_final_batch_mode=smaller_final_batch_mode,
        name=name)
    # use tf.logging (the file's convention) rather than bare print()
    # for library diagnostics; the previous print calls were debug
    # leftovers that bypassed the logging configuration
    tf.logging.info("initialised sampler %s", name)
    self.set_num_threads(num_threads)
def __init__(self,
             reader,
             window_sizes,
             batch_size=1,
             windows_per_image=1,
             queue_length=10,
             name='uniform_sampler_v2'):
    """Sampler drawing window centres uniformly at random.

    Shuffles and loops forever (``shuffle=True``, ``epoch=-1``),
    dropping any smaller final batch.

    :param reader: image reader
    :param window_sizes: spatial window specification from config
    :param batch_size: windows per output batch
    :param windows_per_image: windows sampled from each image
    :param queue_length: prefetch queue size
    :param name: sampler name scope
    """
    ImageWindowDataset.__init__(
        self,
        reader=reader,
        window_sizes=window_sizes,
        batch_size=batch_size,
        windows_per_image=windows_per_image,
        queue_length=queue_length,
        shuffle=True,
        epoch=-1,
        smaller_final_batch_mode='drop',
        name=name)
    tf.logging.info("initialised uniform sampler %s ", self.window.shapes)
    # strategy hook: function used to draw the window centre coordinates
    self.window_centers_sampler = rand_spatial_coordinates
def __init__(self,
             reader,
             csv_reader=None,
             window_sizes=None,
             batch_size=10,
             windows_per_image=1,
             shuffle=True,
             queue_length=10,
             epoch=-1,
             smaller_final_batch_mode='pad',
             name='random_vector_sampler'):
    """Image-window sampler that also keeps an optional CSV reader.

    :param reader: image reader
    :param csv_reader: optional reader for per-subject CSV data
    :param window_sizes: spatial window specification from config
    :param batch_size: windows per output batch
    :param windows_per_image: windows sampled from each image
    :param shuffle: whether to shuffle the sampling order
    :param queue_length: prefetch queue size
    :param epoch: number of epochs (-1 loops forever)
    :param smaller_final_batch_mode: how to handle a partial last batch
    :param name: sampler name scope
    """
    # keep the CSV side input before delegating to the parent
    self.csv_reader = csv_reader
    ImageWindowDataset.__init__(
        self,
        reader=reader,
        window_sizes=window_sizes,
        batch_size=batch_size,
        windows_per_image=windows_per_image,
        shuffle=shuffle,
        queue_length=queue_length,
        epoch=epoch,
        smaller_final_batch_mode=smaller_final_batch_mode,
        name=name)
def __init__(self, *args, **kwargs):
    """Pass-through constructor: forwards all arguments unchanged to
    ImageWindowDataset.__init__ and adds no state of its own."""
    ImageWindowDataset.__init__(self, *args, **kwargs)