Example 1
    def __init__(
            self,
            filters,
            kernel_size,
            batch_size=None,  # TODO: replace with a use_var option
            strides=[1, 1],
            padding='valid',
            activation=None,
            use_bias=True,
            kernel_initializer='glorot_uniform',
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            block_size=[16, 16],
            tol=0.5,
            avgpool=False,
            **kwargs):
        super().__init__(**kwargs)

        self.filters = filters
        self.kernel_size = utils.listify(kernel_size, 2)
        self.batch_size = batch_size
        self.use_var = batch_size is not None and not tf.executing_eagerly()
        self.strides = utils.listify(strides, 2)
        self.padding = padding
        self.activation = keras.layers.Activation(activation)
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint

        self.block_size = utils.listify(block_size, 2)
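        # conv_output_length with 'valid' padding gives
        # (length - kernel_size) // stride + 1 per dimension, e.g. 16x16
        # blocks with a 3x3 kernel and stride 1 yield 14x14 output blocks.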
        self.output_block_size = [
            conv_utils.conv_output_length(self.block_size[i],
                                          self.kernel_size[i], 'valid',
                                          self.strides[i]) for i in [0, 1]
        ]

        self.block_offset = [0, 0]
        self.output_block_offset = self.block_offset

        self.block_stride = self.output_block_size
        self.output_block_stride = self.output_block_size

        self.tol = tol
        self.avgpool = avgpool

        if self.padding == 'valid':
            pad_size = [0, 0]
        else:
            # 'same'-style padding: pad each spatial dimension by half the
            # kernel size (exact for odd kernels).
            pad_size = [(k - 1) // 2 for k in self.kernel_size]
        self.pad = keras.layers.ZeroPadding2D(pad_size)
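`utils.listify` is used throughout these examples but not shown. A minimal sketch of its presumed behavior (an assumption, not taken from the source): repeat a scalar into a list of length n, and pass sequences of the right length through.

def listify(x, n):
    """Return `x` as a list of length `n`; scalars are repeated."""
    if isinstance(x, (list, tuple)):
        assert len(x) == n, f'expected length {n}, got {len(x)}'
        return list(x)
    return [x] * n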
Example 2
    def __init__(self,
                 block_size=[16, 16],
                 block_offset=[0, 0],
                 block_stride=[16, 16],
                 **kwargs):
        super().__init__(**kwargs)

        self.block_size = utils.listify(block_size, 2)
        self.block_offset = utils.listify(block_offset, 2)
        self.block_stride = utils.listify(block_stride, 2)
Example 3
    def __init__(self,
                 filters,
                 kernel_size,
                 batch_size,
                 strides=[1, 1],
                 padding='valid',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 block_size=[16, 16],
                 tol=0.5,
                 avgpool=False,
                 **kwargs):
        super().__init__(**kwargs)

        self.filters = filters
        self.kernel_size = utils.listify(kernel_size, 2)
        self.batch_size = batch_size
        self.use_var = batch_size is not None and not tf.executing_eagerly()
        self.strides = utils.listify(strides, 2)
        self.padding = padding
        self.activation = keras.layers.Activation(activation)
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint

        self.block_size = utils.listify(block_size, 2)
        self.output_block_size = [
            conv_utils.deconv_output_length(self.block_size[i],
                                            self.kernel_size[i],
                                            'valid',
                                            stride=self.strides[i])
            for i in [0, 1]
        ]

        self.block_offset = [0, 0]
        self.output_block_offset = self.block_offset

        self.block_stride = self.block_size
        self.output_block_stride = self.output_block_size  # might not be correct

        self.tol = tol
        self.avgpool = avgpool
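For reference, `conv_utils.deconv_output_length` with 'valid' padding and no output padding follows the standard transposed-convolution size formula; a one-function sketch, assuming the stock Keras behavior:

def deconv_output_length_valid(input_length, kernel_size, stride):
    # Each input position is spaced out by `stride`; the kernel then adds
    # back any overlap beyond the stride at the border.
    return input_length * stride + max(kernel_size - stride, 0)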
Example 4
    def __init__(self,
                 output_shape,
                 block_size=[16, 16],
                 block_offset=[0, 0],
                 block_stride=[16, 16],
                 use_var=False,
                 **kwargs):
        super().__init__(**kwargs)

        self.output_shape_ = list(output_shape)
        self.block_size = utils.listify(block_size, 2)
        self.block_offset = utils.listify(block_offset, 2)
        self.block_stride = utils.listify(block_stride, 2)
        self.use_var = use_var
Example 5
    def __init__(self,
                 block_size=[16, 16],
                 block_offset=[0, 0],
                 block_stride=[16, 16],
                 tol=0.5,
                 avgpool=False,
                 **kwargs):
        super().__init__(**kwargs)

        self.block_size = utils.listify(block_size, 2)
        self.block_offset = utils.listify(block_offset, 2)
        self.block_stride = utils.listify(block_stride, 2)
        self.tol = tol
        self.avgpool = avgpool
Example 6
def plot_image(*images,
               columns=10,
               ticks=True,
               scale=20,
               colorbar=False,
               cmap='gray',
               cram=False,
               **kwargs):
    cmaps = utils.listify(cmap, len(images))
    columns = min(columns, len(images))
    rows = max(1, (len(images) + columns - 1) // columns)  # ceil, so every image gets a cell
    fig, axes = plt.subplots(rows,
                             columns,
                             squeeze=False,
                             figsize=(scale, scale * rows / columns))
    for i, image in enumerate(images):
        ax = axes[i // columns, i % columns]
        if image is None:
            ax.axis('off')
            continue
        im = ax.imshow(np.squeeze(image), cmap=cmaps[i], **kwargs)
        if colorbar:
            fig.colorbar(im,
                         ax=ax,
                         orientation='horizontal',
                         fraction=0.046,
                         pad=0.04)
    for ax in axes.ravel():
        if not ticks:
            ax.axis('off')
        ax.set_aspect('equal')
    if cram:
        fig.subplots_adjust(wspace=0, hspace=0)
    return fig, axes
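A brief usage sketch (array contents invented for illustration): plot two random images side by side and leave the third cell blank.

import numpy as np

images = [np.random.rand(32, 32), np.random.rand(32, 32), None]
fig, axes = plot_image(*images, columns=3, colorbar=True)
fig.savefig('example.png')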
Example 7
def conv_upsample(inputs,
                  filters,
                  size=2,
                  activation='relu',
                  mask=None,
                  batch_size=None,
                  **kwargs):
    """Upsample the inputs in dimensions 1,2 with a transpose convolution.

  :param inputs: input tensor.
  :param filters: number of output filters.
  :param size: factor by which to upsample. Can be an int or a list of 2 ints,
  specifying the factor in each direction.
  :param activation: relu by default.
  :param mask: if not None, use a SparseConv2DTranspose layer.
  :param batch_size: passed to the SparseConv2DTranspose layer when `mask` is
  given.

  Additional kwargs are passed to the conv transpose layer.

  :returns: the upsampled tensor.

  """
    size = utils.listify(size, 2)
    if mask is None:
        inputs = keras.layers.Conv2DTranspose(filters,
                                              size,
                                              strides=size,
                                              padding='same',
                                              activation=activation,
                                              use_bias=False,
                                              **kwargs)(inputs)
    else:
        inputs = lay.SparseConv2DTranspose(filters,
                                           size,
                                           batch_size=batch_size,
                                           strides=size,
                                           padding='same',
                                           activation=activation,
                                           use_bias=False,
                                           **kwargs)([inputs, mask])
    return inputs
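A hypothetical usage in a functional model (shapes invented for illustration):

inputs = keras.Input(shape=(32, 32, 64))
# Doubles the spatial dimensions: (32, 32, 64) -> (64, 64, 16).
outputs = conv_upsample(inputs, filters=16, size=2)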
Example 8
    def __init__(self,
                 *,
                 base_shape,
                 level_filters,
                 num_channels,
                 pose_dim,
                 level_depth=2,
                 dropout=0.5,
                 **kwargs):
        """Create a U-Net model for object detection.

    Regresses a distance proxy at every level for multi-scale tracking. Model
    output consists first of the `pose_dim`-channel pose image, followed by
    multi-scale fields from smallest (lowest on the U) to largest (original
    image dimension).

    :param base_shape: the height/width of the output of the first layer in the lower
    level. This determines input and output tile shapes. Can be a tuple,
    specifying different height/width, or a single integer.
    :param level_filters: number of filters at each level (bottom to top).
    :param level_depth: number of layers per level
    :param dropout: dropout to use for concatenations
    :param num_channels: number of channels in the input
    :param pose_dim: number of channels in the regressed pose image.

    """
        self.base_shape = utils.listify(base_shape, 2)
        self.level_filters = level_filters
        self.num_channels = num_channels
        self.pose_dim = pose_dim
        self.level_depth = level_depth
        self.dropout = dropout

        self.num_levels = len(self.level_filters)
        self.input_tile_shape = self.compute_input_tile_shape()
        self.output_tile_shapes = self.compute_output_tile_shapes()
        self.output_tile_shape = self.output_tile_shapes[-1]

        super().__init__(self.input_tile_shape + [self.num_channels], **kwargs)
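A hypothetical instantiation, assuming this is the `UNet` class referenced elsewhere in the excerpt; all argument values are invented for illustration:

model = UNet(base_shape=32,
             level_filters=[32, 64, 128],
             num_channels=1,
             pose_dim=3,
             level_depth=2,
             dropout=0.5)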
Example 9
    def __init__(self,
                 *,
                 batch_size=None,
                 block_size=[8, 8],
                 tol=0.5,
                 **kwargs):
        """Create a UNet-like architecture using multi-scale tracking.

    :param batch_size: determines whether variables will be used in sparse
    layers for the scatter operation.
    :param block_size: width/height of the blocks used for sparsity, at the
    scale of the original resolution (these are rescaled at each level).
    :param tol: absolute threshold value for sbnet attention.

    """
        super().__init__(**kwargs)
        self.batch_size = batch_size
        self.block_size = utils.listify(block_size, 2)
        self.tol = tol
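The docstring describes `tol` as the absolute threshold for SBNet-style attention. A minimal sketch of how such a threshold typically gates blocks; the function name and the choice between average and max pooling are assumptions, not taken from the source:

import tensorflow as tf

def active_block_mask(mask, block_size, block_stride, tol, avgpool=False):
    # Pool the dense mask down to one value per block, then threshold:
    # a block participates in the sparse computation only if its pooled
    # mask value exceeds `tol`.
    pool = tf.nn.avg_pool2d if avgpool else tf.nn.max_pool2d
    pooled = pool(mask, ksize=block_size, strides=block_stride, padding='VALID')
    return pooled > tol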
Example 10
    def __init__(
            self,
            *,  # pylint: disable=too-many-statements
            commands,
            data_root,
            model_root,
            overwrite,
            deep,
            figs_dir,
            convert_mode,
            transformation,
            identity_prob,
            priority_mode,
            labeled,
            annotation_mode,
            record_size,
            annotation_delay,
            image_shape,
            data_size,
            test_size,
            batch_size,
            num_objects,
            pose_dim,
            num_shuffle,
            base_shape,
            level_filters,
            level_depth,
            model,
            multiscale,
            use_var,
            dropout,
            initial_epoch,
            epochs,
            learning_rate,
            tol,
            num_parallel_calls,
            verbose,
            keras_verbose,
            eager,
            show,
            cache,
            seconds):
        # main
        self.commands = commands

        # file settings
        self.data_root = data_root
        self.model_root = model_root
        self.overwrite = overwrite
        self.deep = deep
        self.figs_dir = figs_dir

        # data settings
        self.convert_modes = utils.listwrap(convert_mode)
        self.transformation = transformation
        self.identity_prob = identity_prob
        self.priority_mode = priority_mode
        self.labeled = labeled

        # annotation settings
        self.annotation_mode = annotation_mode
        self.record_size = record_size
        self.annotation_delay = annotation_delay

        # data sizes/settings
        self.image_shape = image_shape
        self.data_size = data_size
        self.test_size = test_size
        self.batch_size = batch_size
        self.num_objects = num_objects
        self.pose_dim = pose_dim
        self.num_shuffle = num_shuffle

        # model architecture
        self.base_shape = utils.listify(base_shape, 2)
        self.level_filters = level_filters
        self.level_depth = level_depth

        # model type settings
        self.model = model
        self.multiscale = multiscale
        self.use_var = use_var

        # hyperparameters
        self.dropout = dropout
        self.initial_epoch = initial_epoch
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.tol = tol

        # runtime settings
        self.num_parallel_calls = num_parallel_calls
        self.verbose = verbose
        self.keras_verbose = keras_verbose
        self.eager = eager
        self.show = show
        self.cache = cache
        self.seconds = seconds

        # globals
        log.set_verbosity(self.verbose)
        _set_eager(self.eager)
        vis.set_show(self.show)
        self._set_num_parallel_calls()

        # derived sizes/shapes
        self.num_levels = len(self.level_filters)
        self.input_tile_shape = mod.UNet.compute_input_tile_shape_(
            self.base_shape, self.num_levels, self.level_depth)
        self.output_tile_shapes = mod.UNet.compute_output_tile_shapes_(
            self.base_shape, self.num_levels, self.level_depth)
        self.output_tile_shape = self.output_tile_shapes[-1]
        self.num_tiles = dat.ArtificeData.compute_num_tiles(
            self.image_shape, self.output_tile_shape)

        # derived model subdirs/paths
        self.cache_dir = join(self.model_root, 'cache')
        self.annotation_info_path = join(self.model_root,
                                         'annotation_info.pkl')
        self.annotated_dir = join(self.model_root,
                                  'annotated')  # model-dependent

        # ensure directories exist
        _ensure_dirs_exist([
            self.data_root, self.model_root, self.figs_dir, self.cache_dir,
            self.annotated_dir
        ])
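`_ensure_dirs_exist` is not shown in the excerpt; a minimal sketch of what a helper with that name presumably does:

import os

def _ensure_dirs_exist(dirs):
    """Create each directory in `dirs` if it does not already exist."""
    for d in dirs:
        os.makedirs(d, exist_ok=True)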