Example #1
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   super(LocallyConnected1D, self).__init__(**kwargs)
   self.filters = filters
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   if self.padding != 'valid':
     raise ValueError('Invalid border mode for LocallyConnected1D '
                      '(only "valid" is supported): ' + padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.activation = activations.get(activation)
   self.use_bias = use_bias
   self.kernel_initializer = initializers.get(kernel_initializer)
   self.bias_initializer = initializers.get(bias_initializer)
   self.kernel_regularizer = regularizers.get(kernel_regularizer)
   self.bias_regularizer = regularizers.get(bias_regularizer)
   self.activity_regularizer = regularizers.get(activity_regularizer)
   self.kernel_constraint = constraints.get(kernel_constraint)
   self.bias_constraint = constraints.get(bias_constraint)
   self.input_spec = InputSpec(ndim=3)
Example #2
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    super(ConvLSTM2DCell, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = (self.filters, self.filters)
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
Example #3
 def __init__(self, pool_function, pool_size, strides,
              padding='valid', data_format='channels_last',
              name=None, **kwargs):
   super(Pooling1D, self).__init__(name=name, **kwargs)
   if data_format is None:
     data_format = backend.image_data_format()
   if strides is None:
     strides = pool_size
   self.pool_function = pool_function
   self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.input_spec = InputSpec(ndim=3)
Example #4
  def test_normalize_tuple(self):
    self.assertEqual((2, 2, 2),
                     conv_utils.normalize_tuple(2, n=3, name='strides'))
    self.assertEqual((2, 1, 2),
                     conv_utils.normalize_tuple((2, 1, 2), n=3, name='strides'))

    with self.assertRaises(ValueError):
      conv_utils.normalize_tuple((2, 1), n=3, name='strides')

    with self.assertRaises(ValueError):
      conv_utils.normalize_tuple(None, n=3, name='strides')
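The test above pins down the contract of conv_utils.normalize_tuple: an int is broadcast to an n-tuple, a sequence is validated for length, and anything else raises a ValueError. Below is a minimal sketch of that contract for reference; it is inferred from the test, not the actual Keras implementation.

def normalize_tuple(value, n, name):
    """Broadcast an int to an n-tuple, or validate an existing sequence."""
    if isinstance(value, int):
        return (value,) * n
    try:
        value_tuple = tuple(value)
    except TypeError:
        raise ValueError('The `{}` argument must be a tuple of {} integers. '
                         'Received: {}'.format(name, n, value))
    if len(value_tuple) != n:
        raise ValueError('The `{}` argument must be a tuple of {} integers. '
                         'Received: {}'.format(name, n, value))
    return value_tuple

assert normalize_tuple(2, n=3, name='strides') == (2, 2, 2)
assert normalize_tuple((2, 1, 2), n=3, name='strides') == (2, 1, 2)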
Example #5
    def init_layers(self, input_shape):
        conv_layer_type = self.get_conv_layer_type()
        for i in range(self.depth):
            strides = self.strides if (i == 0) else 1
            kernel_initializer = self.kernel_initializer if (i == 0) else tf.zeros_initializer
            conv_layer = conv_layer_type(filters=self.filters,
                                         kernel_size=self.kernel_size,
                                         strides=strides,
                                         padding="same",
                                         data_format=self.data_format,
                                         dilation_rate=self.dilation_rate,
                                         use_bias=False,
                                         kernel_initializer=kernel_initializer,
                                         kernel_regularizer=self.kernel_regularizer,
                                         activity_regularizer=self.activity_regularizer,
                                         kernel_constraint=self.kernel_constraint,
                                         bias_constraint=self.bias_constraint)
            self.conv_layers.append(conv_layer)

        if self.use_projection(input_shape):
            projection_kernel_size = conv_utils.normalize_tuple(1, self.rank, "projection_kernel_size")
            projection_kernel_initializer = VarianceScaling()
            self.projection_layer = conv_layer_type(filters=self.filters,
                                                    kernel_size=projection_kernel_size,
                                                    strides=self.strides,
                                                    padding="same",
                                                    data_format=self.data_format,
                                                    dilation_rate=self.dilation_rate,
                                                    use_bias=False,
                                                    kernel_initializer=projection_kernel_initializer,
                                                    kernel_regularizer=self.kernel_regularizer,
                                                    activity_regularizer=self.activity_regularizer,
                                                    kernel_constraint=self.kernel_constraint,
                                                    bias_constraint=self.bias_constraint)
        self._layers = copy(self.conv_layers)
        if self.projection_layer is not None:
            self._layers.append(self.projection_layer)
Example #6
    def __init__(self, filter_size=3, strides=2, padding='valid', **kwargs):
        rank = 2
        self.filter_size = filter_size
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)

        if self.filter_size == 1:
            self.a = np.array([1.])
        elif self.filter_size == 2:
            self.a = np.array([1., 1.])
        elif self.filter_size == 3:
            self.a = np.array([1., 2., 1.])
        elif self.filter_size == 4:
            self.a = np.array([1., 3., 3., 1.])
        elif self.filter_size == 5:
            self.a = np.array([1., 4., 6., 4., 1.])
        elif self.filter_size == 6:
            self.a = np.array([1., 5., 10., 10., 5., 1.])
        elif self.filter_size == 7:
            self.a = np.array([1., 6., 15., 20., 15., 6., 1.])

        super(Blur2D, self).__init__(**kwargs)
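The hard-coded arrays above are rows of Pascal's triangle (binomial coefficients), a standard approximation of a Gaussian blur kernel. Here is a sketch that generates the same coefficients for any filter_size instead of enumerating cases; the helper name is hypothetical:

import numpy as np

def binomial_row(filter_size):
    # Convolving [1.] with [1., 1.] repeatedly builds the Pascal's triangle row.
    a = np.array([1.])
    for _ in range(filter_size - 1):
        a = np.convolve(a, [1., 1.])
    return a

assert np.allclose(binomial_row(3), [1., 2., 1.])
assert np.allclose(binomial_row(7), [1., 6., 15., 20., 15., 6., 1.])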
Example #7
def sample_label_matrix(y, window_size=(30, 30), padding='valid',
                        max_training_examples=1e7, data_format=None):
    """Sample a 4D Tensor, creating many small images of shape `window_size`.

    Args:
        y: label masks with the same shape as `X` data
        window_size: size of window around each pixel to sample
        padding: padding type `valid` or `same`
        max_training_examples: max number of samples per class
        data_format: `channels_first` or `channels_last`

    Returns:
        4 arrays of coordinates of each sampled pixel
    """
    data_format = conv_utils.normalize_data_format(data_format)
    is_channels_first = data_format == 'channels_first'
    if is_channels_first:
        num_dirs, num_features, image_size_x, image_size_y = y.shape
    else:
        num_dirs, image_size_x, image_size_y, num_features = y.shape

    window_size = conv_utils.normalize_tuple(window_size, 2, 'window_size')
    window_size_x, window_size_y = window_size

    feature_rows, feature_cols, feature_batch, feature_label = [], [], [], []

    for direc in range(num_dirs):
        for k in range(num_features):
            if is_channels_first:
                feature_rows_temp, feature_cols_temp = np.where(y[direc, k, :, :] == 1)
            else:
                feature_rows_temp, feature_cols_temp = np.where(y[direc, :, :, k] == 1)

            # Check to make sure the features are actually present
            if feature_rows_temp.size == 0:
                continue

            # Randomly permute index vector
            non_rand_ind = np.arange(len(feature_rows_temp))
            rand_ind = np.random.choice(non_rand_ind, size=len(feature_rows_temp), replace=False)

            for i in rand_ind:
                condition = padding == 'valid' and \
                    feature_rows_temp[i] - window_size_x > 0 and \
                    feature_rows_temp[i] + window_size_x < image_size_x and \
                    feature_cols_temp[i] - window_size_y > 0 and \
                    feature_cols_temp[i] + window_size_y < image_size_y

                if padding == 'same' or condition:
                    feature_rows.append(feature_rows_temp[i])
                    feature_cols.append(feature_cols_temp[i])
                    feature_batch.append(direc)
                    feature_label.append(k)

    # Randomize
    non_rand_ind = np.arange(len(feature_rows), dtype='int32')
    if not max_training_examples:
        max_training_examples = non_rand_ind.size
    else:
        max_training_examples = int(max_training_examples)

    limit = min(non_rand_ind.size, max_training_examples)
    rand_ind = np.random.choice(non_rand_ind, size=limit, replace=False)

    feature_rows = np.array(feature_rows, dtype='int32')[rand_ind]
    feature_cols = np.array(feature_cols, dtype='int32')[rand_ind]
    feature_batch = np.array(feature_batch, dtype='int32')[rand_ind]
    feature_label = np.array(feature_label, dtype='int32')[rand_ind]

    return feature_rows, feature_cols, feature_batch, feature_label
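A toy invocation of sample_label_matrix as defined above. The shapes and values are illustrative only, and channels_last layout is assumed:

import numpy as np

# Toy label stack: 2 images, 64x64 pixels, 3 feature channels (channels_last).
y = np.zeros((2, 64, 64, 3), dtype='int32')
y[0, 30:34, 30:34, 1] = 1  # a small blob of class-1 pixels

rows, cols, batch, label = sample_label_matrix(
    y, window_size=(5, 5), padding='valid',
    max_training_examples=100, data_format='channels_last')
# One coordinate component per sampled pixel in each returned array.
print(rows.shape, cols.shape, batch.shape, label.shape)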
Example #8
 def __init__(self, size=(2, 2), **kwargs):
     super(MaxUnpooling2D, self).__init__(**kwargs)
     self.size = conv_utils.normalize_tuple(size, 2, 'size')
Example #9
    def __init__(self,
                 filters,
                 kernel_size,
                 cov_kernel_size=(3, 1),
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(ConvLSTM2DCell_2, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
                                                      'kernel_size')
        #############################
        self.cov_kernel_size = cov_kernel_size
        self.kernel_size_1 = conv_utils.normalize_tuple(
            cov_kernel_size, 2, 'kernel_size')
        ##################################
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        ###############################
        self.cov_kernel_initializer = initializers.get(kernel_initializer)
        ##########################
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size = (self.filters, self.filters)
Example #10
    def __init__(self,
                 dilations,
                 filters,
                 kernel_size,
                 activation=None,
                 attention_kernel_size=(3, 3, 3),
                 attention_filters=None,
                 attention_activation=tf.nn.relu,
                 use_bn=True,
                 **kwargs):
        """
        dilations : (list of ints, list of 2-tuples of ints)
            List of dilation rates to process on the input in parallel
        filters: (int)
            Number of filters of the dilated convolutions
        kernel_size: (int, 2-tuple of ints)
            Kernel dim in dilated convolution layers
        activation: (string, func)
            Activation function to apply to final Autofocus layer output
        attention_kernel_size : (int, 2-tuple of ints)
            Kernel dim in attention conv layer 1
        attention_filters : (int, None)
            Number of filters in attention conv layer 1, uses 'filters' // 2
            if not specified
        attention_activation: (func)
            TF activation function to apply after attention conv layer 1
        use_bn : (bool)
            Apply batch normalization after dilated convolutions
        kwargs : (dict)
            Passed to tf.keras.layers.Conv3D, for instance...
            -  kernel_initializer='glorot_uniform'
            -  bias_initializer='zeros'
            -  kernel_regularizer=None
            -  bias_regularizer=None
            -  activity_regularizer=None
            -  kernel_constraint=None
            -  bias_constraint=None

            NOTE: These passed parameters are currently used for all
            convolutions across the layer - both in the dilated network
            and attention layer
        """
        # Assert parameters passed are compatible
        if kwargs.get("padding") and kwargs["padding"].upper() != "SAME":
            raise NotImplementedError("Only implemented for padding 'SAME'")
        if kwargs.get("dilation_rate"):
            raise ValueError("Should not pass arguments to 'dilation_rate'. "
                             "Pass a list to 'dilations' instead.")
        kwargs["dilation_rate"] = (1, 1, 1)
        kwargs["padding"] = "SAME"

        # Init base tf 3D Conv class
        super(Autofocus3D, self).__init__(filters=filters,
                                          kernel_size=kernel_size,
                                          activation=activation,
                                          **kwargs)

        # Use batch norm in dilation network?
        self.use_bn = use_bn

        # Attributes for attention network
        self.attention_filters = attention_filters or self.filters // 2
        self.attention_kernel_size = conv_utils.normalize_tuple(
            attention_kernel_size, self.rank, 'kernel_size')
        self.attention_activation = attention_activation

        # Dilations
        self.dilations = [
            conv_utils.normalize_tuple(d, self.rank, 'dilation_rate')
            for d in dilations
        ]
        self.conv_ops = []
        self.attention_ops = []
Example #11
    def call(self, inputs):
        if self.data_format == 'channels_first':
            inputs = K.permute_dimensions(inputs, pattern=[0, 2, 3, 4, 1])

        padding_input = self.padding.upper()
        dilation_rate = conv_utils.normalize_tuple(
            self.dilation_rate, 3, 'dilation_rate')

        if self.padding == 'valid':
            outputs = tf.nn.pool(inputs,
                                 window_shape=self.pool_size,
                                 pooling_type='MAX',
                                 padding=padding_input,
                                 dilation_rate=dilation_rate,
                                 strides=self.strides,
                                 data_format='NDHWC')
        elif self.padding == 'same':
            input_shape = K.int_shape(inputs)
            times = input_shape[1]
            rows = input_shape[2]
            cols = input_shape[3]

            times_unpadded = conv_utils.conv_output_length(
                times, self.pool_size[0],
                padding='valid',
                stride=self.strides[0],
                dilation=dilation_rate[0])

            rows_unpadded = conv_utils.conv_output_length(
                rows, self.pool_size[1],
                padding='valid',
                stride=self.strides[1],
                dilation=dilation_rate[1])

            cols_unpadded = conv_utils.conv_output_length(
                cols, self.pool_size[2],
                padding='valid',
                stride=self.strides[2],
                dilation=dilation_rate[2])

            t_pad = (times - times_unpadded) // 2
            w_pad = (rows - rows_unpadded) // 2
            h_pad = (cols - cols_unpadded) // 2

            t_pad = (t_pad, t_pad)
            w_pad = (w_pad, w_pad)
            h_pad = (h_pad, h_pad)

            pattern = [[0, 0], list(t_pad), list(w_pad), list(h_pad), [0, 0]]

            # Pad the image
            outputs = tf.pad(inputs, pattern, mode='REFLECT')

            # Perform pooling
            outputs = tf.nn.pool(outputs,
                                 window_shape=self.pool_size,
                                 pooling_type='MAX',
                                 padding='VALID',
                                 dilation_rate=dilation_rate,
                                 strides=self.strides,
                                 data_format='NDHWC')

        if self.data_format == 'channels_first':
            outputs = K.permute_dimensions(outputs, pattern=[0, 4, 1, 2, 3])

        return outputs
Example #12
    def __init__(self,
                 rank,
                 kernel_size,
                 growth_rate,
                 depth,
                 output_filters=None,
                 use_bottleneck=True,
                 bottleneck_filters_multiplier=4,
                 use_batch_normalization=True,
                 data_format=None,
                 activation="relu",
                 use_bias=True,
                 kernel_initializer="he_normal",
                 bias_initializer="zeros",
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        if rank not in [1, 2, 3]:
            raise ValueError(
                "`rank` must be in [1, 2, 3]. Got {}".format(rank))

        super(DenseBlockND, self).__init__(**kwargs)

        self.rank = rank
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      "kernel_size")
        self.output_filters = output_filters
        self.growth_rate = growth_rate

        if use_bottleneck:
            if (depth % 2) != 0:
                raise ValueError(
                    "Depth must be a multiple of 2 when using bottlenecks. Got {}."
                    .format(depth))

        self._depth = depth // 2 if use_bottleneck else depth
        self.use_bottleneck = use_bottleneck
        self.bottleneck_filters_multiplier = bottleneck_filters_multiplier

        self.use_batch_normalization = use_batch_normalization

        self.data_format = conv_utils.normalize_data_format(data_format)
        self.channel_axis = -1 if self.data_format == "channels_last" else 1

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.composite_function_blocks: Optional[
            List[CompositeFunctionBlock]] = None
        self.transition_layer = None

        self.input_spec = InputSpec(ndim=self.rank + 2)
Example #13
    def call(self, inputs, argmax, spatial_output_shape):

        # standardize spatial_output_shape
        spatial_output_shape = conv_utils.normalize_tuple(
            spatial_output_shape, 2, 'spatial_output_shape')

        # getting input shape
        # input_shape = tf.shape(inputs)
        # input_shape = inputs.get_shape().as_list()
        input_shape = tf.shape(inputs)

        # checking if spatial shape is ok
        if self.data_format == 'channels_last':
            output_shape = (input_shape[0],) + \
                spatial_output_shape + (input_shape[3],)

            # assert output_shape[1] * output_shape[2] * output_shape[
            #     3] > tf.math.reduce_max(argmax).numpy(), "HxWxC <= Max(argmax)"
        else:
            output_shape = (input_shape[0],
                            input_shape[1]) + spatial_output_shape
            # assert output_shape[1] * output_shape[2] * output_shape[
            #     3] > tf.math.reduce_max(argmax).numpy(), "CxHxW <= Max(argmax)"

        # N * H_in * W_in * C
        flat_input_size = tf.reduce_prod(input_shape)

        # flat output_shape = [N, H_out * W_out * C]
        flat_output_shape = [
            output_shape[0],
            output_shape[1] * output_shape[2] * output_shape[3]
        ]

        # flatten input tensor for the use in tf.scatter_nd
        inputs_ = tf.reshape(inputs, [flat_input_size])

        # create the tensor [ [[[0]]], [[[1]]], ..., [[[N-1]]] ]
        # corresponding to the batch size but transposed in 4D
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64),
                                          dtype=argmax.dtype),
                                 shape=[input_shape[0], 1, 1, 1])

        # b is a tensor of size (N, H, W, C) or (N, C, H, W) whose
        # first element of the batch are 3D-array full of 0, ...
        # second element of the batch are 3D-array full of 1, ...
        b = tf.ones_like(argmax) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])

        # argmax_ = [ [0, argmax_1], [0, argmax_2], ... [0, argmax_k], ...,
        # [N-1, argmax_{N*H*W*C}], [N-1, argmax_{N*H*W*C-1}] ]
        argmax_ = tf.reshape(argmax, [flat_input_size, 1])
        argmax_ = tf.concat([b, argmax_], axis=-1)

        # reshaping output tensor
        ret = tf.scatter_nd(argmax_,
                            inputs_,
                            shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, output_shape)

        return ret
Example #14
 def __init__(self, size=(2, 2), data_format=None, **kwargs):
     super(PixelShuffler, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.size = conv_utils.normalize_tuple(size, 2, 'size')
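Layers like PixelShuffler are commonly implemented on top of tf.nn.depth_to_space. A hedged sketch of the underlying operation for size=(2, 2), assuming channels_last input; this is the standard TF op, not necessarily the exact call this class makes:

import tensorflow as tf

# Pixel shuffling with size=(2, 2) corresponds to depth-to-space with
# block_size=2: (batch, H, W, C * 4) -> (batch, 2H, 2W, C).
x = tf.random.normal((1, 8, 8, 12))
y = tf.nn.depth_to_space(x, block_size=2)
print(y.shape)  # (1, 16, 16, 3)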
Example #15
    def __init__(self,
                 rank,
                 filters,
                 kernel_size,
                 param_reduction=0.5,
                 form='diagonal',
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 groups=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 conv_op=None,
                 **kwargs):
        super(SzConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        self._validate_init()
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)

        self.reduction_sv = param_reduction
        self.form = form
        self.num_ones = 0
        self.num_weights = 0
        self.reduced_ratio = 0
        self.halfbandwidth = 0
Example #16
    def build(self, input_shape, **kwargs):

        if self.data_format == 'channels_first':
            channel_axis = 1
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            self.output_size = tensor_shape.TensorShape(
                [input_shape[0], self.filters] + new_space)

        else:
            channel_axis = -1
            space = input_shape[2:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            self.output_size = tensor_shape.TensorShape([input_shape[0]] +
                                                        new_space +
                                                        [self.filters])

        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
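        # Note: shape_x packs seven gate convolutions along its last axis
        # (sliced below into kernel_xi, xf, xc, xip, xfp, xm and xo), hence
        # `filters * 7`; shape_m and shape_h pack four gates each.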

        shape_x = self.kernel_size + (input_dim, self.filters * 7)
        shape_m = self.kernel_size + (self.filters, self.filters * 4)
        shape_h = self.kernel_size + (self.filters, self.filters * 4)
        shape_c = self.kernel_size + (self.filters, self.filters)
        shape_1by1 = conv_utils.normalize_tuple(1, 2, 'kernel_size') +\
            (self.filters * 2, self.filters)
        self.kernel_shape = shape_x

        self.kernel_x = self.add_weight(shape=shape_x,
                                        initializer=self.kernel_initializer,
                                        name='kernel_x',
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint)
        self.kernel_m = self.add_weight(shape=shape_m,
                                        initializer=self.recurrent_initializer,
                                        name='kernel_m',
                                        regularizer=self.recurrent_regularizer,
                                        constraint=self.recurrent_constraint)
        self.kernel_h = self.add_weight(shape=shape_h,
                                        initializer=self.recurrent_initializer,
                                        name='kernel_h',
                                        regularizer=self.recurrent_regularizer,
                                        constraint=self.recurrent_constraint)
        self.kernel_c = self.add_weight(shape=shape_c,
                                        initializer=self.recurrent_initializer,
                                        name='kernel_c',
                                        regularizer=self.recurrent_regularizer,
                                        constraint=self.recurrent_constraint)
        self.kernel_1by1 = self.add_weight(shape=shape_1by1,
                                           initializer=self.kernel_initializer,
                                           name='kernel_1by1',
                                           regularizer=self.kernel_regularizer,
                                           constraint=self.kernel_constraint)

        if self.use_bias:
            if self.unit_forget_bias:

                def bias_initializer(_, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.filters, ), *args,
                                              **kwargs),
                        initializers.Ones()((self.filters, ), *args, **kwargs),
                        self.bias_initializer((self.filters * 2, ), *args,
                                              **kwargs),
                        initializers.Ones()((self.filters, ), *args, **kwargs),
                        self.bias_initializer((self.filters * 2, ), *args,
                                              **kwargs)
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.filters * 7, ),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)

        else:
            self.bias = None

        self.kernel_xi = self.kernel_x[:, :, :, :self.filters]
        self.kernel_xf = self.kernel_x[:, :, :, self.filters:self.filters * 2]
        self.kernel_xc = self.kernel_x[:, :, :,
                                       self.filters * 2:self.filters * 3]

        self.kernel_hi = self.kernel_h[:, :, :, :self.filters]
        self.kernel_hf = self.kernel_h[:, :, :, self.filters:self.filters * 2]
        self.kernel_hc = self.kernel_h[:, :, :,
                                       self.filters * 2:self.filters * 3]

        self.kernel_xip = self.kernel_x[:, :, :,
                                        self.filters * 3:self.filters * 4]
        self.kernel_xfp = self.kernel_x[:, :, :,
                                        self.filters * 4:self.filters * 5]
        self.kernel_xm = self.kernel_x[:, :, :,
                                       self.filters * 5:self.filters * 6]

        self.kernel_mi = self.kernel_m[:, :, :, :self.filters]
        self.kernel_mf = self.kernel_m[:, :, :, self.filters:self.filters * 2]
        self.kernel_mm = self.kernel_m[:, :, :,
                                       self.filters * 2:self.filters * 3]

        self.kernel_xo = self.kernel_x[:, :, :, self.filters * 6:]
        self.kernel_ho = self.kernel_h[:, :, :, self.filters * 3:]
        self.kernel_co = self.kernel_c
        self.kernel_mo = self.kernel_m[:, :, :, self.filters * 3:]

        if self.use_bias:
            self.bias_i = self.bias[:self.filters]
            self.bias_f = self.bias[self.filters:self.filters * 2]
            self.bias_c = self.bias[self.filters * 2:self.filters * 3]
            self.bias_ip = self.bias[self.filters * 3:self.filters * 4]
            self.bias_fp = self.bias[self.filters * 4:self.filters * 5]
            self.bias_m = self.bias[self.filters * 5:self.filters * 6]
            self.bias_o = self.bias[self.filters * 6:]
        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_ip = None
            self.bias_fp = None
            self.bias_m = None
            self.bias_o = None

        self.built = True
Example #17
    def __init__(self,
                 train_dict,
                 movie_data_generator,
                 batch_size=32,
                 shuffle=False,
                 transform=None,
                 transform_kwargs={},
                 balance_classes=False,
                 max_class_samples=None,
                 window_size=(30, 30, 5),
                 seed=None,
                 data_format='channels_last',
                 save_to_dir=None,
                 save_prefix='',
                 save_format='png'):
        X, y = train_dict['X'], train_dict['y']
        if y is not None and X.shape[0] != y.shape[0]:
            raise ValueError('`X` (movie data) and `y` (labels) '
                             'should have the same size. Found '
                             'x.shape = {}, y.shape = {}'.format(
                                 X.shape, y.shape))
        self.channel_axis = 4 if data_format == 'channels_last' else 1
        self.time_axis = 1 if data_format == 'channels_last' else 2
        self.x = np.asarray(X, dtype=K.floatx())
        y = _transform_masks(y,
                             transform,
                             data_format=data_format,
                             **transform_kwargs)

        if self.x.ndim != 5:
            raise ValueError(
                'Input data in `SampleMovieArrayIterator` '
                'should have rank 5. You passed an array '
                'with shape', self.x.shape)

        window_size = conv_utils.normalize_tuple(window_size, 3, 'window_size')

        pixels_z, pixels_x, pixels_y, batch, y = sample_label_movie(
            y=y,
            padding='valid',
            window_size=window_size,
            max_training_examples=None,
            data_format=data_format)

        self.y = y
        self.win_x = window_size[0]
        self.win_y = window_size[1]
        self.win_z = window_size[2]
        self.pixels_x = pixels_x
        self.pixels_y = pixels_y
        self.pixels_z = pixels_z
        self.batch = batch
        self.movie_data_generator = movie_data_generator
        self.data_format = data_format
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format

        self.class_balance(max_class_samples, balance_classes, seed=seed)

        self.y = to_categorical(self.y).astype('int32')
        super(SampleMovieArrayIterator, self).__init__(len(self.y), batch_size,
                                                       shuffle, seed)
Example #18
 def dilation_rate(self) -> Tuple:
     if not isinstance(self._dilation_rate, (tuple, list)):
         self._dilation_rate = conv_utils.normalize_tuple(self._dilation_rate, self.rank, "dilation_rate")
     return self._dilation_rate
Example #19
 def strides(self) -> Tuple:
     if not isinstance(self._strides, (tuple, list)):
         self._strides = conv_utils.normalize_tuple(self._strides, self.rank, "strides")
     return self._strides
Example #20
 def kernel_size(self) -> Tuple:
     if not isinstance(self._kernel_size, (tuple, list)):
         self._kernel_size = conv_utils.normalize_tuple(self._kernel_size, self.rank, "kernel_size")
     return self._kernel_size
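Examples #18 through #20 share one pattern: store the raw constructor argument and normalize it lazily on first property access, caching the resulting tuple. A self-contained sketch of that pattern; the class name is hypothetical and the TF-internal conv_utils import mirrors the one used throughout these examples:

from tensorflow.python.keras.utils import conv_utils

class LazyConvParams:
    def __init__(self, rank, kernel_size):
        self.rank = rank
        self._kernel_size = kernel_size  # raw int or tuple, as passed in

    @property
    def kernel_size(self):
        # Normalize on first access only, then cache the tuple.
        if not isinstance(self._kernel_size, (tuple, list)):
            self._kernel_size = conv_utils.normalize_tuple(
                self._kernel_size, self.rank, "kernel_size")
        return self._kernel_size

print(LazyConvParams(rank=2, kernel_size=3).kernel_size)  # (3, 3)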
Example #21
 def projection_kernel_size(self):
     return conv_utils.normalize_tuple(1, self.rank,
                                       "projection_kernel_size")
Example #22
    def __init__(
            self,
            rank,
            filters,
            kernel_size,
            strides=1,
            padding="valid",
            data_format=None,
            dilation_rate=1,
            activation=None,
            use_bias=True,
            normalize_weight=False,
            kernel_initializer="complex",
            bias_initializer="zeros",
            gamma_diag_initializer=sqrt_init,
            gamma_off_initializer="zeros",
            kernel_regularizer=None,
            bias_regularizer=None,
            gamma_diag_regularizer=None,
            gamma_off_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            gamma_diag_constraint=None,
            gamma_off_constraint=None,
            init_criterion="he",
            seed=None,
            spectral_parametrization=False,
            transposed=False,
            epsilon=1e-7,
            **kwargs):
        super(ComplexConv, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, "kernel_size"
        )
        self.strides = conv_utils.normalize_tuple(strides, rank, "strides")
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = "channels_last" \
            if rank == 1 else conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.normalize_weight = normalize_weight
        self.init_criterion = init_criterion
        self.spectral_parametrization = spectral_parametrization
        self.transposed = transposed
        self.epsilon = epsilon
        self.kernel_initializer = sanitizedInitGet(kernel_initializer)
        self.bias_initializer = sanitizedInitGet(bias_initializer)
        self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
        self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
        self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
        self.gamma_off_constraint = constraints.get(gamma_off_constraint)
        if seed is None:
            self.seed = np.random.randint(1, 10e6)
        else:
            self.seed = seed
        self.input_spec = InputSpec(ndim=self.rank + 2)

        # The following are initialized later
        self.kernel_shape = None
        self.kernel = None
        self.gamma_rr = None
        self.gamma_ii = None
        self.gamma_ri = None
        self.bias = None
Example #23
    def __init__(
            self,
            filters,
            kernel_size,
            feature_number,  # number of parallel time series (see docstring)
            strides=(1, 1),
            padding='valid',
            data_format=None,
            dilation_rate=(1, 1),
            activation='tanh',
            recurrent_activation='hard_sigmoid',
            conv_activation='hard_sigmoid',
            convolutional_type="early",
            use_bias=True,
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            unit_forget_bias=True,
            kernel_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            recurrent_constraint=None,
            bias_constraint=None,
            dropout=0.,
            recurrent_dropout=0.,
            **kwargs):
        """
        filters : A list , Specifies the number of filters in each layer, e.g. [10,10]
        kernel_size : A List , Same length as filters, Window size for 1D convolution e.g. [3,3]    # ????feature
        feature_number: int , Number of multiple time series e.g 28 sensors -->  28 
        recurrent_activation : A str List, Specifies the tupe of activation functions
        
        
        """

        super(ConvLSTM1DCell, self).__init__(**kwargs)

        self.number_of_layer = len(filters)

        self.out_feature_number = feature_number
        self.convolutional_type = convolutional_type

        # =============   Each layer has different parameters    ======================
        self.filters = filters
        self.conv_layer_number = len(filters)

        self.kernel_size = []

        for index, size in enumerate(kernel_size):
            if self.convolutional_type[index] == "hybrid":
                self.kernel_size.append(
                    conv_utils.normalize_tuple((size, 1), 2, 'kernel_size'))
            if self.convolutional_type[index] == "early":
                self.kernel_size.append(
                    conv_utils.normalize_tuple((size, feature_number), 2,
                                               'kernel_size'))
                self.out_feature_number = 1
                feature_number = 1

        self.recurrent_activation = []
        for acti in recurrent_activation:
            self.recurrent_activation.append(activations.get(acti))

        self.conv_activation = []
        for acti in conv_activation:
            self.conv_activation.append(activations.get(acti))

        self.state_size = (self.filters[-1], self.filters[-1])

        # =============   Each layer has the same parameter   ======================
        self.strides = conv_utils.normalize_tuple(strides, 2,
                                                  'strides')  # (1,1)
        self.padding = conv_utils.normalize_padding(padding)  # valid
        self.data_format = conv_utils.normalize_data_format(
            data_format)  # None --- -1
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)  # tanh default
        self.use_bias = use_bias  # True
        self.kernel_initializer = initializers.get(
            kernel_initializer)  # glorot_uniform
        self.recurrent_initializer = initializers.get(
            recurrent_initializer)  # orthogonal
        self.bias_initializer = initializers.get(bias_initializer)  # zeros
        self.unit_forget_bias = unit_forget_bias  # True

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
Example #24
    def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 depth_multiplier=1,
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(DepthwiseConv2D,
              self).__init__(filters=None,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding=padding,
                             data_format=data_format,
                             activation=activation,
                             use_bias=use_bias,
                             bias_regularizer=bias_regularizer,
                             activity_regularizer=activity_regularizer,
                             bias_constraint=bias_constraint,
                             **kwargs)

        self.rank = 2
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, self.rank,
                                                      'kernel_size')
        if self.kernel_size[0] != self.kernel_size[1]:
            raise NotImplementedError(
                "TF Encrypted currently only supports the same "
                "kernel size along the height and the width. "
                "You gave: {}".format(self.kernel_size))
        self.strides = conv_utils.normalize_tuple(strides, self.rank,
                                                  'strides')
        self.padding = conv_utils.normalize_padding(padding).upper()
        self.depth_multiplier = depth_multiplier
        self.data_format = conv_utils.normalize_data_format(data_format)
        if activation is not None:
            logger.info(
                "Performing an activation before a pooling layer can result "
                "in unnecessary performance loss. Check model definition in "
                "case of missed optimization.")
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        # Not implemented arguments
        default_args_check(depthwise_regularizer, "depthwise_regularizer",
                           "DepthwiseConv2D")
        default_args_check(bias_regularizer, "bias_regularizer",
                           "DepthwiseConv2D")
        default_args_check(activity_regularizer, "activity_regularizer",
                           "DepthwiseConv2D")
        default_args_check(depthwise_constraint, "depthwise_constraint",
                           "DepthwiseConv2D")
        default_args_check(bias_constraint, "bias_constraint",
                           "DepthwiseConv2D")
    def __init__(
            self,
            rank: int,
            head_size: int,
            head_count: int,
            kernel_size: Union[int, Tuple, List],
            strides: Union[int, Tuple, List],
            # data_format: Optional[AnyStr],
            dilation_rate: Union[int, Tuple, List],
            activation: Optional[Union[AnyStr, Callable]],
            use_bias: bool,
            kernel_initializer: Optional[Union[Dict, AnyStr, Callable]],
            bias_initializer: Optional[Union[Dict, AnyStr, Callable]],
            embeddings_initializer: Optional[Union[Dict, AnyStr, Callable]],
            kernel_regularizer: Optional[Union[Dict, AnyStr, Callable]],
            bias_regularizer: Optional[Union[Dict, AnyStr, Callable]],
            activity_regularizer: Optional[Union[Dict, AnyStr, Callable]],
            kernel_constraint: Optional[Union[Dict, AnyStr, Callable]],
            bias_constraint: Optional[Union[Dict, AnyStr, Callable]],
            trainable=True,
            name=None,
            **kwargs):
        activity_regularizer = regularizers.get(activity_regularizer)
        super(StandAloneSelfAttention,
              self).__init__(trainable=trainable,
                             name=name,
                             activity_regularizer=activity_regularizer,
                             **kwargs)

        # region Utils (normalizing tuples, data format and getting initializers/regularizers/constraints)
        kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                 "kernel_size")
        strides = conv_utils.normalize_tuple(strides, rank, "strides")
        # data_format = conv_utils.normalize_data_format(data_format)
        dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                   "dilation_rate")
        activation = activations.get(activation)
        kernel_initializer = initializers.get(kernel_initializer)
        bias_initializer = initializers.get(bias_initializer)
        if embeddings_initializer == "random_normal":
            embeddings_initializer = initializers.initializers_v2.RandomNormal(
                stddev=1.0)
        embeddings_initializer = initializers.get(embeddings_initializer)
        kernel_regularizer = regularizers.get(kernel_regularizer)
        bias_regularizer = regularizers.get(bias_regularizer)
        kernel_constraint = constraints.get(kernel_constraint)
        bias_constraint = constraints.get(bias_constraint)
        # endregion

        # region Base attributes
        self.rank = rank
        self.head_size = head_size
        self.head_count = head_count
        self.kernel_size = kernel_size
        self.strides = strides
        # self.data_format = data_format
        self.dilation_rate = dilation_rate
        self.activation = activation
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
        self.embeddings_initializer = embeddings_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.kernel_constraint = kernel_constraint
        self.bias_constraint = bias_constraint
        # endregion

        # region Queries/Keys/Values conv layers
        common_parameters = {
            "rank": self.rank,
            "filters": self.filters,
            "kernel_size": 1,
            "use_bias": self.use_bias,
            "kernel_initializer": self.kernel_initializer,
            "bias_initializer": self.bias_initializer,
            "kernel_regularizer": self.kernel_regularizer,
            "activity_regularizer": self.activity_regularizer,
            "kernel_constraint": self.kernel_constraint,
            "bias_constraint": self.bias_constraint,
        }
        self.queries_layer = Conv(name="Conv_Queries{}".format(self.name),
                                  **common_parameters)
        self.keys_layer = Conv(name="Conv_Keys{}".format(self.name),
                               **common_parameters)
        self.values_layer = Conv(name="Conv_Values{}".format(self.name),
                                 **common_parameters)
        # endregion

        # region Queries/Keys/Values unfold layers
        self.queries_unfold = Unfold(kernel_size=1,
                                     strides=strides,
                                     name="Unfold_Queries_{}".format(
                                         self.name))
        self.keys_unfold = Unfold(kernel_size=kernel_size,
                                  strides=strides,
                                  dilation_rate=dilation_rate,
                                  padding="SAME",
                                  name="Unfold_Keys_{}".format(self.name))
        self.values_unfold = Unfold(kernel_size=kernel_size,
                                    strides=strides,
                                    dilation_rate=dilation_rate,
                                    padding="SAME",
                                    name="Unfold_Values_{}".format(self.name))
        # endregion

        # region Time/Height/Width embeddings
        conv_embeddings = []
        for i in range(rank):
            dim_embeddings_size = self.filters // rank
            if i == 0:
                dim_embeddings_size += self.filters % rank

            dim_embeddings_shape = (dim_embeddings_size, *[1] * i,
                                    kernel_size[i], *[1] * (rank - i - 1))
            dim_embeddings = self.add_weight(
                name="dim_{}_embeddings".format(i + 1),
                shape=dim_embeddings_shape,
                dtype=tf.float32,
                initializer=self.embeddings_initializer)
            conv_embeddings.append(dim_embeddings)
        self.conv_embeddings = conv_embeddings
Example #26
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 rank=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 init_sigma=0.1,
                 groups=1,
                 norm=2,
                 activation=None,
                 trainSigmas=True,
                 trainWeights=True,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 sigma_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 conv_op=None,
                 **kwargs):
        super(Conv1DAdaptive, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.initsigma = None
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.initsigma = init_sigma
        self.norm = norm
        self.sigma_regularizer = regularizers.get(sigma_regularizer)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        self.trainSigmas = trainSigmas
        self.trainWeights = trainWeights
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
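
All of these constructors funnel their hyper-parameters through the same
conv_utils helpers. A minimal sketch of what they do; the import path is an
assumption and moves around between standalone Keras and tf.keras versions:

# Assumption: standalone Keras 2.x. Under tf.keras the same module lives at
# tensorflow.python.keras.utils.conv_utils.
from keras.utils import conv_utils

conv_utils.normalize_tuple(3, 2, 'kernel_size')    # scalar -> (3, 3)
conv_utils.normalize_tuple((1, 5), 2, 'strides')   # tuples pass through -> (1, 5)
conv_utils.normalize_padding('SAME')               # case-folds -> 'same'
conv_utils.normalize_data_format(None)             # backend default, usually 'channels_last'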
 def __init__(self, size=(1, 2, 2), interpolation='bilinear', **kwargs):
     self.size = conv_utils.normalize_tuple(size, 3, 'size')
     # Index the normalized tuple so a scalar `size` argument also works.
     self.x = int(self.size[1])
     self.y = int(self.size[2])
     self.interpolation = interpolation
     # NOTE: `super(self.__class__, self)` recurses infinitely if this class
     # is ever subclassed; naming the class explicitly is safer.
     super(self.__class__, self).__init__(**kwargs)
Example #28
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=None,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(Conv2D, self).__init__(**kwargs)

        self.rank = 2
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, self.rank,
                                                      'kernel_size')
        if self.kernel_size[0] != self.kernel_size[1]:
            raise NotImplementedError(
                "TF Encrypted currently only supports the same "
                "kernel size along the height and the width. "
                "You gave: {}".format(self.kernel_size))
        self.strides = conv_utils.normalize_tuple(strides, self.rank,
                                                  'strides')
        self.padding = conv_utils.normalize_padding(padding).upper()
        self.data_format = conv_utils.normalize_data_format(data_format)
        if activation is not None:
            logger.info(
                "Performing an activation before a pooling layer can result "
                "in unnecessary performance loss. Check model definition in "
                "case of missed optimization.")
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        if dilation_rate:
            raise NotImplementedError(
                arg_not_impl_msg.format("dilation_rate", "Conv2d"), )
        if kernel_regularizer:
            raise NotImplementedError(
                arg_not_impl_msg.format("kernel_regularizer", "Conv2d"), )
        if bias_regularizer:
            raise NotImplementedError(
                arg_not_impl_msg.format("bias_regularizer", "Conv2d"), )
        if activity_regularizer:
            raise NotImplementedError(
                arg_not_impl_msg.format("activity_regularizer", "Conv2d"), )
        if kernel_constraint:
            raise NotImplementedError(
                arg_not_impl_msg.format("kernel_constraint", "Conv2d"), )
        if bias_constraint:
            raise NotImplementedError(
                arg_not_impl_msg.format("bias_constraint", "Conv2d"), )
Example #29
def sample_label_movie(y,
                       window_size=(30, 30, 5),
                       padding='valid',
                       max_training_examples=1e7,
                       data_format=None):
    """Sample a 5D Tensor, creating many small voxels of shape window_size.

    Args:
        y (numpy.array): label masks with the same shape as X data
        window_size (tuple): size of window around each pixel to sample
        padding (str): padding type 'valid' or 'same'
        max_training_examples (int): max number of samples per class
        data_format (str): 'channels_first' or 'channels_last'

    Returns:
        tuple: 5 arrays of coordinates of each sampled pixel
    """
    data_format = conv_utils.normalize_data_format(data_format)
    is_channels_first = data_format == 'channels_first'
    if is_channels_first:
        num_dirs, num_features, image_size_z, image_size_x, image_size_y = y.shape
    else:
        num_dirs, image_size_z, image_size_x, image_size_y, num_features = y.shape

    window_size = conv_utils.normalize_tuple(window_size, 3, 'window_size')
    window_size_x, window_size_y, window_size_z = window_size

    feature_rows, feature_cols, feature_frames, feature_batch, feature_label = [], [], [], [], []

    for d in range(num_dirs):
        for k in range(num_features):
            if is_channels_first:
                frames_temp, rows_temp, cols_temp = np.where(
                    y[d, k, :, :, :] == 1)
            else:
                frames_temp, rows_temp, cols_temp = np.where(y[d, :, :, :,
                                                               k] == 1)

            # Check to make sure the features are actually present
            if not rows_temp.size > 0:
                continue

            # Randomly permute index vector
            non_rand_ind = np.arange(len(rows_temp))
            rand_ind = np.random.choice(non_rand_ind,
                                        size=len(rows_temp),
                                        replace=False)

            for i in rand_ind:
                condition = padding == 'valid' and \
                    frames_temp[i] - window_size_z > 0 and \
                    frames_temp[i] + window_size_z < image_size_z and \
                    rows_temp[i] - window_size_x > 0 and \
                    rows_temp[i] + window_size_x < image_size_x and \
                    cols_temp[i] - window_size_y > 0 and \
                    cols_temp[i] + window_size_y < image_size_y

                if padding == 'same' or condition:
                    feature_rows.append(rows_temp[i])
                    feature_cols.append(cols_temp[i])
                    feature_frames.append(frames_temp[i])
                    feature_batch.append(d)
                    feature_label.append(k)

    # Randomize
    non_rand_ind = np.arange(len(feature_rows), dtype='int32')
    if not max_training_examples:
        max_training_examples = non_rand_ind.size
    else:
        max_training_examples = int(max_training_examples)

    limit = min(non_rand_ind.size, max_training_examples)
    rand_ind = np.random.choice(non_rand_ind, size=limit, replace=False)

    feature_frames = np.array(feature_frames, dtype='int32')[rand_ind]
    feature_rows = np.array(feature_rows, dtype='int32')[rand_ind]
    feature_cols = np.array(feature_cols, dtype='int32')[rand_ind]
    feature_batch = np.array(feature_batch, dtype='int32')[rand_ind]
    feature_label = np.array(feature_label, dtype='int32')[rand_ind]

    return feature_frames, feature_rows, feature_cols, feature_batch, feature_label
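
A minimal usage sketch with a synthetic label volume (channels_last). The
single positive pixel below sits far enough from every border for its window
to fit, so it survives 'valid' padding:

import numpy as np

# One batch direction, 8 frames, 32x32 images, 2 feature channels.
y = np.zeros((1, 8, 32, 32, 2), dtype='int32')
y[0, 4, 16, 16, 0] = 1  # one positive pixel for feature 0

frames, rows, cols, batch, label = sample_label_movie(
    y, window_size=(5, 5, 2), padding='valid', data_format='channels_last')
# -> frames = [4], rows = [16], cols = [16], batch = [0], label = [0]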
Example #30
 def __init__(self, up_sampling=(2, 2), data_format=None, **kwargs):
     super(bilinear_upsampling, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.up_sampling = conv_utils.normalize_tuple(up_sampling, 2, 'size')
     self.input_spec = InputSpec(ndim=4)
Example #31
 def __init__(self, size=(2, 2), data_format=None, **kwargs):
     super(BilinearUpSampling2D, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.size = conv_utils.normalize_tuple(size, 2, 'size')
     self.input_spec = InputSpec(ndim=4)
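
A hypothetical usage, assuming the class's call() performs the bilinear
resize its name and constructor imply:

layer = BilinearUpSampling2D(size=(2, 2), data_format='channels_last')
# For an input of shape (batch, 32, 32, 3) the output would be
# (batch, 64, 64, 3); InputSpec(ndim=4) makes Keras reject non-4D inputs.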
Example #33
 def __init__(self, output_dim=(1, 1), data_format=None, **kwargs):
     super(ResizeImages, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.output_dim = conv_utils.normalize_tuple(output_dim, 2, 'output_dim')
     self.input_spec = InputSpec(ndim=4)
Example #34
    def __init__(self,
                 rank,
                 lgroups,
                 lfilters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super(_GroupConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank
        if rank > 2:
            raise ValueError(
                'The quick group convolution does not support 3D or '
                'higher-dimensional inputs.')
        initRank = rank
        self.lgroups = lgroups
        self.lfilters = lfilters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        if (self.padding == 'causal'
                and not isinstance(self, (Conv1D, SeparableConv1D))):
            raise ValueError(
                'Causal padding is only supported for `Conv1D` '
                'and `SeparableConv1D`.')
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')
        if rank == 1:  # When rank == 1, expand the tuples to the 2D case.
            self.kernel_size = (1, *self.kernel_size)
            self.strides = (1, *self.strides)
            self.dilation_rate = (1, *self.dilation_rate)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

        self.group_input_dim = None
        self.exp_dim_pos = None
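
A hypothetical construction, assuming the remainder of _GroupConv follows the
usual Keras Layer contract. It shows how the rank == 1 branch above lifts the
1D hyper-parameters into their 2D equivalents:

layer = _GroupConv(rank=1, lgroups=4, lfilters=8, kernel_size=5, strides=2)
# After __init__: layer.kernel_size == (1, 5), layer.strides == (1, 2),
# layer.dilation_rate == (1, 1). Presumably build() derives
# lgroups * lfilters = 32 total output channels, but that is an assumption.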