Example 1
  def test_normalize_data_format(self):
    self.assertEqual('channels_last',
                     conv_utils.normalize_data_format('Channels_Last'))
    self.assertEqual('channels_first',
                     conv_utils.normalize_data_format('CHANNELS_FIRST'))

    with self.assertRaises(ValueError):
      conv_utils.normalize_data_format('invalid')
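
For reference, a minimal sketch of what normalize_data_format does, assuming the stock Keras conv_utils behavior (lowercase the value, fall back to the backend default when None, reject everything else):

from tensorflow.keras import backend  # import path is an assumption

def normalize_data_format_sketch(value):
    # With no explicit value, fall back to the global Keras setting.
    if value is None:
        return backend.image_data_format()
    data_format = value.lower()
    # Only the two canonical spellings are accepted.
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('The `data_format` argument must be one of '
                         '"channels_first", "channels_last". '
                         'Received: ' + str(value))
    return data_format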
Example 2
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   super(LocallyConnected1D, self).__init__(**kwargs)
   self.filters = filters
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   if self.padding != 'valid':
     raise ValueError('Invalid border mode for LocallyConnected1D '
                      '(only "valid" is supported): ' + padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.activation = activations.get(activation)
   self.use_bias = use_bias
   self.kernel_initializer = initializers.get(kernel_initializer)
   self.bias_initializer = initializers.get(bias_initializer)
   self.kernel_regularizer = regularizers.get(kernel_regularizer)
   self.bias_regularizer = regularizers.get(bias_regularizer)
   self.activity_regularizer = regularizers.get(activity_regularizer)
   self.kernel_constraint = constraints.get(kernel_constraint)
   self.bias_constraint = constraints.get(bias_constraint)
   self.input_spec = InputSpec(ndim=3)
Example 3
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    super(ConvLSTM2DCell, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = (self.filters, self.filters)
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
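
The normalize_tuple calls above are what let these constructors accept either a scalar or a tuple; a brief sketch of the expected behavior, assuming the stock Keras utility:

from keras.utils import conv_utils  # exact import path varies across Keras versions

# A scalar is broadcast to a tuple of length n...
assert conv_utils.normalize_tuple(3, 2, 'kernel_size') == (3, 3)
# ...while a tuple of the right length passes through unchanged.
assert conv_utils.normalize_tuple((2, 5), 2, 'strides') == (2, 5)
# A tuple of the wrong length raises a ValueError naming the argument.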
Example 4
 def __init__(self, pool_function, pool_size, strides,
              padding='valid', data_format='channels_last',
              name=None, **kwargs):
   super(Pooling1D, self).__init__(name=name, **kwargs)
   if data_format is None:
     data_format = backend.image_data_format()
   if strides is None:
     strides = pool_size
   self.pool_function = pool_function
   self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.input_spec = InputSpec(ndim=3)
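
normalize_padding, used above, is simpler still; a minimal sketch assuming the standard Keras behavior:

def normalize_padding_sketch(value):
    # Padding strings are case-insensitive; only these three modes exist.
    padding = value.lower()
    if padding not in {'valid', 'same', 'causal'}:
        raise ValueError('The `padding` argument must be one of "valid", '
                         '"same", "causal". Received: ' + str(value))
    return padding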
Example 5
 def __init__(self, data_format=None, **kwargs):
   super(Flatten, self).__init__(**kwargs)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.input_spec = InputSpec(min_ndim=1)
Example 6
def sample_label_movie(y, window_size=(30, 30, 5), padding='valid',
                       max_training_examples=1e7, data_format=None):
    """Create a list of the maximum pixels to sample from each feature in each
    data set. If output_mode is 'sample', then this will be set to the number
    of edge pixels. If not, it will be set to np.Inf, i.e. sampling everything.
    """
    data_format = conv_utils.normalize_data_format(data_format)
    is_channels_first = data_format == 'channels_first'
    if is_channels_first:
        num_dirs, num_features, image_size_z, image_size_x, image_size_y = y.shape
    else:
        num_dirs, image_size_z, image_size_x, image_size_y, num_features = y.shape

    window_size = conv_utils.normalize_tuple(window_size, 3, 'window_size')
    window_size_x, window_size_y, window_size_z = window_size

    feature_rows, feature_cols, feature_frames, feature_batch, feature_label = [], [], [], [], []

    for d in range(num_dirs):
        for k in range(num_features):
            if is_channels_first:
                frames_temp, rows_temp, cols_temp = np.where(y[d, k, :, :, :] == 1)
            else:
                frames_temp, rows_temp, cols_temp = np.where(y[d, :, :, :, k] == 1)

            # Check to make sure the features are actually present
            if not rows_temp.size > 0:
                continue

            # Randomly permute index vector
            non_rand_ind = np.arange(len(rows_temp))
            rand_ind = np.random.choice(non_rand_ind, size=len(rows_temp), replace=False)

            for i in rand_ind:
                condition = padding == 'valid' and \
                    frames_temp[i] - window_size_z > 0 and \
                    frames_temp[i] + window_size_z < image_size_z and \
                    rows_temp[i] - window_size_x > 0 and \
                    rows_temp[i] + window_size_x < image_size_x and \
                    cols_temp[i] - window_size_y > 0 and \
                    cols_temp[i] + window_size_y < image_size_y

                if padding == 'same' or condition:
                    feature_rows.append(rows_temp[i])
                    feature_cols.append(cols_temp[i])
                    feature_frames.append(frames_temp[i])
                    feature_batch.append(d)
                    feature_label.append(k)

    # Randomize
    non_rand_ind = np.arange(len(feature_rows), dtype='int32')
    if not max_training_examples:
        max_training_examples = non_rand_ind.size
    else:
        max_training_examples = int(max_training_examples)

    limit = min(non_rand_ind.size, max_training_examples)
    rand_ind = np.random.choice(non_rand_ind, size=limit, replace=False)

    feature_frames = np.array(feature_frames, dtype='int32')[rand_ind]
    feature_rows = np.array(feature_rows, dtype='int32')[rand_ind]
    feature_cols = np.array(feature_cols, dtype='int32')[rand_ind]
    feature_batch = np.array(feature_batch, dtype='int32')[rand_ind]
    feature_label = np.array(feature_label, dtype='int32')[rand_ind]

    return feature_frames, feature_rows, feature_cols, feature_batch, feature_label
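
A hypothetical call on toy data, just to illustrate the expected channels_last layout of (batch, frames, x, y, features):

import numpy as np

# Toy mask: 1 batch, 8 frames, 32x32 pixels, 2 feature classes.
y = np.zeros((1, 8, 32, 32, 2), dtype='int32')
y[0, 4, 16, 16, 0] = 1  # one labeled pixel of class 0

frames, rows, cols, batch, labels = sample_label_movie(
    y, window_size=(5, 5, 2), padding='valid')
# Each returned array has one entry per sampled pixel.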
Example 7
 def __init__(self, data_format=None, **kwargs):
     super(UpsampleLike, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
Example 8
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 rank=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 init_sigma=0.1,
                 groups=1,
                 norm=2,
                 activation=None,
                 trainSigmas=True,
                 trainWeights=True,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 sigma_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 conv_op=None,
                 **kwargs):
        super(Conv1DAdaptive, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.initsigma = None
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.initsigma = init_sigma
        self.norm = norm
        self.sigma_regularizer = regularizers.get(sigma_regularizer)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        self.trainSigmas = trainSigmas
        self.trainWeights = trainWeights
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
Example 9
    def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 depth_multiplier=1,
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(DepthwiseConv2D,
              self).__init__(filters=None,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding=padding,
                             data_format=data_format,
                             activation=activation,
                             use_bias=use_bias,
                             bias_regularizer=bias_regularizer,
                             activity_regularizer=activity_regularizer,
                             bias_constraint=bias_constraint,
                             **kwargs)

        self.rank = 2
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, self.rank,
                                                      'kernel_size')
        if self.kernel_size[0] != self.kernel_size[1]:
            raise NotImplementedError(
                "TF Encrypted currently only supports same "
                "stride along the height and the width."
                "You gave: {}".format(self.kernel_size))
        self.strides = conv_utils.normalize_tuple(strides, self.rank,
                                                  'strides')
        self.padding = conv_utils.normalize_padding(padding).upper()
        self.depth_multiplier = depth_multiplier
        self.data_format = conv_utils.normalize_data_format(data_format)
        if activation is not None:
            logger.info(
                "Performing an activation before a pooling layer can result "
                "in unnecessary performance loss. Check model definition in "
                "case of missed optimization.")
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        # Not implemented arguments
        default_args_check(depthwise_regularizer, "depthwise_regularizer",
                           "DepthwiseConv2D")
        default_args_check(bias_regularizer, "bias_regularizer",
                           "DepthwiseConv2D")
        default_args_check(activity_regularizer, "activity_regularizer",
                           "DepthwiseConv2D")
        default_args_check(depthwise_constraint, "depthwise_constraint",
                           "DepthwiseConv2D")
        default_args_check(bias_constraint, "bias_constraint",
                           "DepthwiseConv2D")
Example 10
 def __init__(self, data_format=None, **kwargs):
     super(GlobalPooling3D, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=5)
Example 11
    def __init__(self,
                 rank,
                 kernel_size,
                 growth_rate,
                 depth,
                 output_filters=None,
                 use_bottleneck=True,
                 bottleneck_filters_multiplier=4,
                 use_batch_normalization=True,
                 data_format=None,
                 activation="relu",
                 use_bias=True,
                 kernel_initializer="he_normal",
                 bias_initializer="zeros",
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        if rank not in [1, 2, 3]:
            raise ValueError(
                "`rank` must be in [1, 2, 3]. Got {}".format(rank))

        super(DenseBlockND, self).__init__(**kwargs)

        self.rank = rank
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      "kernel_size")
        self.output_filters = output_filters
        self.growth_rate = growth_rate

        if use_bottleneck:
            if (depth % 2) != 0:
                raise ValueError(
                    "Depth must be a multiple of 2 when using bottlenecks. Got {}."
                    .format(depth))

        self._depth = depth // 2 if use_bottleneck else depth
        self.use_bottleneck = use_bottleneck
        self.bottleneck_filters_multiplier = bottleneck_filters_multiplier

        self.use_batch_normalization = use_batch_normalization

        self.data_format = conv_utils.normalize_data_format(data_format)
        self.channel_axis = -1 if self.data_format == "channels_last" else 1

        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.composite_function_blocks: Optional[
            List[CompositeFunctionBlock]] = None
        self.transition_layer = None

        self.input_spec = InputSpec(ndim=self.rank + 2)
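
A hypothetical instantiation, using only parameters from the signature above (what the block builds on top of these attributes depends on the rest of the class, which is not shown):

# 2D dense block: 3x3 kernels, 4 composite layers, growing 12 filters each.
block = DenseBlockND(rank=2,
                     kernel_size=3,
                     growth_rate=12,
                     depth=4,
                     use_bottleneck=True,
                     data_format='channels_last')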
Example 12
 def __init__(self, data_format=None, **kwargs):
     super(GlobalPooling2D, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=4)
     self._supports_ragged_inputs = True
Example 13
 def __init__(self, data_format=None, **kwargs):
   super(GlobalPooling3D, self).__init__(**kwargs)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.input_spec = InputSpec(ndim=5)
Example 14
    def __init__(self,
                 rank,
                 filters,
                 kernel_size,
                 param_reduction=0.5,
                 form='diagonal',
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 groups=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 conv_op=None,
                 **kwargs):
        super(SzConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        self._validate_init()
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)

        self.reduction_sv = param_reduction
        self.form = form
        self.num_ones = 0
        self.num_weights = 0
        self.reduced_ratio = 0
        self.halfbandwidth = 0
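
convert_data_format, used to set _tf_data_format above, maps the Keras spelling onto the TensorFlow string for a given total number of dimensions; the expected mapping, assuming the stock Keras utility, is:

from keras.utils import conv_utils  # exact import path varies across Keras versions

assert conv_utils.convert_data_format('channels_last', 3) == 'NWC'
assert conv_utils.convert_data_format('channels_last', 4) == 'NHWC'
assert conv_utils.convert_data_format('channels_last', 5) == 'NDHWC'
assert conv_utils.convert_data_format('channels_first', 4) == 'NCHW'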
Example 15
    def __init__(self,
                 filters,
                 kernel_size,
                 octave=2,
                 ratio_out=0.5,
                 strides=(1, 1),
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(OctaveConv2D, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = kernel_size
        self.octave = octave
        self.ratio_out = ratio_out
        self.strides = strides
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = dilation_rate
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.filters_low = int(filters * self.ratio_out)
        self.filters_high = filters - self.filters_low

        self.conv_high_to_high, self.conv_low_to_high = None, None
        if self.filters_high > 0:
            self.conv_high_to_high = self._init_conv(
                self.filters_high, name='{}-Conv2D-HH'.format(self.name))
            self.conv_low_to_high = self._init_conv(self.filters_high,
                                                    name='{}-Conv2D-LH'.format(
                                                        self.name))
        self.conv_low_to_low, self.conv_high_to_low = None, None
        if self.filters_low > 0:
            self.conv_low_to_low = self._init_conv(self.filters_low,
                                                   name='{}-Conv2D-LL'.format(
                                                       self.name))
            self.conv_high_to_low = self._init_conv(self.filters_low,
                                                    name='{}-Conv2D-HL'.format(
                                                        self.name))
        self.pooling = AveragePooling2D(
            pool_size=self.octave,
            padding='valid',
            data_format=data_format,
            name='{}-AveragePooling2D'.format(self.name),
        )
        self.up_sampling = UpSampling2D(
            size=self.octave,
            data_format=data_format,
            interpolation='nearest',
            name='{}-UpSampling2D'.format(self.name),
        )
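
The high/low split above is plain arithmetic; for example, with the defaults:

filters, ratio_out = 64, 0.5
filters_low = int(filters * ratio_out)  # 32 low-frequency filters
filters_high = filters - filters_low    # 32 high-frequency filters
# With ratio_out=0.25 instead: filters_low=16, filters_high=48.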
Example 16
 def __init__(self, data_format=None, **kwargs):
     super(GlobalPooling2D, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
Example 17
    def __init__(
            self,
            rank,
            filters,
            kernel_size,
            strides=1,
            padding="valid",
            data_format=None,
            dilation_rate=1,
            activation=None,
            use_bias=True,
            normalize_weight=False,
            kernel_initializer="complex",
            bias_initializer="zeros",
            gamma_diag_initializer=sqrt_init,
            gamma_off_initializer="zeros",
            kernel_regularizer=None,
            bias_regularizer=None,
            gamma_diag_regularizer=None,
            gamma_off_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            gamma_diag_constraint=None,
            gamma_off_constraint=None,
            init_criterion="he",
            seed=None,
            spectral_parametrization=False,
            transposed=False,
            epsilon=1e-7,
            **kwargs):
        super(ComplexConv, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, "kernel_size"
        )
        self.strides = conv_utils.normalize_tuple(strides, rank, "strides")
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = "channels_last" \
            if rank == 1 else conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.normalize_weight = normalize_weight
        self.init_criterion = init_criterion
        self.spectral_parametrization = spectral_parametrization
        self.transposed = transposed
        self.epsilon = epsilon
        self.kernel_initializer = sanitizedInitGet(kernel_initializer)
        self.bias_initializer = sanitizedInitGet(bias_initializer)
        self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
        self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
        self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
        self.gamma_off_constraint = constraints.get(gamma_off_constraint)
        if seed is None:
            self.seed = np.random.randint(1, 10e6)
        else:
            self.seed = seed
        self.input_spec = InputSpec(ndim=self.rank + 2)

        # The following are initialized later
        self.kernel_shape = None
        self.kernel = None
        self.gamma_rr = None
        self.gamma_ii = None
        self.gamma_ri = None
        self.bias = None
Example 18
 def __init__(self,
              filters,
              kernel_size,
              rank=2,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1),
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              lambda_l1=None,
              lambda_mask=None,
              shared=None,
              adaptive=None,
              from_kb=None,
              atten=None,
              mask=None,
              bias=None,
              **kwargs):
     super(DecomposedConv, self).__init__(
         trainable=trainable,
         name=name,
         activity_regularizer=regularizers.get(activity_regularizer),
         **kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     if (self.padding == 'causal'
             and not isinstance(self, (Conv1D, SeparableConv1D))):
          raise ValueError('Causal padding is only supported for `Conv1D` '
                           'and `SeparableConv1D`.')
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
     self.sw = shared
     self.aw = adaptive
     self.mask = mask
     self.bias = bias
     self.aw_kb = from_kb
     self.atten = atten
     self.lambda_l1 = lambda_l1
     self.lambda_mask = lambda_mask
Example 19
 def __init__(self, data_format=None, **kwargs):
     super(GlobalPooling2D, self).__init__(**kwargs)
     self._can_use_graph_functions = True
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=4)
Example 20
 def __init__(self, in_shape, data_format=None, **kwargs):
     super(Location2D, self).__init__(**kwargs)
     self.in_shape = in_shape
     self.data_format = conv_utils.normalize_data_format(data_format)
Example 21
 def __init__(self, size=(2, 2), data_format=None, **kwargs):
     super(PixelShuffler, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.size = conv_utils.normalize_tuple(size, 2, 'size')
Example 22
 def __init__(self, data_format='channels_last', name=None, **kwargs):
      super(MaxUnpool2D, self).__init__(name=name, **kwargs)
     if data_format is None:
         data_format = tf.keras.backend.image_data_format()
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.input_spec = tf.keras.layers.InputSpec(min_ndim=2, max_ndim=4)
Example 23
 def __init__(self, size=(2, 2), data_format=None, **kwargs):
     super(BilinearUpSampling2D, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.size = conv_utils.normalize_tuple(size, 2, 'size')
     self.input_spec = InputSpec(ndim=4)
Example 24
def sample_label_movie(y,
                       window_size=(30, 30, 5),
                       padding='valid',
                       max_training_examples=1e7,
                       data_format=None):
    """Sample a 5D Tensor, creating many small voxels of shape window_size.

    Args:
        y (numpy.array): label masks with the same shape as X data
        window_size (tuple): size of window around each pixel to sample
        padding (str): padding type 'valid' or 'same'
        max_training_examples (int): max number of samples per class
        data_format (str): 'channels_first' or 'channels_last'

    Returns:
        tuple: 5 arrays of coordinates of each sampled pixel
    """
    data_format = conv_utils.normalize_data_format(data_format)
    is_channels_first = data_format == 'channels_first'
    if is_channels_first:
        num_dirs, num_features, image_size_z, image_size_x, image_size_y = y.shape
    else:
        num_dirs, image_size_z, image_size_x, image_size_y, num_features = y.shape

    window_size = conv_utils.normalize_tuple(window_size, 3, 'window_size')
    window_size_x, window_size_y, window_size_z = window_size

    feature_rows, feature_cols, feature_frames, feature_batch, feature_label = [], [], [], [], []

    for d in range(num_dirs):
        for k in range(num_features):
            if is_channels_first:
                frames_temp, rows_temp, cols_temp = np.where(
                    y[d, k, :, :, :] == 1)
            else:
                frames_temp, rows_temp, cols_temp = np.where(y[d, :, :, :,
                                                               k] == 1)

            # Check to make sure the features are actually present
            if not rows_temp.size > 0:
                continue

            # Randomly permute index vector
            non_rand_ind = np.arange(len(rows_temp))
            rand_ind = np.random.choice(non_rand_ind,
                                        size=len(rows_temp),
                                        replace=False)

            for i in rand_ind:
                condition = padding == 'valid' and \
                    frames_temp[i] - window_size_z > 0 and \
                    frames_temp[i] + window_size_z < image_size_z and \
                    rows_temp[i] - window_size_x > 0 and \
                    rows_temp[i] + window_size_x < image_size_x and \
                    cols_temp[i] - window_size_y > 0 and \
                    cols_temp[i] + window_size_y < image_size_y

                if padding == 'same' or condition:
                    feature_rows.append(rows_temp[i])
                    feature_cols.append(cols_temp[i])
                    feature_frames.append(frames_temp[i])
                    feature_batch.append(d)
                    feature_label.append(k)

    # Randomize
    non_rand_ind = np.arange(len(feature_rows), dtype='int32')
    if not max_training_examples:
        max_training_examples = non_rand_ind.size
    else:
        max_training_examples = int(max_training_examples)

    limit = min(non_rand_ind.size, max_training_examples)
    rand_ind = np.random.choice(non_rand_ind, size=limit, replace=False)

    feature_frames = np.array(feature_frames, dtype='int32')[rand_ind]
    feature_rows = np.array(feature_rows, dtype='int32')[rand_ind]
    feature_cols = np.array(feature_cols, dtype='int32')[rand_ind]
    feature_batch = np.array(feature_batch, dtype='int32')[rand_ind]
    feature_label = np.array(feature_label, dtype='int32')[rand_ind]

    return feature_frames, feature_rows, feature_cols, feature_batch, feature_label
Example 25
File: conv.py Project: wujinke/MDNT
    def __init__(self,
                 rank,
                 lgroups,
                 lfilters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super(_GroupConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank
        if rank > 2:
            raise ValueError(
                'The quick group convolution does not support 3D or any higher dimension.'
            )
        initRank = rank
        self.lgroups = lgroups
        self.lfilters = lfilters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        if (self.padding == 'causal'
                and not isinstance(self, (Conv1D, SeparableConv1D))):
            raise ValueError(
                'Causal padding is only supported for `Conv1D` and `SeparableConv1D`.'
            )
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')
        if rank == 1:  # when rank=1, expand the tuples to 2D case.
            self.kernel_size = (1, *self.kernel_size)
            self.strides = (1, *self.strides)
            self.dilation_rate = (1, *self.dilation_rate)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

        self.group_input_dim = None
        self.exp_dim_pos = None
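
The rank == 1 branch above rewrites every 1D geometry as a 2D one with a dummy leading dimension, so the grouped 1D convolution can reuse 2D ops; concretely:

kernel_size = (3,)
strides = (2,)
# Mirror the `(1, *self.kernel_size)` expansion from the constructor above.
kernel_size = (1, *kernel_size)  # -> (1, 3)
strides = (1, *strides)          # -> (1, 2)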
Example 26
 def __init__(self, target_size, data_format=None, *args, **kwargs):
     self.target_size = target_size
     self.data_format = conv_utils.normalize_data_format(data_format)
     super(Upsample, self).__init__(*args, **kwargs)
Example 27
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=None,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(Conv2D, self).__init__(**kwargs)

        self.rank = 2
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, self.rank,
                                                      'kernel_size')
        if self.kernel_size[0] != self.kernel_size[1]:
            raise NotImplementedError(
                "TF Encrypted currently only supports same "
                "stride along the height and the width."
                "You gave: {}".format(self.kernel_size))
        self.strides = conv_utils.normalize_tuple(strides, self.rank,
                                                  'strides')
        self.padding = conv_utils.normalize_padding(padding).upper()
        self.data_format = conv_utils.normalize_data_format(data_format)
        if activation is not None:
            logger.info(
                "Performing an activation before a pooling layer can result "
                "in unnecessary performance loss. Check model definition in "
                "case of missed optimization.")
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        if dilation_rate:
            raise NotImplementedError(
                arg_not_impl_msg.format("dilation_rate", "Conv2d"), )
        if kernel_regularizer:
            raise NotImplementedError(
                arg_not_impl_msg.format("kernel_regularizer", "Conv2d"), )
        if bias_regularizer:
            raise NotImplementedError(
                arg_not_impl_msg.format("bias_regularizer", "Conv2d"), )
        if activity_regularizer:
            raise NotImplementedError(
                arg_not_impl_msg.format("activity_regularizer", "Conv2d"), )
        if kernel_constraint:
            raise NotImplementedError(
                arg_not_impl_msg.format("kernel_constraint", "Conv2d"), )
        if bias_constraint:
            raise NotImplementedError(
                arg_not_impl_msg.format("bias_constraint", "Conv2d"), )
Example 28
    def __init__(self, scale_factor=2, data_format=None, **kwargs):
        super(SubPixelDownscaling, self).__init__(**kwargs)

        self.scale_factor = scale_factor
        self.data_format = normalize_data_format(data_format)
Example 29
 def __init__(self, up_sampling=(2, 2), data_format=None, **kwargs):
     super(bilinear_upsampling, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.up_sampling = conv_utils.normalize_tuple(up_sampling, 2, 'size')
     self.input_spec = InputSpec(ndim=4)
Example 30
 def __init__(self, data_format=None, **kwargs):
     super(ClipBoxes, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
Example 31
    def __init__(self,
                 filters,
                 kernel_size,
                 cov_kernel_size=(3, 1),
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 **kwargs):
        super(ConvLSTM2DCell_2, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
                                                      'kernel_size')
        # Extra covariance kernel added on top of the stock ConvLSTM2DCell.
        self.cov_kernel_size = cov_kernel_size
        self.kernel_size_1 = conv_utils.normalize_tuple(
            cov_kernel_size, 2, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        # The extra covariance kernel reuses the main kernel initializer.
        self.cov_kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.state_size = (self.filters, self.filters)
Example 32
 def __init__(self, data_format='channels_last', **kwargs):
     super(GlobalPooling1D, self).__init__(**kwargs)
     self.input_spec = InputSpec(ndim=3)
     self.data_format = conv_utils.normalize_data_format(data_format)
Example 33
def sample_label_matrix(y, window_size=(30, 30), padding='valid',
                        max_training_examples=1e7, data_format=None):
    """Sample a 4D Tensor, creating many small images of shape `window_size`.

    Args:
        y: label masks with the same shape as `X` data
        window_size: size of window around each pixel to sample
        padding: padding type `valid` or `same`
        max_training_examples: max number of samples per class
        data_format: `channels_first` or `channels_last`

    Returns:
        4 arrays of coordinates of each sampled pixel
    """
    data_format = conv_utils.normalize_data_format(data_format)
    is_channels_first = data_format == 'channels_first'
    if is_channels_first:
        num_dirs, num_features, image_size_x, image_size_y = y.shape
    else:
        num_dirs, image_size_x, image_size_y, num_features = y.shape

    window_size = conv_utils.normalize_tuple(window_size, 2, 'window_size')
    window_size_x, window_size_y = window_size

    feature_rows, feature_cols, feature_batch, feature_label = [], [], [], []

    for direc in range(num_dirs):
        for k in range(num_features):
            if is_channels_first:
                feature_rows_temp, feature_cols_temp = np.where(y[direc, k, :, :] == 1)
            else:
                feature_rows_temp, feature_cols_temp = np.where(y[direc, :, :, k] == 1)

            # Check to make sure the features are actually present
            if not feature_rows_temp.size > 0:
                continue

            # Randomly permute index vector
            non_rand_ind = np.arange(len(feature_rows_temp))
            rand_ind = np.random.choice(non_rand_ind, size=len(feature_rows_temp), replace=False)

            for i in rand_ind:
                condition = padding == 'valid' and \
                    feature_rows_temp[i] - window_size_x > 0 and \
                    feature_rows_temp[i] + window_size_x < image_size_x and \
                    feature_cols_temp[i] - window_size_y > 0 and \
                    feature_cols_temp[i] + window_size_y < image_size_y

                if padding == 'same' or condition:
                    feature_rows.append(feature_rows_temp[i])
                    feature_cols.append(feature_cols_temp[i])
                    feature_batch.append(direc)
                    feature_label.append(k)

    # Randomize
    non_rand_ind = np.arange(len(feature_rows), dtype='int32')
    if not max_training_examples:
        max_training_examples = non_rand_ind.size
    else:
        max_training_examples = int(max_training_examples)

    limit = min(non_rand_ind.size, max_training_examples)
    rand_ind = np.random.choice(non_rand_ind, size=limit, replace=False)

    feature_rows = np.array(feature_rows, dtype='int32')[rand_ind]
    feature_cols = np.array(feature_cols, dtype='int32')[rand_ind]
    feature_batch = np.array(feature_batch, dtype='int32')[rand_ind]
    feature_label = np.array(feature_label, dtype='int32')[rand_ind]

    return feature_rows, feature_cols, feature_batch, feature_label
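
A hypothetical call on toy data, illustrating the channels_last layout of (batch, x, y, features) for the 4D case:

import numpy as np

# Toy mask: 1 batch, 64x64 pixels, 2 feature classes.
y = np.zeros((1, 64, 64, 2), dtype='int32')
y[0, 32, 32, 1] = 1  # one labeled pixel of class 1

rows, cols, batch, labels = sample_label_matrix(
    y, window_size=(30, 30), padding='valid')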
Example 34
 def __init__(self, data_format='channels_last', **kwargs):
   super(GlobalPooling1D, self).__init__(**kwargs)
   self.input_spec = InputSpec(ndim=3)
   self.data_format = conv_utils.normalize_data_format(data_format)
Example 35
    def __init__(
            self,
            filters,
            kernel_size,
            feature_number,
            strides=(1, 1),
            padding='valid',
            data_format=None,
            dilation_rate=(1, 1),
            activation='tanh',
            recurrent_activation='hard_sigmoid',
            conv_activation='hard_sigmoid',
            convolutional_type="early",
            use_bias=True,
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            unit_forget_bias=True,
            kernel_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            kernel_constraint=None,
            recurrent_constraint=None,
            bias_constraint=None,
            dropout=0.,
            recurrent_dropout=0.,
            **kwargs):
        """
        filters : A list , Specifies the number of filters in each layer, e.g. [10,10]
        kernel_size : A List , Same length as filters, Window size for 1D convolution e.g. [3,3]    # ????feature
        feature_number: int , Number of multiple time series e.g 28 sensors -->  28 
        recurrent_activation : A str List, Specifies the tupe of activation functions
        
        
        """

        super(ConvLSTM1DCell, self).__init__(**kwargs)

        self.number_of_layer = len(filters)

        self.out_feature_number = feature_number
        self.convolutional_type = convolutional_type

        # =============   Each layer has different parameters    ======================
        self.filters = filters
        self.conv_layer_number = len(filters)

        self.kernel_size = []

        for index, size in enumerate(kernel_size):
            if self.convolutional_type[index] == "hybrid":
                self.kernel_size.append(
                    conv_utils.normalize_tuple((size, 1), 2, 'kernel_size'))
            if self.convolutional_type[index] == "early":
                self.kernel_size.append(
                    conv_utils.normalize_tuple((size, feature_number), 2,
                                               'kernel_size'))
                self.out_feature_number = 1
                feature_number = 1

        self.recurrent_activation = []
        for acti in recurrent_activation:
            self.recurrent_activation.append(activations.get(acti))

        self.conv_activation = []
        for acti in conv_activation:
            self.conv_activation.append(activations.get(acti))

        self.state_size = (self.filters[-1], self.filters[-1])

        # =============   Each layer has the same parameter   ======================
        self.strides = conv_utils.normalize_tuple(strides, 2,
                                                  'strides')  # (1,1)
        self.padding = conv_utils.normalize_padding(padding)  # valid
        self.data_format = conv_utils.normalize_data_format(
            data_format)  # None --- -1
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, 2, 'dilation_rate')
        self.activation = activations.get(activation)  # tanh default
        self.use_bias = use_bias  # True
        self.kernel_initializer = initializers.get(
            kernel_initializer)  # glorot_uniform
        self.recurrent_initializer = initializers.get(
            recurrent_initializer)  # orthogonal
        self.bias_initializer = initializers.get(bias_initializer)  # zeros
        self.unit_forget_bias = unit_forget_bias  # True

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
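
A hypothetical instantiation matching the docstring above; all names come from the signature, and the list-valued arguments (one entry per layer) are an assumption based on how the constructor iterates over them:

cell = ConvLSTM1DCell(filters=[10, 10],
                      kernel_size=[3, 3],
                      feature_number=28,
                      convolutional_type=['early', 'hybrid'],
                      recurrent_activation=['hard_sigmoid', 'hard_sigmoid'],
                      conv_activation=['hard_sigmoid', 'hard_sigmoid'])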
Example 36
 def __init__(self, data_format=None, **kwargs):
   super(Flatten, self).__init__(**kwargs)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.input_spec = InputSpec(min_ndim=2)
Example 37
 def __init__(self, data_format=None, **kwargs):
     super(Flatten, self).__init__(**kwargs)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.input_spec = InputSpec(min_ndim=2)
     self._can_use_graph_functions = True