Esempio n. 1
0
    def __init__(
            self,
            rank,
            filters,
            kernel_size,
            strides=1,
            padding="valid",
            data_format=None,
            dilation_rate=1,
            activation=None,
            use_bias=True,
            normalize_weight=False,
            kernel_initializer="complex",
            bias_initializer="zeros",
            gamma_diag_initializer=sqrt_init,
            gamma_off_initializer="zeros",
            kernel_regularizer=None,
            bias_regularizer=None,
            gamma_diag_regularizer=None,
            gamma_off_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            gamma_diag_constraint=None,
            gamma_off_constraint=None,
            init_criterion="he",
            seed=None,
            spectral_parametrization=False,
            transposed=False,
            epsilon=1e-7,
            **kwargs):
        """Base N-D complex-valued convolution layer.

        Args:
            rank: spatial rank of the convolution (1, 2 or 3); the layer
                expects inputs of ndim ``rank + 2``.
            filters: number of complex output feature maps.
            kernel_size, strides, dilation_rate: per-dimension convolution
                hyper-parameters; ints are broadcast to ``rank`` entries via
                ``conv_utils.normalize_tuple``.
            padding: any value accepted by ``conv_utils.normalize_padding``.
            data_format: channel layout; ignored for rank 1, which is forced
                to ``"channels_last"``.
            normalize_weight: if True, build() creates the gamma_* weights.
            kernel_initializer: name understood by ``sanitizedInitGet``
                (e.g. "complex", "complex_independent").
            init_criterion: variance-scaling criterion ("he" or "glorot").
            seed: RNG seed; a random one is drawn when None.
            transposed: stored flag for a transposed-kernel layout.
            epsilon: small constant used for numerical stability.
        """
        super(ComplexConv, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, "kernel_size"
        )
        self.strides = conv_utils.normalize_tuple(strides, rank, "strides")
        self.padding = conv_utils.normalize_padding(padding)
        # Rank-1 convolutions are forced to channels_last regardless of the
        # requested data_format.
        self.data_format = "channels_last" \
            if rank == 1 else normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.normalize_weight = normalize_weight
        self.init_criterion = init_criterion
        self.spectral_parametrization = spectral_parametrization
        self.transposed = transposed
        self.epsilon = epsilon
        self.kernel_initializer = sanitizedInitGet(kernel_initializer)
        self.bias_initializer = sanitizedInitGet(bias_initializer)
        self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
        self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
        self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
        self.gamma_off_constraint = constraints.get(gamma_off_constraint)
        if seed is None:
            # FIX: pass an int upper bound; the original used 10e6, a float,
            # which np.random.randint only accepts via implicit truncation
            # (deprecated in NumPy).  Same value: 10e6 == 10000000.
            self.seed = np.random.randint(1, 10000000)
        else:
            self.seed = seed
        self.input_spec = InputSpec(ndim=self.rank + 2)

        # The following are initialized later (presumably in build()).
        self.kernel_shape = None
        self.kernel = None
        self.gamma_rr = None
        self.gamma_ii = None
        self.gamma_ri = None
        self.bias = None
Esempio n. 2
0
 def __init__(self, scale = 1, **kwargs):
     """Upsampling layer for 4D inputs.

     `scale` is stored for use by the layer's computation; inputs are
     constrained to rank 4 via the input spec.
     """
     # Record configuration before delegating to the base Layer constructor,
     # mirroring the Keras convention used elsewhere in this file.
     self.scale = scale
     spec = InputSpec(ndim=4)
     self.input_spec = [spec]
     super(Upsampling, self).__init__(**kwargs)
Esempio n. 3
0
    def build(self, input_shape):
        """Create gammas, betas and moving statistics for a two-input
        complex normalization layer.

        Both inputs must share the same shape; the parameter shape is the
        size of ``self.axis`` of that shape.

        FIX: the second gamma weight (named ``'gamma_ii'``) was previously
        assigned to ``self.gamma_rr`` a second time, overwriting the first
        weight and leaving ``self.gamma_ii`` unset by this branch.
        """
        input_shapes = input_shape
        # Both inputs must have identical shapes (assert kept from original;
        # note it is stripped under `python -O`).
        assert (input_shapes[0] == input_shapes[1])
        input_shape = input_shapes[0]
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        shape = (dim, )  # shape used to initialize the real-valued params
        if self.scale:
            self.gamma_rr = self.add_weight(
                shape=shape,
                name='gamma_rr',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint)
            # FIX: was `self.gamma_rr = ...` (duplicate assignment).
            self.gamma_ii = self.add_weight(
                shape=shape,
                name='gamma_ii',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint)
            self.gamma_ri = self.add_weight(
                shape=shape,
                name='gamma_ri',
                initializer=self.gamma_off_initializer,
                regularizer=self.gamma_off_regularizer,
                constraint=self.gamma_off_constraint)
            self.moving_Vrr = self.add_weight(
                shape=shape,
                initializer=self.moving_variance_initializer,
                name='moving_Vrr',
                trainable=False)
            self.moving_Vii = self.add_weight(
                shape=shape,
                initializer=self.moving_variance_initializer,
                name='moving_Vii',
                trainable=False)
            self.moving_Vri = self.add_weight(
                shape=shape,
                initializer=self.moving_covariance_initializer,
                name='moving_Vri',
                trainable=False)
        else:
            self.gamma_rr = None
            self.gamma_ii = None
            self.gamma_ri = None
            self.moving_Vrr = None
            self.moving_Vii = None
            self.moving_Vri = None

        if self.center:
            self.beta_real = self.add_weight(shape=shape,
                                             name='beta_real',
                                             initializer=self.beta_initializer,
                                             regularizer=self.beta_regularizer,
                                             constraint=self.beta_constraint)
            self.beta_image = self.add_weight(
                shape=shape,
                name='beta_image',
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint)
            self.moving_mean_real = self.add_weight(
                shape=shape,
                initializer=self.moving_mean_initializer,
                name='moving_mean_real',
                trainable=False)
            self.moving_mean_image = self.add_weight(
                shape=shape,
                initializer=self.moving_mean_initializer,
                name='moving_mean_image',
                trainable=False)
        else:
            self.beta_real = None
            self.beta_image = None
            self.moving_mean_real = None
            self.moving_mean_image = None

        self.built = True
Esempio n. 4
0
 def __init__(self,
              rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              normalize_weight=False,
              kernel_initializer='complex',
              bias_initializer='zeros',
              gamma_diag_initializer=sqrt_init,
              gamma_off_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              gamma_diag_regularizer=None,
              gamma_off_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              gamma_diag_constraint=None,
              gamma_off_constraint=None,
              init_criterion='he',
              seed=None,
              spectral_parametrization=False,
              epsilon=1e-7,
              **kwargs):
     """Base N-D complex-valued convolution layer.

     `rank` is the spatial rank (1, 2 or 3); inputs must have ndim
     ``rank + 2``.  Ints for `kernel_size`/`strides`/`dilation_rate` are
     broadcast to `rank` entries.  Rank-1 convolutions force
     ``"channels_last"`` regardless of `data_format`.  When `seed` is None
     a random seed is drawn.
     """
     super(ComplexConv, self).__init__(**kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = 'channels_last' if rank == 1 \
         else conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.normalize_weight = normalize_weight
     self.init_criterion = init_criterion
     self.spectral_parametrization = spectral_parametrization
     self.epsilon = epsilon
     self.kernel_initializer = sanitizedInitGet(kernel_initializer)
     self.bias_initializer = sanitizedInitGet(bias_initializer)
     self.gamma_diag_initializer = sanitizedInitGet(gamma_diag_initializer)
     self.gamma_off_initializer = sanitizedInitGet(gamma_off_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
     self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
     self.gamma_off_constraint = constraints.get(gamma_off_constraint)
     if seed is None:
         # FIX: int upper bound; the original passed the float 10e6, which
         # np.random.randint only accepts via implicit truncation.
         self.seed = np.random.randint(1, 10000000)
     else:
         self.seed = seed
     self.input_spec = InputSpec(ndim=self.rank + 2)
Esempio n. 5
0
 def __init__(self, **kwargs):
     """Pooling layer restricted to 3D inputs (spec enforces ndim=3)."""
     # Base-layer setup first, then pin the expected input rank.
     super(_GlobalSumPooling1D, self).__init__(**kwargs)
     self.input_spec = [InputSpec(ndim=3)]
    def build(self, input_shape):
        """Create the attention (W_a, U_a, V_a) and gate (W_/U_ for i, f, c, o)
        convolutions for this attentive recurrent layer (old Keras-1 API).

        Spatial dimensions are read from input_shape[3] and input_shape[4];
        W_* convolutions consume `nb_filters_in` channels, U_* consume
        `nb_filters_out` (presumably input frame vs. recurrent state —
        confirm against call()).
        """
        self.input_spec = [InputSpec(shape=input_shape)]
        self.states = [None, None]
        self.trainable_weights = []

        samples, rows, cols = input_shape[0], input_shape[3], input_shape[4]

        def _conv(nb_filters, use_bias, weight_init, in_channels):
            # Create a same-padding 2D convolution and build it immediately
            # so its weights exist before we collect them below.
            layer = Convolution2D(nb_filters,
                                  self.nb_rows,
                                  self.nb_cols,
                                  border_mode='same',
                                  bias=use_bias,
                                  init=weight_init)
            layer.build((samples, in_channels, rows, cols))
            layer.built = True
            return layer

        # Attention sub-network.
        self.W_a = _conv(self.nb_filters_att, True, self.init,
                         self.nb_filters_att)
        self.U_a = _conv(self.nb_filters_att, True, self.init,
                         self.nb_filters_in)
        self.V_a = _conv(1, False, self.attentive_init, self.nb_filters_att)

        # Input, forget, cell and output gates.
        for gate in ('i', 'f', 'c', 'o'):
            setattr(self, 'W_' + gate,
                    _conv(self.nb_filters_out, True, self.init,
                          self.nb_filters_in))
            setattr(self, 'U_' + gate,
                    _conv(self.nb_filters_out, True, self.inner_init,
                          self.nb_filters_out))

        # Collect every sub-layer's weights in the original declaration order.
        self.trainable_weights = []
        for sub in (self.W_a, self.U_a, self.V_a,
                    self.W_i, self.U_i, self.W_f, self.U_f,
                    self.W_c, self.U_c, self.W_o, self.U_o):
            self.trainable_weights.extend(sub.trainable_weights)
 def __init__(self, data_format=None, **kwargs):
     """Noisy-AND pooling over 4D inputs.

     `data_format` is normalized via the backend; `a` is a fixed slope
     constant and `initializer` provides zero-initialized parameters.
     """
     super(NoisyAndPooling2D, self).__init__(**kwargs)
     fmt = K.normalize_data_format(data_format)
     self.data_format = fmt
     self.input_spec = InputSpec(ndim=4)
     self.initializer = initializers.get('zeros')
     # Fixed constant used by the pooling formula (value from the original).
     self.a = 10
	def build(self, input_shape):
		"""Create the quaternion convolution kernel, optional
		weight-normalization gammas, and bias.

		The channel axis carries ``4 * input_dim`` real values (the r, i, j,
		k components of ``input_dim`` quaternion feature maps).

		FIXES versus the original:
		* normalized the indentation — the original mixed tabs and spaces
		  (a TabError under Python 3);
		* ``gamma_jk`` (an off-diagonal term) used the *diagonal*
		  initializer/regularizer/constraint while ``gamma_kk`` (a diagonal
		  term) used the *off-diagonal* set — they were swapped relative to
		  every other gamma weight.
		"""
		if self.data_format == 'channels_first':
			channel_axis = 1
		else:
			channel_axis = -1

		if input_shape[channel_axis] is None:
			raise ValueError('The channel dimension of the inputs '
							 'should be defined. Found `None`.')

		# One quaternion feature map spans 4 real channels.
		input_dim = input_shape[channel_axis] // 4
		self.kernel_shape = self.kernel_size + (input_dim, self.filters)

		if self.kernel_initializer in {'quaternion', 'quaternion_independent'}:
			kls = {'quaternion': QuaternionInit,
				   'quaternion_independent': QuaternionIndependentFilters}[self.kernel_initializer]
			kern_init = kls(
				kernel_size=self.kernel_size,
				input_dim=input_dim,
				weight_dim=self.rank,
				nb_filters=self.filters,
				criterion=self.init_criterion
			)
		else:
			kern_init = self.kernel_initializer

		self.kernel = self.add_weight(
			self.kernel_shape,
			initializer=kern_init,
			name='kernel',
			regularizer=self.kernel_regularizer,
			constraint=self.kernel_constraint
		)

		if self.normalize_weight:
			gamma_shape = (input_dim * self.filters,)
			# Diagonal entries (rr, ii, jj, kk) use the diagonal
			# initializer/regularizer/constraint; off-diagonal entries
			# (ri, rj, rk, ij, ik, jk) use the off-diagonal set.
			diag = (self.gamma_diag_initializer,
					self.gamma_diag_regularizer,
					self.gamma_diag_constraint)
			off = (self.gamma_off_initializer,
				   self.gamma_off_regularizer,
				   self.gamma_off_constraint)
			# Creation order preserved from the original.
			for gname, (ginit, greg, gconst) in (
					('gamma_rr', diag), ('gamma_ri', off), ('gamma_rj', off),
					('gamma_rk', off), ('gamma_ii', diag), ('gamma_ij', off),
					('gamma_ik', off), ('gamma_jj', diag), ('gamma_jk', off),
					('gamma_kk', diag)):
				setattr(self, gname, self.add_weight(
					shape=gamma_shape,
					name=gname,
					initializer=ginit,
					regularizer=greg,
					constraint=gconst
				))
		else:
			for gname in ('gamma_rr', 'gamma_ri', 'gamma_rj', 'gamma_rk',
						  'gamma_ii', 'gamma_ij', 'gamma_ik', 'gamma_jj',
						  'gamma_jk', 'gamma_kk'):
				setattr(self, gname, None)

		if self.use_bias:
			bias_shape = (4 * self.filters,)
			self.bias = self.add_weight(
				bias_shape,
				initializer=self.bias_initializer,
				name='bias',
				regularizer=self.bias_regularizer,
				constraint=self.bias_constraint
			)
		else:
			self.bias = None

		# Set input spec: rank + the full real-valued channel count.
		self.input_spec = InputSpec(ndim=self.rank + 2,
									axes={channel_axis: input_dim * 4})
		self.built = True
 def __init__(self, **kwargs):
     """Flattening layer for capsule-style tensors of rank >= 4."""
     super(FlattenCaps, self).__init__(**kwargs)
     # Only the minimum rank is constrained; higher-rank inputs are allowed.
     self.input_spec = InputSpec(min_ndim=4)
Esempio n. 10
0
 def build(self, input_shape):
     """Create the per-channel `gamma` scaling variable, initialized to
     `self.scale` along `self.axis`."""
     weight_shape = (input_shape[self.axis], )
     self.input_spec = [InputSpec(shape=input_shape)]
     # gamma starts as a constant `scale` vector over the chosen axis.
     initial = self.scale * np.ones(weight_shape)
     self.gamma = K.variable(initial, name='{}_gamma'.format(self.name))
     self.trainable_weights = [self.gamma]
 def __init__(self, padding=(1, 1), **kwargs):
     """2D reflection padding.

     `padding` is either an int (applied symmetrically to both spatial
     axes) or a `(pad_h, pad_w)` pair; inputs must be rank 4.
     """
     # FIX: isinstance instead of `type(padding) == int`, so int subclasses
     # are handled too (idiomatic type check).
     if isinstance(padding, int):
         padding = (padding, padding)
     self.padding = padding
     self.input_spec = [InputSpec(ndim=4)]
     super(ReflectionPadding2D, self).__init__(**kwargs)
Esempio n. 12
0
 def __init__(self, **kwargs):
     """Masking-aware layer taking two inputs: one 3D and one 2D tensor."""
     super(LocalPooling1D, self).__init__(**kwargs)
     self.supports_masking = True
     # Two specs -> the layer expects a pair of inputs.
     self.input_spec = [InputSpec(ndim=3), InputSpec(ndim=2)]
Esempio n. 13
0
 def __init__(self, n, **kwargs):
     """Store the repeat count `n`; inputs are constrained to rank 3."""
     # Configuration is recorded before delegating to the base constructor,
     # matching the original's ordering.
     self.n = n
     self.input_spec = [InputSpec(ndim=3)]
     super(RepeatVector4D, self).__init__(**kwargs)
Esempio n. 14
0
    def build(self, input_shape):
        """Build the Huffman-softmax layer.

        Constructs a Huffman tree from `self.frequency_table`, derives a
        root-to-leaf path and a binary Huffman code per class, pads both to
        the maximum tree depth, and creates the per-node weight matrix `W`
        (and bias `b`) in the old Keras-1 style (explicit trainable_weights /
        regularizers / constraints lists).
        """
        if self.verbose:
            print('Build started')
        # Two-input mode gets one spec per input; single-input mode pads the
        # shape list with None so input_shape[0] indexing works below.
        if type(input_shape) == list:
            self.input_spec = [
                InputSpec(shape=input_shape[0]),
                InputSpec(shape=(input_shape[1]))
            ]
        else:
            self.input_spec = [InputSpec(shape=input_shape)]
            input_shape = [input_shape, None]
        input_dim = input_shape[0][1]

        def combine_nodes(left, right):
            # Merge two subtrees; the parent's frequency is the sum, and its
            # code concatenates the children's codes.
            parent_node = Node()
            parent_node.left = left
            parent_node.right = right
            parent_node.code = left.code + right.code
            parent_node.frequency = left.frequency + right.frequency
            return parent_node

        # Generate leaves of Huffman tree.
        # NOTE(review): the lambda closes over `i` late-bound, so every leaf's
        # function sees the final `i`.  The leaves appear to be used below only
        # for their code/frequency attributes — confirm against call().
        leaves = [
            Lambda(lambda x: K.cast(x * 0 + i, dtype='int32'))
            for i in range(self.nb_classes)
        ]
        # Set attribs for leaves
        for l in range(len(leaves)):
            leaf = leaves[l]
            leaf.built = True
            leaf.code = [l]
            leaf.frequency = self.frequency_table[l]
        # Build Huffman tree.
        if self.verbose:
            print('Building huffman tree...')
        un_merged_nodes = leaves[:]
        self.nodes = []
        frequencies = [l.frequency for l in leaves]
        # We keep merging 2 least frequency nodes, until only the root node remains. Classic Huffman tree, nothing fancy.
        prev_p = 0
        while len(un_merged_nodes) > 1:
            # Progress percentage, printed only when it advances.
            p = int(100. * (self.nb_classes - len(un_merged_nodes) + 1) /
                    self.nb_classes)
            if self.verbose:
                if p > prev_p:
                    sys.stdout.write('\r' + str(p) + ' %')
                    prev_p = p
            min_frequency_node = np.argmin(frequencies)
            left = un_merged_nodes.pop(min_frequency_node)
            frequencies.pop(min_frequency_node)
            min_frequency_node = np.argmin(frequencies)
            right = un_merged_nodes.pop(min_frequency_node)
            frequencies.pop(min_frequency_node)
            parent_node = combine_nodes(left, right)
            self.nodes += [parent_node]
            un_merged_nodes += [parent_node]
            frequencies += [parent_node.frequency]
        if self.verbose:
            sys.stdout.write('\r100 %')
            print('Huffman tree build complete')
        self.root_node = un_merged_nodes[0]
        self.nodes += [self.root_node]
        # Map every node (and every leaf) to an index into self.nodes.
        self.node_indices = {self.nodes[i]: i for i in range(len(self.nodes))}
        self.node_indices.update({leaves[i]: i for i in range(len(leaves))})
        self.leaves = leaves
        # Set paths and huffman codes
        self.paths = []
        self.huffman_codes = []
        self.one_hot_huffman_codes = []
        for i in range(self.nb_classes):
            path, huffman_code = self._traverse_huffman_tree(i)
            self.paths += [path]
            self.huffman_codes += [huffman_code]
            one_hot_huffman_code = [([1, 0] if c == 0 else [0, 1])
                                    for c in huffman_code]
            self.one_hot_huffman_codes += [one_hot_huffman_code]
        self.max_tree_depth = max(map(len, self.huffman_codes))
        # Pad codes in place to a uniform depth (0 and [1, 1] as filler).
        for huffman_code in self.huffman_codes:
            huffman_code += [0] * (self.max_tree_depth - len(huffman_code))
        self.padded_one_hot_huffman_codes = self.one_hot_huffman_codes[:]
        for one_hot_huffman_code in self.padded_one_hot_huffman_codes:
            one_hot_huffman_code += [[1, 1]] * (self.max_tree_depth -
                                                len(one_hot_huffman_code))

        if self.verbose:
            print('Setting weights...')

        # One (input_dim x 1) weight column per node.
        self.W = self.init((len(self.nodes), input_dim, 1))
        if self.bias:
            self.b = K.zeros((len(self.nodes), 1))
            self.trainable_weights = [self.W, self.b]
        else:

            self.trainable_weights = [self.W]

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.bias and self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.bias and self.b_constraint:
            self.constraints[self.b] = self.b_constraint

        if hasattr(self, 'initial_weights') and self.initial_weights:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        # Class -> path map
        # Short paths are padded with the root node so all rows have equal
        # length max_tree_depth.
        self.class_path_map = K.variable(np.array([[
            self.node_indices[node] for node in path + [self.root_node] *
            (self.max_tree_depth - len(path))
        ] for path in self.paths]),
                                         dtype='int32')
        super(Huffmax, self).build(input_shape)
        if self.verbose:
            print('Done.')
Esempio n. 15
0
    def build(self, input_shape):
        """Create the complex convolution kernel, optional weight-normalization
        gammas, and bias.

        The channel axis carries ``2 * input_dim`` real values (real and
        imaginary parts of ``input_dim`` complex feature maps).
        """
        if self.data_format == "channels_first":
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError(
                "The channel dimension of the inputs "
                "should be defined. Found `None`."
            )
        input_dim = input_shape[channel_axis] // 2
        # NOTE(review): the original guarded a transposed kernel layout
        # (self.filters, input_dim) behind `if False and self.transposed:`,
        # which can never execute — dead code removed; only the standard
        # (input_dim, self.filters) layout remains (behavior unchanged).
        self.kernel_shape = self.kernel_size + (input_dim, self.filters)
        # The kernel shape here is a complex kernel shape:
        #   nb of complex feature maps = input_dim;
        #   nb of output complex feature maps = self.filters;
        #   imaginary kernel size = real kernel size
        #                         = self.kernel_size
        #                         = complex kernel size
        if self.kernel_initializer in {"complex", "complex_independent"}:
            kls = {
                "complex": ComplexInit,
                "complex_independent": ComplexIndependentFilters,
            }[
                self.kernel_initializer
            ]
            kern_init = kls(
                kernel_size=self.kernel_size,
                input_dim=input_dim,
                weight_dim=self.rank,
                nb_filters=self.filters,
                criterion=self.init_criterion,
            )
        else:
            kern_init = self.kernel_initializer

        self.kernel = self.add_weight(
            "kernel",
            self.kernel_shape,
            initializer=kern_init,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )

        if self.normalize_weight:
            gamma_shape = (input_dim * self.filters,)
            self.gamma_rr = self.add_weight(
                shape=gamma_shape,
                name="gamma_rr",
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint,
            )
            self.gamma_ii = self.add_weight(
                shape=gamma_shape,
                name="gamma_ii",
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint,
            )
            self.gamma_ri = self.add_weight(
                shape=gamma_shape,
                name="gamma_ri",
                initializer=self.gamma_off_initializer,
                regularizer=self.gamma_off_regularizer,
                constraint=self.gamma_off_constraint,
            )
        else:
            self.gamma_rr = None
            self.gamma_ii = None
            self.gamma_ri = None

        if self.use_bias:
            bias_shape = (2 * self.filters,)
            self.bias = self.add_weight(
                "bias",
                bias_shape,
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )

        else:
            self.bias = None

        # Input spec pins both the rank and the real-valued channel count.
        self.input_spec = InputSpec(
            ndim=self.rank + 2, axes={channel_axis: input_dim * 2}
        )
        self.built = True
 def __init__(self, **kwargs):
     """Layer mapping capsule tensors (rank >= 3) to scalars."""
     super(CapsToScalars, self).__init__(**kwargs)
     # Only a minimum rank is required here.
     self.input_spec = InputSpec(min_ndim=3)
Esempio n. 17
0
 def __init__(self, k=1, **kwargs):
     """Store `k` for the layer's computation; inputs must be rank 3."""
     super().__init__(**kwargs)
     # The two assignments are independent; order swapped versus the
     # original for readability.
     self.k = k
     self.input_spec = InputSpec(ndim=3)
 def build(self, input_shape):
     """Pin the exact input shape in the spec, then defer to the base class."""
     spec = InputSpec(shape=input_shape)
     self.input_spec = [spec]
     super(DecodeDetectionsFast, self).build(input_shape)
 def __init__(self, rate, **kwargs):
     """Dropout variant restricted to 3D inputs; `rate` is forwarded to the
     parent dropout constructor."""
     super(TimestepDropout, self).__init__(rate, **kwargs)
     self.input_spec = InputSpec(ndim=3)
Esempio n. 20
0
    def build(self, input_shape):
        """Create complex batch-normalization parameters.

        The normalization axis holds ``2 * n`` real values, so each
        per-component gamma / moving-variance weight has size ``n``
        (``param_shape``), while beta and the moving mean span the full
        axis.

        FIX: removed the unused local ``ndim = len(input_shape)`` — it was
        assigned and never read.
        """
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})

        # One entry per complex channel: half of the real-valued axis.
        param_shape = (input_shape[self.axis] // 2, )

        if self.scale:
            self.gamma_rr = self.add_weight(
                shape=param_shape,
                name='gamma_rr',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint)
            self.gamma_ii = self.add_weight(
                shape=param_shape,
                name='gamma_ii',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint)
            self.gamma_ri = self.add_weight(
                shape=param_shape,
                name='gamma_ri',
                initializer=self.gamma_off_initializer,
                regularizer=self.gamma_off_regularizer,
                constraint=self.gamma_off_constraint)
            self.moving_Vrr = self.add_weight(
                shape=param_shape,
                initializer=self.moving_variance_initializer,
                name='moving_Vrr',
                trainable=False)
            self.moving_Vii = self.add_weight(
                shape=param_shape,
                initializer=self.moving_variance_initializer,
                name='moving_Vii',
                trainable=False)
            self.moving_Vri = self.add_weight(
                shape=param_shape,
                initializer=self.moving_covariance_initializer,
                name='moving_Vri',
                trainable=False)
        else:
            self.gamma_rr = None
            self.gamma_ii = None
            self.gamma_ri = None
            self.moving_Vrr = None
            self.moving_Vii = None
            self.moving_Vri = None

        if self.center:
            # Beta and moving mean cover the whole (real-valued) axis.
            self.beta = self.add_weight(shape=(input_shape[self.axis], ),
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
            self.moving_mean = self.add_weight(
                shape=(input_shape[self.axis], ),
                initializer=self.moving_mean_initializer,
                name='moving_mean',
                trainable=False)
        else:
            self.beta = None
            self.moving_mean = None

        self.built = True
Esempio n. 21
0
    def build(self, input_shape):
        """Create the four quaternion kernel components and optional bias.

        The last input axis concatenates the four quaternion components
        (r, i, j, k), so it must be divisible by 4; each component kernel
        has shape ``(input_dim, units)`` and the bias spans all
        ``4 * units`` outputs.

        Raises:
            ValueError: if ``init_criterion`` is not 'he' or 'glorot'.
        """
        assert len(input_shape) == 2
        assert input_shape[-1] % 4 == 0
        input_dim = input_shape[-1] // 4
        data_format = K.image_data_format()
        kernel_shape = (input_dim, self.units)
        fan_in, fan_out = initializers._compute_fans(kernel_shape,
                                                     data_format=data_format)
        if self.init_criterion == 'he':
            s = tf.sqrt(tf.cast(1. / fan_in, tf.float32))
        elif self.init_criterion == 'glorot':
            s = tf.sqrt(tf.cast(1. / (fan_in + fan_out), tf.float32))
        else:
            # Fix: an unknown criterion previously surfaced later as a
            # NameError on `s`; fail fast with a clear message instead.
            raise ValueError('Invalid `init_criterion`: ' +
                             str(self.init_criterion))

        # Initialization using euclidean representation.  One factory
        # replaces the four byte-identical closures the original defined.
        # NOTE(review): every component draws with the same `self.seed`, so
        # a fixed seed makes r == i == j == k at init -- confirm intended.
        def random_component_init(shape, dtype=None):
            return K.random_normal_variable(kernel_shape,
                                            mean=0,
                                            scale=s,
                                            dtype=tf.float32,
                                            seed=self.seed)

        if self.kernel_initializer in {'quaternion'}:
            r_init = i_init = j_init = k_init = random_component_init
        else:
            r_init = self.kernel_initializer
            i_init = self.kernel_initializer
            j_init = self.kernel_initializer
            k_init = self.kernel_initializer

        self.r_kernel = self.add_weight(shape=kernel_shape,
                                        initializer=r_init,
                                        name='r_kernel',
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint)
        self.i_kernel = self.add_weight(shape=kernel_shape,
                                        initializer=i_init,
                                        name='i_kernel',
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint)
        self.j_kernel = self.add_weight(shape=kernel_shape,
                                        initializer=j_init,
                                        name='j_kernel',
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint)
        self.k_kernel = self.add_weight(shape=kernel_shape,
                                        initializer=k_init,
                                        name='k_kernel',
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint)

        if self.use_bias:
            self.bias = self.add_weight(shape=(4 * self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=2, axes={-1: 4 * input_dim})
        self.built = True
Esempio n. 22
0
 def __init__(self, padding=(1, 1), **kwargs):
     # Normalize `padding` to a tuple (accepts any 2-sequence) and require
     # rank-4 input before delegating to the base layer constructor.
     self.padding = tuple(padding)
     self.input_spec = [InputSpec(ndim=4)]
     super(ReflectionPadding2D, self).__init__(**kwargs)
Esempio n. 23
0
    def build(self, input_shape):
        """Create the complex convolution kernel, optional gammas and bias.

        The channel axis carries ``2 * input_dim`` interleaved real and
        imaginary feature maps, so the stored kernel shape is
        ``kernel_size + (input_dim, filters)`` and the bias spans
        ``2 * filters`` outputs.

        Raises:
            ValueError: if the channel dimension is not statically known.
        """
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis] // 2
        self.kernel_shape = self.kernel_size + (input_dim, self.filters)
        # The kernel shape here is a complex kernel shape:
        #   nb of complex feature maps = input_dim;
        #   nb of output complex feature maps = self.filters;
        #   imaginary kernel size = real kernel size
        #                         = self.kernel_size
        #                         = complex kernel size
        if self.kernel_initializer in {'complex', 'complex_independent'}:
            kls = {
                'complex': ComplexInit,
                'complex_independent': ComplexIndependentFilters
            }[self.kernel_initializer]
            kern_init = kls(kernel_size=self.kernel_size,
                            input_dim=input_dim,
                            weight_dim=self.rank,
                            nb_filters=self.filters,
                            criterion=self.init_criterion)
        else:
            kern_init = self.kernel_initializer

        # Consistency fix: pass `shape=` by keyword, like every other
        # add_weight call in this file (was positional here and for bias).
        self.kernel = self.add_weight(shape=self.kernel_shape,
                                      initializer=kern_init,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.normalize_weight:
            # One gamma entry per (input complex map, output filter) pair.
            gamma_shape = (input_dim * self.filters, )
            self.gamma_rr = self.add_weight(
                shape=gamma_shape,
                name='gamma_rr',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint)
            self.gamma_ii = self.add_weight(
                shape=gamma_shape,
                name='gamma_ii',
                initializer=self.gamma_diag_initializer,
                regularizer=self.gamma_diag_regularizer,
                constraint=self.gamma_diag_constraint)
            self.gamma_ri = self.add_weight(
                shape=gamma_shape,
                name='gamma_ri',
                initializer=self.gamma_off_initializer,
                regularizer=self.gamma_off_regularizer,
                constraint=self.gamma_off_constraint)
        else:
            self.gamma_rr = None
            self.gamma_ii = None
            self.gamma_ri = None

        if self.use_bias:
            bias_shape = (2 * self.filters, )
            self.bias = self.add_weight(shape=bias_shape,
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim * 2})
        self.built = True
    def build(self, input_shape):
        """Create the quaternion kernel components (Theano-backend variant).

        The last input axis concatenates the four quaternion components, so
        it must be divisible by 4; each component kernel is
        ``(input_dim, units)`` and the bias covers all ``4 * units``
        outputs.

        Raises:
            ValueError: if ``init_criterion`` is not 'he' or 'glorot'.
        """
        assert len(input_shape) == 2
        # BUG FIX: the divisibility check used `% 2`, but the layer splits
        # the last axis into four quaternion components (`// 4` below, and
        # the `4 * input_dim` input spec at the end).
        assert input_shape[-1] % 4 == 0
        input_dim = input_shape[-1] // 4
        data_format = K.image_data_format()
        kernel_shape = (input_dim, self.units)
        fan_in, fan_out = initializers._compute_fans(kernel_shape,
                                                     data_format=data_format)
        if self.init_criterion == 'he':
            s = 1. / np.sqrt(2 * fan_in)
        elif self.init_criterion == 'glorot':
            s = 1. / np.sqrt(2 * (fan_in + fan_out))
        else:
            # Fix: an unknown criterion previously surfaced later as a
            # NameError on `s`; fail fast with a clear message instead.
            raise ValueError('Invalid `init_criterion`: ' +
                             str(self.init_criterion))
        rng = RandomStreams(seed=self.seed)

        # Initialization using euclidean representation; each call into the
        # shared stream draws a fresh sample, so reusing `init_w_imag` for
        # i, j and k still yields distinct values.
        def init_w_real(shape, dtype=None):
            return rng.normal(size=kernel_shape, avg=0, std=s, dtype=dtype)

        def init_w_imag(shape, dtype=None):
            return rng.normal(size=kernel_shape, avg=0, std=s, dtype=dtype)

        if self.kernel_initializer in {'complex'}:
            real_init = init_w_real
            imag_init = init_w_imag
        else:
            real_init = self.kernel_initializer
            imag_init = self.kernel_initializer

        self.r = self.add_weight(shape=kernel_shape,
                                 initializer=real_init,
                                 name='r',
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)
        self.i = self.add_weight(shape=kernel_shape,
                                 initializer=imag_init,
                                 name='i',
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)
        self.j = self.add_weight(shape=kernel_shape,
                                 initializer=imag_init,
                                 name='j',
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)
        self.k = self.add_weight(shape=kernel_shape,
                                 initializer=imag_init,
                                 name='k',
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)

        if self.use_bias:
            self.bias = self.add_weight(shape=(4 * self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=2, axes={-1: 4 * input_dim})
        self.built = True
Esempio n. 25
0
 def build(self, input_shape):
     # Record the concrete input shape as this cell's input spec, then let
     # the parent class complete the build (no weights are created here).
     self.input_spec = [InputSpec(shape=input_shape)]
     super(RNNCell, self).build(input_shape)
Esempio n. 26
0
 def build(self, input_shape):
     """Create the single scalar `beta` weight and fix the input rank."""
     beta_init = self.beta_initializer
     self.beta = self.add_weight(name='beta',
                                 shape=[1],
                                 initializer=beta_init)
     self.input_spec = InputSpec(ndim=len(input_shape))
     self.built = True