def build(self, input_shape):
    input_shape = to_tuple(input_shape)
    if self.data_format == 'channels_first':
        stack_size = input_shape[1]
        self.kernel_shape = (self.filters, stack_size, self.nb_row, self.nb_col)
        self.kernel_norm_shape = (1, stack_size, self.nb_row, self.nb_col)
    elif self.data_format == 'channels_last':
        stack_size = input_shape[3]
        self.kernel_shape = (self.nb_row, self.nb_col, stack_size, self.filters)
        self.kernel_norm_shape = (self.nb_row, self.nb_col, stack_size, 1)
    else:
        raise ValueError('Invalid data_format:', self.data_format)

    self.W = self.add_weight(shape=self.kernel_shape,
                             initializer=partial(self.kernel_initializer),
                             name='{}_W'.format(self.name),
                             regularizer=self.kernel_regularizer,
                             constraint=self.kernel_constraint)

    # Fixed all-ones kernel of matching shape, kept as a plain backend
    # variable rather than a trained weight.
    kernel_norm_name = '{}_kernel_norm'.format(self.name)
    self.kernel_norm = K.variable(np.ones(self.kernel_norm_shape),
                                  name=kernel_norm_name)

    if self.use_bias:
        self.b = self.add_weight(shape=(self.filters,),
                                 initializer='zero',
                                 name='{}_b'.format(self.name),
                                 regularizer=self.bias_regularizer,
                                 constraint=self.bias_constraint)
    else:
        self.b = None

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
def build(self, input_shape):
    input_shape = to_tuple(input_shape)
    param_shape = list(input_shape[1:])
    self.param_broadcast = [False] * len(param_shape)
    if self.shared_axes is not None:
        for i in self.shared_axes:
            param_shape[i - 1] = 1
            self.param_broadcast[i - 1] = True
    param_shape = tuple(param_shape)

    self.t_left = self.add_weight(shape=param_shape,
                                  name='t_left',
                                  initializer=self.t_left_initializer)
    self.a_left = self.add_weight(shape=param_shape,
                                  name='a_left',
                                  initializer=self.a_left_initializer)
    self.t_right = self.add_weight(shape=param_shape,
                                   name='t_right',
                                   initializer=self.t_right_initializer)
    self.a_right = self.add_weight(shape=param_shape,
                                   name='a_right',
                                   initializer=self.a_right_initializer)

    # Set input spec
    axes = {}
    if self.shared_axes:
        for i in range(1, len(input_shape)):
            if i not in self.shared_axes:
                axes[i] = input_shape[i]
    self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
    self.built = True
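# Illustrative sketch (an assumption, not part of the layers above): how the
# `shared_axes` handling used by the activation builds collapses the parameter
# shape, so a single value is learned per non-shared axis and broadcast over
# the shared ones at call time. The helper name and shapes are hypothetical.
def _shared_param_shape(input_shape, shared_axes):
    # Drop the batch dimension, then set every shared axis to size 1.
    param_shape = list(input_shape[1:])
    broadcast = [False] * len(param_shape)
    if shared_axes is not None:
        for i in shared_axes:
            param_shape[i - 1] = 1
            broadcast[i - 1] = True
    return tuple(param_shape), broadcast

# For a channels_last feature map of shape (None, 32, 32, 64), sharing the
# parameters across both spatial axes leaves one value per channel:
assert _shared_param_shape((None, 32, 32, 64), [1, 2]) == ((1, 1, 64), [True, True, False])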
def build(self, input_shape):
    input_shape = to_tuple(input_shape)
    ndim = len(input_shape)
    assert ndim >= 2
    input_dim = input_shape[-1]

    self.input_dim = input_dim
    self.input_spec = [InputSpec(dtype=K.floatx(), ndim=ndim)]

    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name="{}_W".format(self.name),
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer="zero",
                                    name="{}_b".format(self.name),
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = None

    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
    self.built = True
def build(self, input_shape):
    input_shape = to_tuple(input_shape)
    param_shape = list(input_shape[1:])
    self.param_broadcast = [False] * len(param_shape)
    if self.shared_axes is not None:
        for i in self.shared_axes:
            param_shape[i - 1] = 1
            self.param_broadcast[i - 1] = True
    param_shape = tuple(param_shape)

    # Initialised as ones to emulate the default ELU
    self.alpha = self.add_weight(shape=param_shape,
                                 name='alpha',
                                 initializer=self.alpha_initializer,
                                 regularizer=self.alpha_regularizer,
                                 constraint=self.alpha_constraint)
    self.beta = self.add_weight(shape=param_shape,
                                name='beta',
                                initializer=self.beta_initializer,
                                regularizer=self.beta_regularizer,
                                constraint=self.beta_constraint)

    # Set input spec
    axes = {}
    if self.shared_axes:
        for i in range(1, len(input_shape)):
            if i not in self.shared_axes:
                axes[i] = input_shape[i]
    self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
    self.built = True
def build(self, input_shape):
    input_shape = to_tuple(input_shape)
    input_dim_capsule = input_shape[-1]
    if self.share_weights:
        # A single transformation matrix broadcast over all input capsules.
        self.W = self.add_weight(name='capsule_kernel',
                                 shape=(1, input_dim_capsule,
                                        self.num_capsule * self.dim_capsule),
                                 initializer=self.initializer,
                                 regularizer=self.regularizer,
                                 constraint=self.constraint,
                                 trainable=True)
    else:
        # One transformation matrix per input capsule.
        input_num_capsule = input_shape[-2]
        self.W = self.add_weight(name='capsule_kernel',
                                 shape=(input_num_capsule, input_dim_capsule,
                                        self.num_capsule * self.dim_capsule),
                                 initializer=self.initializer,
                                 regularizer=self.regularizer,
                                 constraint=self.constraint,
                                 trainable=True)
    self.built = True
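# Illustrative sketch (hypothetical sizes, not part of the layer above): the
# capsule kernel shape under the two weight-sharing modes, for 100 input
# capsules of dimension 8 routed to 10 output capsules of dimension 16.
num_capsule, dim_capsule = 10, 16
input_num_capsule, input_dim_capsule = 100, 8

shared_shape = (1, input_dim_capsule, num_capsule * dim_capsule)
per_capsule_shape = (input_num_capsule, input_dim_capsule, num_capsule * dim_capsule)

assert shared_shape == (1, 8, 160)         # one map broadcast over all input capsules
assert per_capsule_shape == (100, 8, 160)  # one map per input capsule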
def build(self, input_shape):
    input_shape = to_tuple(input_shape)
    new_input_shape = input_shape[1:]
    self.alphas = self.add_weight(shape=new_input_shape,
                                  name='{}_alphas'.format(self.name),
                                  initializer=self.alpha_initializer,
                                  trainable=self.trainable)
    self.built = True
def build(self, input_shape):
    input_shape = to_tuple(input_shape)
    self.input_spec = [InputSpec(shape=input_shape)]
    self.input_dim = input_shape[-1]

    self.kernel = self.add_weight(shape=(self.input_dim, self.units),
                                  name="kernel",
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    # Square matrix of pairwise transition weights between the `units` tags.
    self.chain_kernel = self.add_weight(shape=(self.units, self.units),
                                        name="chain_kernel",
                                        initializer=self.chain_initializer,
                                        regularizer=self.chain_regularizer,
                                        constraint=self.chain_constraint)
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),
                                    name="bias",
                                    initializer=self.bias_initializer,
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = 0

    if self.use_boundary:
        self.left_boundary = self.add_weight(shape=(self.units,),
                                             name="left_boundary",
                                             initializer=self.boundary_initializer,
                                             regularizer=self.boundary_regularizer,
                                             constraint=self.boundary_constraint)
        self.right_boundary = self.add_weight(shape=(self.units,),
                                              name="right_boundary",
                                              initializer=self.boundary_initializer,
                                              regularizer=self.boundary_regularizer,
                                              constraint=self.boundary_constraint)
    self.built = True
def build(self, input_shape=None):
    input_shape = to_tuple(input_shape)
    if len(input_shape) == 2:  # Dense_layer
        input_dim = np.prod(input_shape[-1])  # we drop only last dim
    elif 3 <= len(input_shape) <= 5:  # Conv_layers
        input_dim = (
            input_shape[1]
            if K.image_data_format() == 'channels_first'
            else input_shape[-1]  # we drop only channels
        )
    else:
        raise ValueError(
            'concrete_dropout currently supports only Dense/Conv layers')

    self.input_spec = InputSpec(shape=input_shape)
    if not self.layer.built:
        self.layer.build(input_shape)
        self.layer.built = True

    # initialise p
    self.p_logit = self.layer.add_weight(name='p_logit',
                                         shape=(1,),
                                         initializer=RandomUniform(
                                             *np.log(self._prob_init),
                                             seed=self._seed),
                                         trainable=True)
    self.p = K.squeeze(K.sigmoid(self.p_logit), axis=0)

    super(ConcreteDropout, self).build(input_shape)

    # initialize regularizer / prior KL term and add to layer-loss
    weight = self.layer.kernel
    kernel_regularizer = (self.weight_regularizer
                          * K.sum(K.square(weight))
                          / (1. - self.p))
    dropout_regularizer = (self.p * K.log(self.p)
                           + (1. - self.p) * K.log(1. - self.p)
                           ) * self.dropout_regularizer * input_dim
    regularizer = K.sum(kernel_regularizer + dropout_regularizer)
    self.layer.add_loss(regularizer)
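# Illustrative sketch (plain NumPy, hypothetical values; not part of the
# wrapper above): the two terms the build adds to the layer loss. The kernel
# term is an L2 penalty rescaled by 1 / (1 - p), and the dropout term is the
# negative Bernoulli entropy p*log(p) + (1-p)*log(1-p), scaled by the dropout
# regularizer weight and the number of dimensions dropout is applied to.
import numpy as np

p = 0.1                        # current dropout probability (sigmoid of p_logit)
weight = np.random.randn(64, 32)
weight_regularizer = 1e-6
dropout_regularizer = 1e-5
input_dim = 64                 # number of dropped dimensions

kernel_term = weight_regularizer * np.sum(weight ** 2) / (1. - p)
dropout_term = (p * np.log(p) + (1. - p) * np.log(1. - p)) * dropout_regularizer * input_dim
total = kernel_term + dropout_term  # scalar added via self.layer.add_loss(...)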