def __init__(self,
             units,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(Dense, self).__init__(**kwargs)

    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    # Not implemented arguments
    default_args_check(kernel_regularizer, "kernel_regularizer", "Dense")
    default_args_check(bias_regularizer, "bias_regularizer", "Dense")
    default_args_check(activity_regularizer, "activity_regularizer", "Dense")
    default_args_check(kernel_constraint, "kernel_constraint", "Dense")
    default_args_check(bias_constraint, "bias_constraint", "Dense")
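# Usage sketch (assumption: this layer is exposed as
# tf_encrypted.keras.layers.Dense and mirrors the tf.keras constructor;
# argument names are taken from the signature above):
#
#     from tf_encrypted.keras.layers import Dense
#
#     layer = Dense(units=64,
#                   activation='relu',
#                   kernel_initializer='glorot_uniform')
#
# The regularizer and constraint arguments must stay at their None defaults;
# default_args_check rejects any non-default value, presumably by raising
# NotImplementedError as the explicit checks elsewhere in this file do.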
def __init__(self,
             kernel_size,
             strides=(1, 1),
             padding='valid',
             depth_multiplier=1,
             data_format=None,
             activation=None,
             use_bias=True,
             depthwise_initializer='glorot_uniform',
             bias_initializer='zeros',
             depthwise_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             depthwise_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(DepthwiseConv2D, self).__init__(**kwargs)

    self.rank = 2
    self.kernel_size = conv_utils.normalize_tuple(
        kernel_size, self.rank, 'kernel_size')
    if self.kernel_size[0] != self.kernel_size[1]:
        raise NotImplementedError(
            "TF Encrypted currently only supports the same kernel size "
            "along the height and the width. "
            "You gave: {}".format(self.kernel_size))
    self.strides = conv_utils.normalize_tuple(strides, self.rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding).upper()
    self.depth_multiplier = depth_multiplier
    self.data_format = conv_utils.normalize_data_format(data_format)
    if activation is not None:
        logger.info("Performing an activation before a pooling layer can "
                    "result in unnecessary performance loss. Check model "
                    "definition in case of missed optimization.")
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    # Not implemented arguments
    default_args_check(depthwise_regularizer,
                       "depthwise_regularizer",
                       "DepthwiseConv2D")
    default_args_check(bias_regularizer,
                       "bias_regularizer",
                       "DepthwiseConv2D")
    default_args_check(activity_regularizer,
                       "activity_regularizer",
                       "DepthwiseConv2D")
    default_args_check(depthwise_constraint,
                       "depthwise_constraint",
                       "DepthwiseConv2D")
    default_args_check(bias_constraint,
                       "bias_constraint",
                       "DepthwiseConv2D")
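# Usage sketch (assumption: same tf_encrypted.keras.layers namespace as
# above; the argument values are illustrative):
#
#     from tf_encrypted.keras.layers import DepthwiseConv2D
#
#     layer = DepthwiseConv2D(kernel_size=3,   # normalized to (3, 3)
#                             strides=(2, 2),
#                             padding='same',
#                             depth_multiplier=2)
#
# A non-square kernel such as kernel_size=(3, 5) fails the
# kernel_size[0] != kernel_size[1] check above with NotImplementedError.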
def __init__(self,
             units,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(Dense, self).__init__(**kwargs)

    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    if kernel_regularizer:
        raise NotImplementedError(
            arg_not_impl_msg.format("kernel_regularizer", "Dense"),
        )
    if bias_regularizer:
        raise NotImplementedError(
            arg_not_impl_msg.format("bias_regularizer", "Dense"),
        )
    if activity_regularizer:
        raise NotImplementedError(
            arg_not_impl_msg.format("activity_regularizer", "Dense"),
        )
    if kernel_constraint:
        raise NotImplementedError(
            arg_not_impl_msg.format("kernel_constraint", "Dense"),
        )
    if bias_constraint:
        raise NotImplementedError(
            arg_not_impl_msg.format("bias_constraint", "Dense"),
        )
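# Sketch of the failure mode guarded above: any truthy value for an
# unsupported argument aborts construction immediately.
#
#     Dense(units=64, kernel_regularizer='l2')
#     # -> NotImplementedError(arg_not_impl_msg.format("kernel_regularizer",
#     #                                                "Dense"))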
def __init__(self,
             filters,
             kernel_size,
             strides=(1, 1),
             padding='valid',
             data_format=None,
             dilation_rate=None,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(Conv2D, self).__init__(**kwargs)

    self.rank = 2
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size,
                                                  self.rank,
                                                  'kernel_size')
    if self.kernel_size[0] != self.kernel_size[1]:
        raise NotImplementedError(
            "TF Encrypted currently only supports the same kernel size "
            "along the height and the width. "
            "You gave: {}".format(self.kernel_size))
    self.strides = conv_utils.normalize_tuple(strides, self.rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding).upper()
    self.data_format = conv_utils.normalize_data_format(data_format)
    if activation is not None:
        logger.info(
            "Performing an activation before a pooling layer can result "
            "in unnecessary performance loss. Check model definition in "
            "case of missed optimization.")
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    if dilation_rate:
        raise NotImplementedError(
            arg_not_impl_msg.format("dilation_rate", "Conv2D"),
        )
    if kernel_regularizer:
        raise NotImplementedError(
            arg_not_impl_msg.format("kernel_regularizer", "Conv2D"),
        )
    if bias_regularizer:
        raise NotImplementedError(
            arg_not_impl_msg.format("bias_regularizer", "Conv2D"),
        )
    if activity_regularizer:
        raise NotImplementedError(
            arg_not_impl_msg.format("activity_regularizer", "Conv2D"),
        )
    if kernel_constraint:
        raise NotImplementedError(
            arg_not_impl_msg.format("kernel_constraint", "Conv2D"),
        )
    if bias_constraint:
        raise NotImplementedError(
            arg_not_impl_msg.format("bias_constraint", "Conv2D"),
        )
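# Usage sketch (assumption: the constructor mirrors tf.keras.layers.Conv2D
# minus the unimplemented arguments):
#
#     from tf_encrypted.keras.layers import Conv2D
#
#     layer = Conv2D(filters=32,
#                    kernel_size=3,
#                    strides=(1, 1),
#                    padding='same',
#                    activation='relu')
#
# Note that dilation_rate defaults to None here rather than Keras'
# (1, 1); passing any truthy rate raises NotImplementedError.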
def __init__(self, activation, **kwargs):
    super(Activation, self).__init__(**kwargs)
    self.activation_identifier = activation
    self.activation = activations.get(self.activation_identifier)
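# Usage sketch (assumption: identifiers resolve through the same
# activations.get registry used above, e.g. 'relu' or 'sigmoid'):
#
#     from tf_encrypted.keras.layers import Activation
#
#     layer = Activation('relu')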