Example #1
    def __init__(self, layers=None, slic=1, concat_axis=-1, init='glorot_uniform',
                 arguments=None, node_indices=None, tensor_indices=None,
                 name=None):
        self.layers = layers
        self.slic = slic
        self.batch_size = 32
        self.node_indices = node_indices
        self.init = initializations.get(init)
        self.arguments = arguments if arguments else {}
        self.mode = "concat"
        # Layer parameters.
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}
        self._trainable_weights = []
        self._non_trainable_weights = []
        self.regularizers = []
        self.supports_masking = False
        self.uses_learning_phase = False
        self.input_spec = None  # Compatible with anything.
        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name

        if layers:
            if not node_indices:
                node_indices = [0 for _ in range(len(layers))]
            
            input_shape = self._arguments_validation(layers,
                                                     node_indices,
                                                     tensor_indices)
            self.build(input_shape)
            self.add_inbound_node(layers, node_indices, tensor_indices)
        else:
            self.built = False
Example #2
    def __init__(self, hid, layers=None, name=None):
        super(Autoencoder, self).__init__()
        self.layers = []  # Stack of layers.
        self.model = None  # Internal Model instance.
        self.inputs = []  # List of input tensors
        self.outputs = []  # List of length 1: the output tensor (unique).
        self._trainable = True
        self._initial_weights = None
        self.hid = hid
        # Model attributes.
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.built = False
        self.pretrain = False

        # Set model name.
        if not name:
            prefix = 'sequential_'
            name = prefix + str(K.get_uid(prefix))
        self.name = name

        # Add to the model any layers passed to the constructor.
        if layers:
            for layer in layers:
                self.add(layer)
Example #3
def ConvNeXtBlock(
    projection_dim, drop_path_rate=0.0, layer_scale_init_value=1e-6, name=None
):
    """ConvNeXt block.

    References:
    - https://arxiv.org/abs/2201.03545
    - https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py

    Notes:
      In the original ConvNeXt implementation (linked above), the authors use
      `Dense` layers for pointwise convolutions for increased efficiency.
      Following that, this implementation also uses the same.

    Args:
      projection_dim (int): Number of filters for convolution layers. In the
        ConvNeXt paper, this is referred to as projection dimension.
      drop_path_rate (float): Probability of dropping paths. Should be within
        [0, 1].
      layer_scale_init_value (float): Layer scale value. Should be a small float
        number.
      name: name prefix for the Keras layers in the block.

    Returns:
      A function representing a ConvNeXt block.
    """
    if name is None:
        name = "prestem" + str(backend.get_uid("prestem"))

    def apply(inputs):
        x = inputs

        x = layers.Conv2D(
            filters=projection_dim,
            kernel_size=7,
            padding="same",
            groups=projection_dim,
            name=name + "_depthwise_conv",
        )(x)
        x = layers.LayerNormalization(epsilon=1e-6, name=name + "_layernorm")(x)
        x = layers.Dense(4 * projection_dim, name=name + "_pointwise_conv_1")(x)
        x = layers.Activation("gelu", name=name + "_gelu")(x)
        x = layers.Dense(projection_dim, name=name + "_pointwise_conv_2")(x)

        if layer_scale_init_value is not None:
            x = LayerScale(
                layer_scale_init_value,
                projection_dim,
                name=name + "_layer_scale",
            )(x)
        if drop_path_rate:
            layer = StochasticDepth(
                drop_path_rate, name=name + "_stochastic_depth"
            )
        else:
            layer = layers.Activation("linear", name=name + "_identity")

        return inputs + layer(x)

    return apply
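
A minimal usage sketch for this block (assumptions: `layers` is `keras.layers`, and `LayerScale` and `StochasticDepth` are defined elsewhere in the module; the input channel count must equal `projection_dim` for the residual add to work):

# Hypothetical usage; shape and names are illustrative only.
inputs = layers.Input(shape=(56, 56, 96))
outputs = ConvNeXtBlock(projection_dim=96, drop_path_rate=0.1,
                        name="stage0_block0")(inputs)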
Example #4
    def __init__(self, inputs, name=None):
        if K.backend() == 'theano' or K.image_dim_ordering() == 'th':
            raise RuntimeError(
                "Only the TensorFlow backend with 'tf' image ordering is supported")

        self.inputs = inputs

        # Layer parameters
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}
        self._trainable_weights = []
        self._non_trainable_weights = []
        self.supports_masking = True
        self.uses_learning_phase = False
        self.input_spec = None
        self.trainable = False

        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))

        self.name = name

        if inputs:
            # The inputs are a set of nodes that share the same input.
            self.built = True
            # self.add_inbound_node(inputs, node_indices, tensor_indices)
        else:
            self.built = False
Example #5
def PreStem(name=None):
    """Normalizes inputs with ImageNet-1k mean and std.

    Args:
      name (str): Name prefix.

    Returns:
      A function that applies the PreStem normalization.
    """
    if name is None:
        name = "prestem" + str(backend.get_uid("prestem"))

    def apply(x):
        x = layers.Normalization(
            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
            variance=[
                (0.229 * 255) ** 2,
                (0.224 * 255) ** 2,
                (0.225 * 255) ** 2,
            ],
            name=name + "_prestem_normalization",
        )(x)
        return x

    return apply
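
A usage sketch (assumptions: `layers` is `keras.layers` and the inputs are RGB images in the [0, 255] range):

# Hypothetical usage: normalize raw images before the stem convolutions.
inputs = layers.Input(shape=(224, 224, 3))
x = PreStem(name="convnext_tiny")(inputs)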
Example #6
    def __init__(self,
                 size,
                 initializer='glorot_uniform',
                 regularizer=None,
                 name=None,
                 **kwargs):
        self.size = tuple(size)
        self.initializer = initializers.get(initializer)
        self.regularizer = regularizers.get(regularizer)

        if not name:
            prefix = 'shared_weight'
            name = prefix + '_' + str(K.get_uid(prefix))

        Layer.__init__(self, name=name, **kwargs)

        with K.name_scope(self.name):
            self.kernel = self.add_weight(shape=self.size,
                                          initializer=self.initializer,
                                          name='kernel',
                                          regularizer=self.regularizer)

        self.trainable = True
        self.built = True
        # self.sparse = sparse

        # input_tensor = self.kernel * 1.0

        self.is_placeholder = False
Example #7
    def _shared_name(self):
        """Returns a shared name to be used by the table."""
        shared_name = "NULL_INITIALIZER_"
        if tf.executing_eagerly():
            # Ensure a unique name when eager execution is enabled to avoid
            # spurious sharing issues.
            shared_name += str(backend.get_uid(shared_name))
        return shared_name
Example #8
    def __init__(self, **kwargs):
        '''Constructor of a composed layer. A subclass should invoke it as the
        last call in its own constructor, so that it can construct the children
        layers. Note that most of the following lines are shamelessly adapted
        from Keras.
        '''
        # Logic copied from layer with small adaption
        if not hasattr(self, 'input_spec'):
            self.input_spec = None
        if not hasattr(self, 'supports_masking'):
            self.supports_masking = False

        self._uses_learning_phase = False

        # these lists will be filled via successive calls
        # to self.add_inbound_node()
        self.inbound_nodes = []
        self.outbound_nodes = []

        self._trainable_weights = []
        self._non_trainable_weights = []
        self._regularizers = []
        self._constraints = {}  # dict {tensor: constraint instance}
        self.built = False

        # these properties should be set by the user via keyword arguments.
        # note that 'input_dtype', 'input_shape' and 'batch_input_shape'
        # are only applicable to input layers: do not pass these keywords
        # to non-input layers.
        allowed_kwargs = {
            'input_shape', 'batch_input_shape', 'input_dtype', 'name',
            'trainable', 'create_input_layer'
        }
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Keyword argument not understood: ' + kwarg

        name = kwargs.get('name')
        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name

        self.trainable = kwargs.get('trainable', True)
        if 'batch_input_shape' in kwargs or 'input_shape' in kwargs:
            # in this case we will create an input layer
            # to insert before the current layer
            if 'batch_input_shape' in kwargs:
                batch_input_shape = tuple(kwargs['batch_input_shape'])
            elif 'input_shape' in kwargs:
                batch_input_shape = (None, ) + tuple(kwargs['input_shape'])
            self.batch_input_shape = batch_input_shape
            input_dtype = kwargs.get('input_dtype', K.floatx())
            self.input_dtype = input_dtype
            if 'create_input_layer' in kwargs:
                self.create_input_layer(batch_input_shape, input_dtype)

        self._updates = []
        self._stateful = False
        self._layers = []
Example #9
    def __init__(self,
                 window_size,
                 axes,
                 layers=None,
                 name=None,
                 node_indices=None,
                 tensor_indices=None,
                 output_shape=None,
                 output_mask=None,
                 **kwargs):
        self.window_size = window_size
        self.shape_axes = axes

        if axes == 1:
            self.transform_axis = 2
        elif axes == 2:
            self.transform_axis = 1
        else:
            # TODO: consider raising an error for unsupported axes values.
            self.transform_axis = axes

        self._output_shape = output_shape
        self._output_mask = output_mask

        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}
        self.regularizers = []
        self.trainable_weights = []
        self.non_trainable_weights = []
        self.supports_masking = False
        self.uses_learning_phase = False
        self.input_spec = None  # compatible with whatever
        self.input_dim = None

        self.input_layers = layers
        self.node_indices = node_indices
        self.tensor_indices = tensor_indices
        self.trainable = False
        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name
        if layers:
            # this exists for backwards compatibility.
            # equivalent to:
            # merge = Merge(layers=None)
            # output = merge([input_tensor_1, input_tensor_2])
            if not node_indices:
                # by default we connect to
                # the 1st output stream in the input layer
                node_indices = [0 for _ in range(len(layers))]
            self.built = True
            self.add_inbound_node(self.input_layers, self.node_indices,
                                  self.tensor_indices)

        self.trainable_weights = []
Example #10
    def __init__(self,
                 input_shape=None,
                 batch_size=None,
                 batch_input_shape=None,
                 dtype=None,
                 input_tensor=None,
                 sparse=False,
                 name=None):
        #self.input_spec = None
        self.supports_masking = False
        self.uses_learning_phase = False
        self.trainable = False
        self.built = True

        self._inbound_nodes = []
        self._outbound_nodes = []

        self.trainable_weights = []
        self.non_trainable_weights = []
        self.constraints = {}

        self.sparse = sparse

        if not name:
            prefix = 'input'
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name

        if not batch_input_shape:
            assert input_shape, 'An Input layer should be passed either a `batch_input_shape` or an `input_shape`.'
            batch_input_shape = (None, ) + tuple(input_shape)
        else:
            batch_input_shape = tuple(batch_input_shape)
        if not dtype:
            dtype = K.floatx()

        self.batch_input_shape = batch_input_shape
        self.dtype = dtype

        if input_tensor is None:
            input_tensor = K.placeholder(shape=batch_input_shape,
                                         dtype=dtype,
                                         sparse=self.sparse,
                                         name=self.name)

        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)
        shape = input_tensor._keras_shape
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[shape],
             output_shapes=[shape])
Example #11
    def __init__(
        self,
        nr_feats: int,
        nr_classes: int,
        nr_samples: int,
        batch_size: int,
        epochs: int,
        metric_stop_val: float,
        hid_layers_sizes: List[int],
        activation: str = "tanh",
        name: str = None,
        ensemble_id=None,
        verbose: bool = True,
        watch_stop_val: bool = True,
        generation: int = 0,
        parent_id: str = "no_parent",
        net_id: str = None,
        rambo_master_id: str = "MISSING_RAMBO_MASTER_ID",
        swap_noise: float = None,
        dropout: float = 0.0,
    ):
        self.dropout = dropout

        super().__init__(
            nr_feats,
            nr_classes,
            nr_samples,
            batch_size,
            epochs,
            metric_stop_val,
            hid_layers_sizes,
            activation,
            name,
            ensemble_id,
            verbose,
            watch_stop_val,
            generation,
            parent_id,
            net_id,
            rambo_master_id
        )

        self.nr_feats = nr_feats
        self.swap_noise = swap_noise
        self.is_boost_net = False

        self.run_fit_count = 0

        if not name:
            prefix = "rambo_net_"
            name = prefix + str(backend.get_uid(prefix))
        self.name = name

        self.model_path = "models/" + rambo_master_id + "/" + self.net_id + "_keras_model.h5"
Example #12
    def __init__(
        self,
        nr_feats: int,
        nr_classes: int,
        nr_samples: int,
        batch_size: int,
        epochs: int,
        metric_stop_val: float,
        hid_layers_sizes: List[int],
        activation: str = "tanh",
        name: str = None,
        ensemble_id=None,
        verbose: bool = True,
        watch_stop_val: bool = True,
        generation: int = 0,
        parent_id: str = "no_parent",
        net_id: str = None,
        rambo_master_id: str = "MISSING_RAMBO_MASTER_ID"
    ):
        self.model = Sequential()
        self.built = False
        self.all_feats = nr_feats
        self.hid_layers_sizes = hid_layers_sizes
        self.nr_classes = nr_classes
        self.nr_samples = nr_samples
        self.activation = activation
        self.batch_size = batch_size
        self.epochs = epochs
        self.metric_stop_val = metric_stop_val
        self.label_enc = LabelEncoder()
        self.ensemble_id = ensemble_id
        self.verbose = verbose
        self.early_stopper = None
        self.watch_stop_val = watch_stop_val
        self.bag_train_acc = 0.0
        self.oob_train_acc = 0.0
        self.full_train_acc = 0.0
        self.val_acc = 0.0
        self.generation = generation
        self.parent_id = parent_id
        self.time_created = datetime.datetime.now()
        self.test_acc = -1.0

        if not name:
            prefix = "regular_net_"
            name = prefix + str(backend.get_uid(prefix))
        self.name = name

        self.init_model(nr_feats)
        self.net_id = net_id
        self.model_path = "models/" + rambo_master_id + "/" + self.net_id + "_keras_model.h5"
Example #13
    def __init__(
        self,
        inputs,
        mode='group',
        concat_axis=-1,
        output_shape=None,
        output_mask=None,
        arguments=None,
        node_indices=None,
        tensor_indices=None,
        name=None,
        version=1,
    ):
        if K.backend() == 'theano' or K.image_dim_ordering() == 'th':
            raise RuntimeError(
                "Only the TensorFlow backend with 'tf' image ordering is supported")

        self.inputs = inputs
        self.mode = mode
        self.concat_axis = concat_axis
        self._output_shape = output_shape
        self.node_indices = node_indices
        self._output_mask = output_mask
        self.arguments = arguments if arguments else {}

        # Layer parameters
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}
        self._trainable_weights = []
        self._non_trainable_weights = []
        self.supports_masking = True
        self.uses_learning_phase = False
        self.input_spec = None

        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))

        self.name = name

        if inputs:
            # The inputs are a set of nodes that share the same input.
            if not node_indices:
                node_indices = [0 for _ in range(len(inputs))]
            self.built = True
            # self.add_inbound_node(inputs, node_indices, tensor_indices)
        else:
            self.built = False
Example #14
def make_yolo_depthwise_separable_head(x, num_filters, block_id_str=None):
    '''Five alternating 1x1 Conv2D_BN_Leaky and 3x3 depthwise-separable conv blocks'''
    if not block_id_str:
        block_id_str = str(K.get_uid())
    x = compose(
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters * 2,
                                            kernel_size=(3, 3),
                                            block_id_str=block_id_str + '_1'),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=num_filters * 2,
                                            kernel_size=(3, 3),
                                            block_id_str=block_id_str + '_2'),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
    return x
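
This example (and Example #21 below) relies on a `compose` helper that is not shown. A common definition, included here as a sketch of an assumed dependency rather than code from this project, chains single-argument callables left to right:

from functools import reduce

def compose(*funcs):
    # compose(f, g)(x) == g(f(x)): apply the callables left to right.
    if not funcs:
        raise ValueError('compose requires at least one callable.')
    return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)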
Example #15
def BlockGroup(
    filters,
    strides,
    num_repeats,
    se_ratio: float = 0.25,
    bn_epsilon: float = 1e-5,
    bn_momentum: float = 0.0,
    activation: str = "relu",
    survival_probability: float = 0.8,
    name=None,
):
    """Create one group of blocks for the ResNet model."""
    if name is None:
        counter = backend.get_uid("block_group_")
        name = f"block_group_{counter}"

    def apply(inputs):
        # Only the first block per block_group uses projection shortcut and
        # strides.
        x = BottleneckBlock(
            filters=filters,
            strides=strides,
            use_projection=True,
            se_ratio=se_ratio,
            bn_epsilon=bn_epsilon,
            bn_momentum=bn_momentum,
            activation=activation,
            survival_probability=survival_probability,
            name=name + "_block_0_",
        )(inputs)

        for i in range(1, num_repeats):
            x = BottleneckBlock(
                filters=filters,
                strides=1,
                use_projection=False,
                se_ratio=se_ratio,
                activation=activation,
                bn_epsilon=bn_epsilon,
                bn_momentum=bn_momentum,
                survival_probability=survival_probability,
                name=name + f"_block_{i}_",
            )(x)
        return x

    return apply
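
A sketch of how such groups are typically stacked (illustrative filter and repeat counts; assumes `BottleneckBlock` and its dependencies are in scope):

# Hypothetical backbone fragment: deeper groups downsample and widen.
x = BlockGroup(filters=64, strides=1, num_repeats=3, name="block_group_1")(x)
x = BlockGroup(filters=128, strides=2, num_repeats=4, name="block_group_2")(x)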
Example #16
def SE(in_filters: int,
       se_ratio: float = 0.25,
       expand_ratio: int = 1,
       name=None):
    """Squeeze and Excitation block."""
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
    if name is None:
        counter = backend.get_uid("se_")
        name = f"se_{counter}"

    def apply(inputs):
        x = layers.GlobalAveragePooling2D(name=name + "_se_squeeze")(inputs)
        if bn_axis == 1:
            se_shape = (x.shape[-1], 1, 1)
        else:
            se_shape = (1, 1, x.shape[-1])
        x = layers.Reshape(se_shape, name=name + "_se_reshape")(x)

        num_reduced_filters = max(1, int(in_filters * 4 * se_ratio))

        x = layers.Conv2D(
            filters=num_reduced_filters,
            kernel_size=[1, 1],
            strides=[1, 1],
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            padding="same",
            use_bias=True,
            activation="relu",
            name=name + "_se_reduce",
        )(x)

        x = layers.Conv2D(
            filters=4 * in_filters * expand_ratio,  # expand_ratio is 1 by default
            kernel_size=[1, 1],
            strides=[1, 1],
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            padding="same",
            use_bias=True,
            activation="sigmoid",
            name=name + "_se_expand",
        )(x)

        return layers.multiply([inputs, x], name=name + "_se_excite")

    return apply
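
A usage sketch (assumption: the incoming feature map has `4 * in_filters * expand_ratio` channels, here 256, which the final multiply requires):

# Hypothetical usage on a 256-channel feature map (256 == 4 * 64 * 1).
x = SE(in_filters=64, se_ratio=0.25, expand_ratio=1, name="stage2")(x)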
Example #17
    def create_input_layer(self, batch_input_shape,
                           input_dtype=None, name=None):
        if not name:
            prefix = self.__class__.__name__.lower() + '_input_'
            name = prefix + str(K.get_uid(prefix))
        if not input_dtype:
            input_dtype = K.floatx()

        self.batch_input_shape = batch_input_shape
        self.input_dtype = input_dtype

        # instantiate the input layer
        x = SparseInput(batch_shape=batch_input_shape,
                        dtype=input_dtype, name=name)
        # this will build the current layer
        # and create the node connecting the current layer
        # to the input layer we just created.
        self(x)
Example #18
    def __init__(self,
                 size,
                 initializer='glorot_uniform',
                 regularizer=None,
                 name=None,
                 **kwargs):
        self.size = tuple(size)
        self.initializer = initializers.get(initializer)
        self.regularizer = regularizers.get(regularizer)

        if not name:
            prefix = 'shared_weight'
            name = prefix + '_' + str(K.get_uid(prefix))

        Layer.__init__(self, name=name, **kwargs)
        with K.name_scope(self.name):
            self.kernel = self.add_weight(shape=self.size,
                                          initializer=self.initializer,
                                          name='kernel',
                                          regularizer=self.regularizer)

        self.trainable = True
        self.built = True
        # self.sparse = sparse

        input_tensor = self.kernel * 1.0

        self.is_placeholder = False
        input_tensor._keras_shape = self.size
        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)

        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[self.size],
             output_shapes=[self.size])
Example #19
    def __init__(self,
                 shape,
                 my_initializer='RandomNormal',
                 name=None,
                 mult=1.0,
                 **kwargs):
        self.shape = [1, *shape]
        self.my_initializer = my_initializer
        self.mult = mult

        if not name:
            prefix = 'param'
            name = '%s_%d' % (prefix, K.get_uid(prefix))
        Layer.__init__(self, name=name, **kwargs)

        # Create a trainable weight variable for this layer.
        with K.name_scope(self.name):
            self.kernel = self.add_weight(name='kernel',
                                          shape=self.shape,
                                          initializer=self.my_initializer,
                                          trainable=True)

        # prepare output tensor, which is essentially the kernel.
        output_tensor = self.kernel * self.mult
        output_tensor._keras_shape = self.shape
        output_tensor._uses_learning_phase = False
        output_tensor._keras_history = (self, 0, 0)
        output_tensor._batch_input_shape = self.shape

        self.trainable = True
        self.built = True
        self.is_placeholder = False

        # create new node
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[],
             output_tensors=[output_tensor],
             input_masks=[],
             output_masks=[None],
             input_shapes=[],
             output_shapes=[self.shape])
Example #20
    def __init__(self, layers=None, mode='sum', concat_axis=-1,
                 dot_axes=-1, output_shape=None, output_mask=None,
                 node_indices=None, tensor_indices=None, name=None):
        self.layers = layers
        self.mode = mode
        self.concat_axis = concat_axis
        self.dot_axes = dot_axes
        if isinstance(self.dot_axes, int):
            self.dot_axes = [self.dot_axes] * 2
        self._output_shape = output_shape
        self.node_indices = node_indices
        self._output_mask = output_mask

        # layer parameters
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}
        self.regularizers = []
        self.trainable_weights = []
        self.non_trainable_weights = []
        self.supports_masking = True
        self.uses_learning_phase = False
        self.input_spec = None  # compatible with whatever
        if not name:
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(K.get_uid(prefix))
        self.name = name

        if layers:
            # this exists for backwards compatibility.
            # equivalent to:
            # merge = Merge(layers=None)
            # output = merge([input_tensor_1, input_tensor_2])
            if not node_indices:
                # by default we connect to
                # the 1st output stream in the input layer
                node_indices = [0 for _ in range(len(layers))]
            self._arguments_validation(layers, mode,
                                       concat_axis, dot_axes,
                                       node_indices, tensor_indices)
            self.built = True
            self.add_inbound_node(layers, node_indices, tensor_indices)
        else:
            self.built = False
Example #21
def Depthwise_Separable_Conv2D_BN_Leaky(filters,
                                        kernel_size=(3, 3),
                                        block_id_str=None):
    """Depthwise Separable Convolution2D."""
    if not block_id_str:
        block_id_str = str(K.get_uid())
    return compose(
        DepthwiseConv2D(kernel_size,
                        padding='same',
                        name='conv_dw_' + block_id_str),
        BatchNormalization(name='conv_dw_%s_bn' % block_id_str),
        LeakyReLU(alpha=0.1, name='conv_dw_%s_leaky_relu' % block_id_str),
        Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%s' % block_id_str),
        BatchNormalization(name='conv_pw_%s_bn' % block_id_str),
        LeakyReLU(alpha=0.1, name='conv_pw_%s_leaky_relu' % block_id_str))
Example #22
def Conv2DFixedPadding(filters, kernel_size, strides, name=None):
    """Conv2D block with fixed padding."""
    if name is None:
        counter = backend.get_uid("conv_")
        name = f"conv_{counter}"

    def apply(inputs):
        if strides > 1:
            inputs = fixed_padding(inputs, kernel_size)
        return layers.Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding="same" if strides == 1 else "valid",
            use_bias=False,
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            name=name,
        )(inputs)

    return apply
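
A usage sketch (assumes `fixed_padding` and `CONV_KERNEL_INITIALIZER` are defined in the module): with strides > 1 the input is padded explicitly and the convolution uses 'valid' padding, so the output size does not depend on input parity.

# Hypothetical usage: a strided 3x3 convolution with deterministic padding.
x = Conv2DFixedPadding(filters=64, kernel_size=3, strides=2,
                       name="stem_conv")(x)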
Example #23
    def __init__(self, Auto1, Auto2, layers=None, name=None):
        super(MDL_CW, self).__init__()
        self.layers = []  # Stack of layers.
        self.model = None  # Internal Model instance.
        self.inputs = []  # List of input tensors
        self.outputs = []  # List of length 1: the output tensor (unique).
        self._trainable = True
        self._initial_weights = None
        # Model attributes.
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.built = False
        self.pretrain = False
        self.Auto1 = Auto1
        self.Auto2 = Auto2
        self.Unsupervised_train = False
        # Set model name.
        if not name:
            prefix = 'sequential_'
            name = prefix + str(K.get_uid(prefix))
        self.name = name
Example #24
def Head(num_classes=1000, name=None):
    """Implementation of classification head of RegNet.

    Args:
      num_classes: number of classes for Dense layer
      name: name prefix

    Returns:
      Classification head function.
    """
    if name is None:
        name = str(backend.get_uid("head"))

    def apply(x):
        x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
        x = layers.LayerNormalization(epsilon=1e-6,
                                      name=name + "_head_layernorm")(x)
        x = layers.Dense(num_classes, name=name + "_head_dense")(x)
        return x

    return apply
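
A usage sketch (the variable name `backbone_features` is illustrative):

# Hypothetical usage: attach the classification head to backbone features.
outputs = Head(num_classes=1000, name="regnet_x")(backbone_features)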
Example #25
    def __init__(self, name=None, **kwargs):
        if not name:
            prefix = 'optional_input_placeholder'
            name = prefix + '_' + str(K.get_uid(prefix))
        kwargs['batch_input_shape'] = (2,)
        super(_OptionalInputPlaceHolder, self).__init__(**kwargs)
        self.tensor = K.zeros(shape=(2,))
        self.tensor._keras_shape = (2,)
        self.tensor._uses_learning_phase = False
        self.tensor._keras_history = (self, 0, 0)
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[],
             output_tensors=[self.tensor],
             input_masks=[None],
             output_masks=[None],
             input_shapes=[],
             output_shapes=[(2,)])
        self.build((2,))
Example #26
    def create_input_layer(self,
                           batch_input_shape,
                           input_dtype=None,
                           name=None):
        if not name:
            prefix = self.__class__.__name__.lower() + '_input_'
            name = prefix + str(K.get_uid(prefix))
        if not input_dtype:
            input_dtype = K.floatx()

        self.batch_input_shape = batch_input_shape
        self.input_dtype = input_dtype

        # instantiate the input layer
        x = SparseInput(batch_shape=batch_input_shape,
                        dtype=input_dtype,
                        name=name)
        # this will build the current layer
        # and create the node connecting the current layer
        # to the input layer we just created.
        self(x)
Example #27
    def __init__(
        self,
        input_shape=None,
        batch_size=None,
        dtype=None,
        input_tensor=None,
        sparse=None,
        name=None,
        ragged=None,
        type_spec=None,
        **kwargs,
    ):
        self._init_input_shape = input_shape
        self._init_batch_size = batch_size
        self._init_dtype = dtype
        self._init_sparse = sparse
        self._init_ragged = ragged
        self._init_type_spec = type_spec

        strategy = tf.distribute.get_strategy()
        if (strategy and batch_size is not None
                and distributed_training_utils.global_batch_size_supported(
                    strategy)):
            if batch_size % strategy.num_replicas_in_sync != 0:
                raise ValueError(
                    "The `batch_size` argument ({}) must be divisible by "
                    "the number of replicas ({})".format(
                        batch_size, strategy.num_replicas_in_sync))
            batch_size = batch_size // strategy.num_replicas_in_sync

        if "batch_input_shape" in kwargs:
            batch_input_shape = kwargs.pop("batch_input_shape")
            if input_shape and batch_input_shape:
                raise ValueError("Only provide the input_shape OR "
                                 "batch_input_shape argument to "
                                 "InputLayer, not both at the same time.")
            # Set the input shape and batch size from the batch_input_shape.
            # Note that batch_input_shape can be None (unknown rank) or [] (scalar),
            # in which case the batch size must be None.
            if batch_input_shape:
                batch_size = batch_input_shape[0]
                input_shape = batch_input_shape[1:]
        if kwargs:
            raise ValueError(
                f"Unrecognized keyword arguments: {list(kwargs.keys())}")

        if sparse and ragged:
            raise ValueError(
                "Cannot set both sparse and ragged to True in a Keras input.")

        if not name:
            prefix = "input"
            name = prefix + "_" + str(backend.get_uid(prefix))

        if not dtype:
            if input_tensor is None:
                dtype = backend.floatx()
            else:
                dtype = backend.dtype(input_tensor)
        elif input_tensor is not None and input_tensor.dtype != dtype:
            raise ValueError(
                "`input_tensor.dtype` differs from `dtype`. Received: "
                f"input_tensor.dtype={input_tensor.dtype} "
                f"but expected dtype={dtype}")
        super().__init__(dtype=dtype, name=name)
        self.built = True
        self.sparse = bool(sparse)
        self.ragged = bool(ragged)
        self.batch_size = batch_size
        self.supports_masking = True

        if isinstance(input_shape, tf.TensorShape):
            input_shape = tuple(input_shape.as_list())
        elif isinstance(input_shape, int):
            input_shape = (input_shape, )

        if type_spec is not None:
            args_that_must_be_none = [
                ("(input_)shape", self._init_input_shape),
                ("batch_size", self._init_batch_size),
                ("dtype", self._init_dtype),
                ("input_tensor", input_tensor),
                ("sparse", self._init_sparse),
                ("ragged", self._init_ragged),
            ]
            for arg_name, arg in args_that_must_be_none:
                _assert_other_arg_none(arg_name, arg)
            if not tf.compat.v1.executing_eagerly_outside_functions():
                raise ValueError(
                    "Creating Keras inputs from a type_spec is only "
                    "supported when eager execution is enabled.")
            input_tensor = keras_tensor.keras_tensor_from_type_spec(type_spec)
            if isinstance(input_tensor, keras_tensor.SparseKerasTensor):
                self.sparse = True
            if isinstance(input_tensor, keras_tensor.RaggedKerasTensor):
                self.ragged = True
            self.is_placeholder = True
            try:
                self._batch_input_shape = tuple(input_tensor.shape.as_list())
            except ValueError:
                # If the shape cannot be represented as a tuple (e.g. unknown rank)
                self._batch_input_shape = None
        elif input_tensor is None:
            if input_shape is not None:
                batch_input_shape = (batch_size, ) + tuple(input_shape)
            else:
                batch_input_shape = None
            graph = backend.get_graph()
            with graph.as_default():
                input_tensor = backend.placeholder(
                    shape=batch_input_shape,
                    dtype=dtype,
                    name=self.name,
                    sparse=sparse,
                    ragged=ragged,
                )

            self.is_placeholder = True
            self._batch_input_shape = batch_input_shape
        else:
            if tf.compat.v1.executing_eagerly_outside_functions():
                if not isinstance(input_tensor, keras_tensor.KerasTensor):
                    input_tensor = keras_tensor.keras_tensor_from_tensor(
                        input_tensor)
            else:
                if not tf_utils.is_symbolic_tensor(input_tensor):
                    raise ValueError(
                        "You should not pass an EagerTensor to `Input`. "
                        "For example, instead of creating an "
                        "`InputLayer`, you should instantiate your model "
                        "and directly call it on your input.")
            self.is_placeholder = False
            try:
                self._batch_input_shape = tuple(input_tensor.shape.as_list())
            except ValueError:
                # If the shape cannot be represented as a tuple (e.g. unknown rank)
                self._batch_input_shape = None
        # Create an input node.
        input_tensor._keras_mask = None
        node_module.Node(layer=self, outputs=input_tensor)

        # Store type spec
        if isinstance(input_tensor, keras_tensor.KerasTensor) or (
                tf_utils.is_extension_type(input_tensor)):
            self._type_spec = (input_tensor._type_spec)  # pylint: disable=protected-access
        else:
            self._type_spec = tf.TensorSpec(
                shape=input_tensor.shape,
                dtype=input_tensor.dtype,
                name=self.name,
            )
Example #28
def _get_unique_name(name, prefix=None):
    if prefix is not None:
        name = prefix + '_' + name
    name += '_' + str(K.get_uid(name))
    return name
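
A sketch of the behavior (Keras keeps a per-prefix counter, so in a fresh session repeated calls with the same name yield incrementing suffixes):

# Hypothetical session; the returned values assume fresh uid counters.
_get_unique_name('conv')             # -> 'conv_1'
_get_unique_name('conv')             # -> 'conv_2'
_get_unique_name('conv', 'stage1')   # -> 'stage1_conv_1'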
Example #29
def _unique(name):
    """
    Return a unique name string.
    """
    return name + '_' + str(K.get_uid(name))
Example #30
def FusedMBConvBlock(
    input_filters: int,
    output_filters: int,
    expand_ratio=1,
    kernel_size=3,
    strides=1,
    se_ratio=0.0,
    bn_momentum=0.9,
    activation="swish",
    survival_probability: float = 0.8,
    name=None,
):
    """Fused MBConv Block: Fusing the proj conv1x1 and depthwise_conv into a conv2d."""
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

    if name is None:
        # Note: backend.get_uid returns an int, so build a string prefix
        # (the bare int would raise a TypeError in `name + "expand_conv"`).
        name = "block0_" + str(backend.get_uid("block0")) + "_"

    def apply(inputs):
        filters = input_filters * expand_ratio
        if expand_ratio != 1:
            x = layers.Conv2D(
                filters,
                kernel_size=kernel_size,
                strides=strides,
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                data_format="channels_last",
                padding="same",
                use_bias=False,
                name=name + "expand_conv",
            )(inputs)
            x = layers.BatchNormalization(axis=bn_axis,
                                          momentum=bn_momentum,
                                          name=name + "expand_bn")(x)
            x = layers.Activation(activation=activation,
                                  name=name + "expand_activation")(x)
        else:
            x = inputs

        # Squeeze and excite
        if 0 < se_ratio <= 1:
            filters_se = max(1, int(input_filters * se_ratio))
            se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
            if bn_axis == 1:
                se_shape = (filters, 1, 1)
            else:
                se_shape = (1, 1, filters)

            se = layers.Reshape(se_shape, name=name + "se_reshape")(se)

            se = layers.Conv2D(
                filters_se,
                1,
                padding="same",
                activation=activation,
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                name=name + "se_reduce",
            )(se)
            se = layers.Conv2D(
                filters,
                1,
                padding="same",
                activation="sigmoid",
                kernel_initializer=CONV_KERNEL_INITIALIZER,
                name=name + "se_expand",
            )(se)

            x = layers.multiply([x, se], name=name + "se_excite")

        # Output phase:
        x = layers.Conv2D(
            output_filters,
            kernel_size=1 if expand_ratio != 1 else kernel_size,
            strides=1 if expand_ratio != 1 else strides,
            kernel_initializer=CONV_KERNEL_INITIALIZER,
            padding="same",
            use_bias=False,
            name=name + "project_conv",
        )(x)
        x = layers.BatchNormalization(axis=bn_axis,
                                      momentum=bn_momentum,
                                      name=name + "project_bn")(x)
        if expand_ratio == 1:
            x = layers.Activation(activation=activation,
                                  name=name + "project_activation")(x)

        # Residual:
        if strides == 1 and input_filters == output_filters:
            if survival_probability:
                x = layers.Dropout(
                    survival_probability,
                    noise_shape=(None, 1, 1, 1),
                    name=name + "drop",
                )(x)
            x = layers.add([x, inputs], name=name + "add")
        return x

    return apply
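
A usage sketch (assumes `CONV_KERNEL_INITIALIZER` is defined; with strides=1 and matching filter counts the residual branch with stochastic-depth dropout is active):

# Hypothetical usage inside an EfficientNetV2-style stage.
x = FusedMBConvBlock(input_filters=24, output_filters=24, expand_ratio=4,
                     kernel_size=3, strides=1, name="stage1_block0_")(x)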
Example #31
def BottleneckBlock(
    filters: int,
    strides: int,
    use_projection: bool,
    bn_momentum: float = 0.0,
    bn_epsilon: float = 1e-5,
    activation: str = "relu",
    se_ratio: float = 0.25,
    survival_probability: float = 0.8,
    name=None,
):
    """Bottleneck block variant for residual networks with BN."""
    if name is None:
        counter = backend.get_uid("block_0_")
        name = f"block_0_{counter}"

    def apply(inputs):
        bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

        shortcut = inputs

        if use_projection:
            filters_out = filters * 4
            if strides == 2:
                shortcut = layers.AveragePooling2D(
                    pool_size=(2, 2),
                    strides=(2, 2),
                    padding="same",
                    name=name + "_projection_pooling",
                )(inputs)
                shortcut = Conv2DFixedPadding(
                    filters=filters_out,
                    kernel_size=1,
                    strides=1,
                    name=name + "_projection_conv",
                )(shortcut)
            else:
                shortcut = Conv2DFixedPadding(
                    filters=filters_out,
                    kernel_size=1,
                    strides=strides,
                    name=name + "_projection_conv",
                )(inputs)

            shortcut = layers.BatchNormalization(
                axis=bn_axis,
                momentum=bn_momentum,
                epsilon=bn_epsilon,
                name=name + "_projection_batch_norm",
            )(shortcut)

        # First conv layer:
        x = Conv2DFixedPadding(filters=filters,
                               kernel_size=1,
                               strides=1,
                               name=name + "_conv_1")(inputs)
        x = layers.BatchNormalization(
            axis=bn_axis,
            momentum=bn_momentum,
            epsilon=bn_epsilon,
            name=name + "batch_norm_1",
        )(x)
        x = layers.Activation(activation, name=name + "_act_1")(x)

        # Second conv layer:
        x = Conv2DFixedPadding(
            filters=filters,
            kernel_size=3,
            strides=strides,
            name=name + "_conv_2",
        )(x)
        x = layers.BatchNormalization(
            axis=bn_axis,
            momentum=bn_momentum,
            epsilon=bn_epsilon,
            name=name + "_batch_norm_2",
        )(x)
        x = layers.Activation(activation, name=name + "_act_2")(x)

        # Third conv layer:
        x = Conv2DFixedPadding(filters=filters * 4,
                               kernel_size=1,
                               strides=1,
                               name=name + "_conv_3")(x)
        x = layers.BatchNormalization(
            axis=bn_axis,
            momentum=bn_momentum,
            epsilon=bn_epsilon,
            name=name + "_batch_norm_3",
        )(x)

        if 0 < se_ratio < 1:
            x = SE(filters, se_ratio=se_ratio, name=name + "_se")(x)

        # Drop connect
        if survival_probability:
            x = layers.Dropout(
                survival_probability,
                noise_shape=(None, 1, 1, 1),
                name=name + "_drop",
            )(x)

        x = layers.Add()([x, shortcut])

        return layers.Activation(activation, name=name + "_output_act")(x)

    return apply
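
A usage sketch (illustrative arguments; the first block of a stage typically projects and downsamples):

# Hypothetical usage: projection shortcut with stride-2 downsampling.
x = BottleneckBlock(filters=64, strides=2, use_projection=True,
                    name="block_group_1_block_0_")(x)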