Example no. 1
    def _register_layer_statistics(self, layer: tf.keras.layers.Layer,
                                   layer_statistics: list, handles: list):
        channel_axes = get_channel_axis(InputType.INPUTS, '', layer)
        init_config = self.range_init_params.get_init_config_for_quantization_point(
            layer, InputType.INPUTS)

        is_weights = False
        collector_params = RangeInitCollectorParams(is_weights, layer.mode,
                                                    layer.per_channel)
        per_sample_stats = init_config.init_type in [
            'mixed_min_max', 'mean_min_max'
        ]

        reduction_shape = get_reduction_shape_activations(
            layer, channel_axes,
            collector_params.use_per_sample_stats(per_sample_stats))

        num_batches = int(
            np.ceil(init_config.num_init_samples / self.dataset.batch_size))

        collector = RangeInitializer.generate_stat_collector(
            reduction_shape, collector_params, init_config, num_batches)
        handles.append(
            layer.register_hook_pre_quantizer(collector.register_input))
        layer.enabled = False
        layer_statistics.append((layer, collector))
Example no. 2
    def __init__(self, backbone: tf.keras.Model,
                 decoder: tf.keras.layers.Layer, head: tf.keras.layers.Layer,
                 input_specs: tf.keras.layers.InputSpec, **kwargs):
        """Segmentation initialization function.

    Args:
      backbone: a backbone network.
      decoder: a decoder network. E.g. FPN.
      head: segmentation head.
      input_specs: The shape specifications of input tensor.
      **kwargs: keyword arguments to be passed.
    """
        inputs = tf.keras.Input(shape=input_specs.shape[1:],
                                name=input_specs.name)
        backbone_features = backbone(inputs)

        if decoder:
            backbone_feature = backbone_features[str(
                decoder.get_config()['level'])]
            decoder_feature = decoder(backbone_feature)
        else:
            decoder_feature = backbone_features

        backbone_feature = backbone_features[str(
            head.get_config()['low_level'])]
        x = {'logits': head((backbone_feature, decoder_feature))}
        super().__init__(inputs=inputs, outputs=x, **kwargs)
        self._config_dict = {
            'backbone': backbone,
            'decoder': decoder,
            'head': head,
        }
        self.backbone = backbone
        self.decoder = decoder
        self.head = head
Example no. 3
def _get_conv_layer_attributes(
        layer: tf.keras.layers.Layer,
        is_depthwise: bool = False) -> ConvolutionLayerAttributes:
    channel_axis = get_input_channel_axis(layer)
    layer_ = unwrap_layer(layer)
    layer_metatype = get_keras_layer_metatype(layer_, determine_subtype=False)
    strides = layer_.strides[0]
    in_channels = layer.get_input_shape_at(0)[channel_axis]
    out_channels = layer.get_output_shape_at(0)[channel_axis]

    # TF does not set the groups attribute properly on a depthwise layer; for
    # compatibility with common code, the groups attribute of the returned
    # ConvolutionLayerAttributes must equal in_channels so that the layer is
    # detected as depthwise.
    groups = layer_.groups if not is_depthwise else in_channels
    kernel_size = layer_.kernel_size

    transpose = layer_metatype in DECONV_LAYER_METATYPES

    return ConvolutionLayerAttributes(layer.trainable,
                                      in_channels,
                                      out_channels,
                                      kernel_size,
                                      strides,
                                      groups,
                                      transpose=transpose,
                                      padding_values=[0, 0, 0, 0])
Example no. 4
def calculate_sparsity_of_layer(layer: tf.keras.layers.Layer):
    layer_params = layer.count_params()
    if layer_params == 0:
        return 0.0, 0, 0  # keep the return type consistent with the tuple below
    weights = layer.get_weights()[0]
    layer_zeros = np.count_nonzero(weights == 0)
    return layer_zeros / layer_params, layer_zeros, layer_params
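
A minimal usage sketch for the helper above (the Dense layer and the zeroed rows are hypothetical; note that count_params() includes the bias while only the kernel is scanned for zeros):

import numpy as np
import tensorflow as tf

dense = tf.keras.layers.Dense(4)
dense.build((None, 4))
kernel, bias = dense.get_weights()
kernel[:2, :] = 0.0  # zero out half of the kernel rows
dense.set_weights([kernel, bias])

sparsity, zeros, params = calculate_sparsity_of_layer(dense)
print(sparsity, zeros, params)  # 0.4 8 20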
Example no. 5
def replace_factory(layer: tf.keras.layers.Layer, name: str = None):
    cfg = layer.get_config()
    if name is None:
        name = '{}_replaced'.format(layer.name)
    cfg['name'] = name
    new_layer = layer.from_config(cfg)
    print('{} -> {}'.format(layer, new_layer))
    return new_layer
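
A minimal usage sketch, assuming the factory above is in scope:

import tensorflow as tf

dense = tf.keras.layers.Dense(8, activation='relu', name='fc1')
clone = replace_factory(dense)                # cloned as 'fc1_replaced'
renamed = replace_factory(dense, name='fc2')  # cloned under an explicit name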
Example no. 6
def from_embed_array(tf_embed: tf.keras.layers.Layer, d: Dict, name: str):
    """Restore a simple lookup table embedding from a `weights` array

    :param tf_embed: An embedding module
    :param d: A Dict containing a `weights` array to restore
    :param name: name of the layer
    :return: None
    """
    weights = d[f"{name}/weights"]
    tf_embed.set_weights([weights])
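
A minimal usage sketch for the function above (the 'embed' key follows the f"{name}/weights" layout; the sizes are hypothetical):

import numpy as np
import tensorflow as tf

embed = tf.keras.layers.Embedding(100, 8)
embed.build((None,))
d = {'embed/weights': np.random.rand(100, 8).astype('float32')}
from_embed_array(embed, d, 'embed')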
Example no. 7
def from_weight_array(tf_layer: tf.keras.layers.Layer, d: Dict, name: str):
    """Read in {`LayerNorm`, `Linear`, `layers.Dense`} from `weights` and `bias` fields

    :param tf_layer: A layer to get weights for
    :param d: A Dict containing the arrays by key
    :param name: The name of this layer
    :return: None
    """
    weights = d[f"{name}/weights"]
    bias = d[f"{name}/bias"]
    tf_layer.set_weights([weights.T, bias])
Example no. 8
def _change_rank_of_neuron(linear: tf.keras.layers.Layer, rank: List[int], weight_rank: int, change_bias: bool):
    weights = linear.get_weights()

    linear_weights = [w for _, w in sorted(zip(rank, tf.split(weights[0], len(rank), weight_rank)))]
    linear_weight = tf.concat(linear_weights, weight_rank)

    if not change_bias:
        linear.set_weights([linear_weight, weights[1]])
    else:
        linear_biases = [b for _, b in sorted(zip(rank, tf.split(weights[1], len(rank), 0)))]
        linear_bias = tf.concat(linear_biases, 0)
        linear.set_weights([linear_weight, linear_bias])
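
A minimal usage sketch, assuming the helper above; the rank values are hypothetical, and axis 1 of a Dense kernel indexes the units:

import tensorflow as tf

dense = tf.keras.layers.Dense(3)
dense.build((None, 2))
# Permute the three units (and their biases) according to their ranks.
_change_rank_of_neuron(dense, rank=[1, 2, 0], weight_rank=1, change_bias=True)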
Example no. 9
def _simplify_tf_keras_layers(layer: tf.keras.layers.Layer) -> tuple:
    """
    This function converts a keras layer into a serialized keras layer using
    keras serialize.

    Args:
      layer (tf.keras.layers.Layer): the Keras layer to be serialized

    Returns:
      tuple: serialized tuple of Layer class. The first value is the
      id of the layer and the second is the layer configuration
      object. The third is the layer weights and the fourth is the
      batch input shape.
    """

    layer_ser = tf.keras.layers.serialize(layer)

    weights = layer.get_weights()
    weights_ser = syft.serde.serde._simplify(weights)

    layer_dict_ser = syft.serde.serde._simplify(layer_ser)

    batch_input_shape_ser = syft.serde.serde._simplify(
        layer_ser["config"]["batch_input_shape"])

    return layer.id, layer_dict_ser, weights_ser, batch_input_shape_ser
Example no. 10
def _layer_to_dict(layer: tf.keras.layers.Layer) -> dict:
    return {
        'name': layer.name.replace('_', ' ').capitalize(),
        'input_shape': 'x'.join(str(dim) for dim in layer.input_shape[1:]),
        'output_shape': 'x'.join(str(dim) for dim in layer.output_shape[1:]),
        'params': layer.count_params()
    }
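
A minimal usage sketch (hypothetical layer; the layer must have been called so that input_shape and output_shape are defined):

import tensorflow as tf

inputs = tf.keras.Input(shape=(28, 28, 3))
conv = tf.keras.layers.Conv2D(8, 3, name='first_conv')
_ = conv(inputs)
print(_layer_to_dict(conv))
# {'name': 'First conv', 'input_shape': '28x28x3',
#  'output_shape': '26x26x8', 'params': 224}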
Example no. 11
def _get_module_attributes(layer: tf.keras.layers.Layer,
                           attrs: dict) -> ConvolutionModuleAttributes:
    channel_axis = get_input_channel_axis(layer)
    if isinstance(layer, NNCFWrapper):
        strides = layer.layer.strides[0]
        groups = layer.layer.groups
        kernel_size = layer.layer.kernel_size
    else:
        strides = layer.strides[0]
        groups = layer.groups
        kernel_size = layer.kernel_size

    return ConvolutionModuleAttributes(
        layer.trainable,
        layer.get_input_shape_at(0)[channel_axis],
        layer.get_output_shape_at(0)[channel_axis], kernel_size, strides,
        groups)
Example no. 12
  def _update_einsum_dense(
      self, einsum_dense_layer: tf.keras.layers.Layer) -> tf.keras.layers.Layer:
    """Updates the EinsumDense layer to its spectral-normalized counterparts."""
    if not self._use_spec_norm:
      return einsum_dense_layer

    # Overwrites EinsumDense using the same arguments.
    einsum_dense_kwargs = einsum_dense_layer.get_config()
    return self._spec_norm_dense_layer(**einsum_dense_kwargs)
Example no. 13
def to_weight_array(tf_layer: tf.keras.layers.Layer, name: str) -> Dict:
    """Convert a {`LayerNorm`, `tf.keras.layers.Dense`} to `weights` and `bias` arrays

    :param tf_layer: A layer to get weights for
    :param name: The name of this layer to serialize
    :return: A Dictionary containing `weights` and `bias` keys
    """

    weights, bias = tf_layer.get_weights()
    return {f"{name}/weights": weights.T, f"{name}/bias": bias}
Example no. 14
def copy_layer(
    layer: tf.keras.layers.Layer,
    layer_name_suffix: str = '',
    overwrite_params: Optional[Dict[str, Any]] = None,
) -> tf.keras.layers.Layer:
    layer_attrs = layer.get_config()
    if overwrite_params is not None:
        layer_attrs.update(overwrite_params)
    layer_attrs.update({'name': layer_attrs['name'] + layer_name_suffix})
    return layer.__class__.from_config(layer_attrs)
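
A minimal usage sketch, assuming the helper above; keys in overwrite_params must be valid get_config() entries:

import tensorflow as tf

dense = tf.keras.layers.Dense(16, name='fc')
wider = copy_layer(dense, layer_name_suffix='_wide', overwrite_params={'units': 32})
print(wider.name, wider.units)  # fc_wide 32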
Example no. 15
def from_embed_array(tf_embed: tf.keras.layers.Layer,
                     d: Dict,
                     name: str,
                     bias=None):
    """Restore a simple lookup table embedding from a `weights` array

    :param tf_embed: An embedding module
    :param d: A Dict containing a `weights` array to restore
    :param name: name of the layer
    :param bias: optional bias array, appended (reshaped to 1 x d) when the module supports it
    :return: None
    """
    weights = [d[f"{name}/weights"]]
    if hasattr(tf_embed, 'pos'):
        pos_weights = d[f"{name}/pos_weights"]
        weights = [pos_weights] + weights
        if hasattr(tf_embed, 'bias') and bias is not None:
            weights = weights + [bias.reshape(1, -1)]

    tf_embed.set_weights(weights)
Example no. 16
def inference(img_file: str, model: tf.keras.layers.Layer) -> List[Dict[str, Any]]:
    """Inference step."""
    img = cv2.cvtColor(cv2.imread(img_file), cv2.COLOR_BGR2RGB)
    img_ndarray, ratio = _preprocess(img)

    output_dict = model.serve(img_ndarray)
    class_tensor = output_dict['classes'].numpy()
    mask_tensor = output_dict['masks'].numpy()
    group_tensor = output_dict['groups'].numpy()

    indices = np.where(
        class_tensor[0])[0].tolist()  # indices of positive slots.
    mask_list = [mask_tensor[0, :, :, index]
                 for index in indices]  # List of mask ndarray.

    # Form lines and words
    lines = []
    line_indices = []
    for index, mask in tqdm.tqdm(zip(indices, mask_list)):
        line = {
            'words': [],
            'text': '',
        }

        contours, _ = cv2.findContours((mask > 0.).astype(np.uint8),
                                       cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]
        for contour in contours:
            if (isinstance(contour, np.ndarray) and len(contour.shape) == 3
                    and contour.shape[0] > 2 and contour.shape[1] == 1
                    and contour.shape[2] == 2):
                cnt_list = (contour[:, 0] * ratio).astype(np.int32).tolist()
                line['words'].append({'text': '', 'vertices': cnt_list})
            else:
                logging.error('Invalid contour: %s, discarded', str(contour))
        if line['words']:
            lines.append(line)
            line_indices.append(index)

    # Form paragraphs
    line_grouping = utilities.DisjointSet(len(line_indices))
    affinity = group_tensor[0][line_indices][:, line_indices]
    for i1, i2 in zip(*np.where(affinity > _PARA_GROUP_THR)):
        line_grouping.union(i1, i2)

    line_groups = line_grouping.to_group()
    paragraphs = []
    for line_group in line_groups:
        paragraph = {'lines': []}
        for id_ in line_group:
            paragraph['lines'].append(lines[id_])
        if paragraph['lines']:
            paragraphs.append(paragraph)

    return paragraphs
Example no. 17
  def load_weights(self, layer: tf.keras.layers.Layer) -> int:
    """Assign weights to layer.

    Given a layer, this function retrieves the weights for that layer in an
    appropriate format and order, and loads them into the layer. Additionally,
    the number of weights loaded is returned.

    If the weights are in an incorrect format, a ValueError
    will be raised by set_weights().

    Args:
      layer: A `tf.keras.layers.Layer`.

    Returns:
      The total number of scalar weight values loaded.
    """
    weights = self.get_weights()
    layer.set_weights(weights)

    n_weights = 0
    for w in weights:
      n_weights += w.size
    return n_weights
Example no. 18
def forward(prev_output: tf.Tensor,
            layer: tf.keras.layers.Layer,
            mode: str = None) -> tf.Tensor:
    """
    Applies a transformation to the layer's weights. Currently forward()(w) = w + c * w^+
    :param prev_output: output tensor of the previous layer
    :param layer: layer of a neural network
    :param mode: string selecting which method to use
                    - 'gamma': w <- w + c * w+
                    - 'pos': w <- w+
                    - 'neg': w <- w-
    :return: pre-activation output of the layer under the transformed weights
    """

    weights = layer.get_weights().copy()

    if isinstance(layer, tf.keras.layers.Conv2D) or isinstance(
            layer, tf.keras.layers.Dense):

        try:

            if mode == 'pos':
                layer.set_weights([
                    tf.clip_by_value(weights[0],
                                     clip_value_min=0,
                                     clip_value_max=np.inf)
                ])

            elif mode == 'neg':
                layer.set_weights([
                    tf.clip_by_value(weights[0],
                                     clip_value_min=-np.inf,
                                     clip_value_max=0)
                ])

            elif mode is not None:
                print('Invalid value for "mode" in call to forward(.)')

        except IndexError:
            print('Failed')

    layer.activation = None

    out = layer(prev_output)

    layer.set_weights(weights)

    return out
Example no. 19
def get_layer_call_fn(layer: tf.keras.layers.Layer) -> Callable[[tf.Tensor], tf.Tensor]:
    old_call_fn = layer.call
    layer.old_call = old_call_fn

    @error_handling_agent.catch_smdebug_errors(default_return_val=old_call_fn)
    def call(inputs, *args, **kwargs) -> tf.Tensor:
        layer_input = inputs
        layer_output = old_call_fn(inputs, *args, **kwargs)
        for hook in layer._hooks:
            hook_result = hook(inputs, layer_input=layer_input, layer_output=layer_output)
            if hook_result is not None:
                layer_output = hook_result
        return layer_output

    return call
Example no. 20
def _is_depthwise_conv(
        layer: tf.keras.layers.Layer,
        wrapper: Optional[tf.keras.layers.Wrapper] = None) -> bool:
    channel_axis = (-1 - layer.rank
                    if layer.data_format == 'channels_first' else -1)

    channels = layer.get_input_shape_at(0)[channel_axis] if wrapper is None \
        else wrapper.get_input_shape_at(0)[channel_axis]

    if channels is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')

    input_channels = int(channels)

    return input_channels == layer.groups and input_channels > 1
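
A minimal sketch for the predicate above: a Conv2D whose groups equal its input channels is reported as depthwise (the shapes are hypothetical):

import tensorflow as tf

inputs = tf.keras.Input(shape=(8, 8, 4))
conv = tf.keras.layers.Conv2D(filters=4, kernel_size=3, groups=4)
_ = conv(inputs)
print(_is_depthwise_conv(conv))  # True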
Example no. 21
def get_layer_call_fn(
        layer: tf.keras.layers.Layer) -> Callable[[tf.Tensor], tf.Tensor]:
    old_call_fn = layer.call
    layer.old_call = old_call_fn

    def call(inputs, *args, **kwargs) -> tf.Tensor:
        layer_input = inputs
        layer_output = old_call_fn(inputs, *args, **kwargs)
        for hook in layer._hooks:
            hook_result = hook(inputs,
                               layer_input=layer_input,
                               layer_output=layer_output)
            if hook_result is not None:
                layer_output = hook_result
        return layer_output

    return call
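
A minimal wiring sketch, assuming the helper above; the _hooks list is a convention of this helper, not a Keras API:

import tensorflow as tf

dense = tf.keras.layers.Dense(2)
dense._hooks = [lambda inputs, layer_input, layer_output: layer_output * 0.5]
dense.call = get_layer_call_fn(dense)
out = dense(tf.ones((1, 3)))  # the hook halves the layer output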
Example no. 22
def _capture_call_tf(input: tf.Tensor,
                     fe_storage: Dict[Union[str, torch.device], Tensor],
                     fe_layer: tf.keras.layers.Layer, **kwargs) -> tf.Tensor:
    """A function to capture the output of a TF model layer.

    Args:
        input: The input tensor to the layer. Note that this must be the first argument in the method signature.
        fe_storage: A place to store the output from the layer.
        fe_layer: A tf layer such that fe_layer(input) -> output.
        **kwargs: Any arguments to be passed along to the fe_layer call method.

    Returns:
        The output of the given layer for the specified input.
    """
    output = fe_layer.fe_original_call(input, **kwargs)
    # TF multi-GPU does not need to store outputs separately per device.
    fe_storage[''] = output
    return output
Example no. 23
  def _make_block_prunable(
      self, layer: tf.keras.layers.Layer) -> tf.keras.layers.Layer:
    if isinstance(layer, tf.keras.Model):
      return tf.keras.models.clone_model(
          layer, input_tensors=None, clone_function=self._make_block_prunable)

    if layer.__class__ not in self._BLOCK_LAYER_SUFFIX_MAP:
      return layer

    prunable_weights = []
    for layer_suffix in self._BLOCK_LAYER_SUFFIX_MAP[layer.__class__]:
      for weight in layer.weights:
        if weight.name.endswith(layer_suffix):
          prunable_weights.append(weight)

    def get_prunable_weights():
      return prunable_weights

    layer.get_prunable_weights = get_prunable_weights

    return layer
Example no. 24
def forward(prev_output: tf.Tensor,
            layer: tf.keras.layers.Layer,
            c: float = 0,
            mode: str = None,
            no_bias: bool = False) -> tf.Tensor:
    """
    Applies a transformation to the layer's weights for the forward pass
    :param prev_output: output tensor of the previous layer
    :param layer: layer of a neural network
    :param c: factor by which the positive part of the weights is multiplied
    :param mode: string selecting which method to use
                    - 'gamma': w <- w + c * w+
                    - 'pos': w <- w+
                    - 'neg': w <- w-
    :param no_bias: if True, the bias is zeroed out for this pass
    :return: pre-activation output of the layer under the transformed weights
    """
    # Copy the weights
    weights = layer.get_weights().copy()

    # The transformation must not be applied to auxiliary layers
    if isinstance(layer, tf.keras.layers.Conv2D) or isinstance(
            layer, tf.keras.layers.Dense):

        # If the bias is not used, set it to 0
        if no_bias:
            weights[1] = weights[1] * 0.

        # Positive weights are scaled by the factor (1 + c)
        if mode == 'gamma':
            layer.set_weights([
                tf.add(
                    weights[0],
                    tf.clip_by_value(np.multiply(weights[0], c),
                                     clip_value_min=0,
                                     clip_value_max=np.inf)),
                tf.add(
                    weights[1],
                    tf.clip_by_value(np.multiply(weights[1], c),
                                     clip_value_min=0,
                                     clip_value_max=np.inf))
            ])

        # Only positive weights are used. Positive bias values are kept and treated
        # conceptually as a further neuron, to guard against b ≤ 0
        elif mode == 'pos':
            layer.set_weights([
                tf.clip_by_value(weights[0],
                                 clip_value_min=0,
                                 clip_value_max=np.inf),
                tf.clip_by_value(weights[1],
                                 clip_value_min=0,
                                 clip_value_max=np.inf)
            ])

        # Only negative weights are used
        elif mode == 'neg':
            layer.set_weights([
                tf.clip_by_value(weights[0],
                                 clip_value_min=-np.inf,
                                 clip_value_max=0),
                tf.clip_by_value(weights[1],
                                 clip_value_min=-np.inf,
                                 clip_value_max=0)
            ])

        elif mode is None:
            pass

        else:
            print('Invalid value for "mode" in call to forward(.)')

    # The total pre-activation input is computed, so the activation must not be applied yet
    layer.activation = None

    # Perform the forward pass
    out = layer(prev_output)

    # Restore the original weights
    layer.set_weights(weights)

    return out
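
A minimal usage sketch (LRP-style usage assumed): computing the pre-activation output of a built Dense layer under the gamma rule. Note the helper restores the weights but leaves layer.activation cleared:

import tensorflow as tf

dense = tf.keras.layers.Dense(2, activation='relu')
x = tf.ones((1, 3))
_ = dense(x)  # build the layer so its weights exist
z = forward(x, dense, c=0.25, mode='gamma')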
Example no. 25
def _reset_layer_or_model_recursively(layer_or_model: tf.keras.layers.Layer):
    layer_or_model.built = False
    if isinstance(layer_or_model, tf.keras.Model):
        for each_layer in layer_or_model.layers:
            _reset_layer_or_model_recursively(each_layer)
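
A minimal sketch showing the recursive reset, assuming the helper above; only the built flags are cleared, existing weights stay in place:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
print(model.built, model.layers[0].built)  # True True
_reset_layer_or_model_recursively(model)
print(model.built, model.layers[0].built)  # False False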
Example no. 26
def get_sub_layers(layer: tf.keras.layers.Layer):
    # _flatten_layers is a private Keras API yielding the direct sub-layers.
    layers = layer._flatten_layers(recursive=False, include_self=False)
    # Sort by name via a key so Layer objects are never compared directly.
    return sorted(((l.name, l) for l in layers), key=lambda pair: pair[0])
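
A minimal usage sketch, assuming the helper above:

import tensorflow as tf

block = tf.keras.Sequential([
    tf.keras.layers.Dense(4, name='fc_a'),
    tf.keras.layers.Dense(2, name='fc_b'),
])
for name, sub_layer in get_sub_layers(block):
    print(name, sub_layer)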