Example #1
    def __init__(self,
                 model,
                 number=5,
                 conv2d="tucker2",
                 vbmf_weaken_factor=0.8):
        self.model = model
        self.layers = []
        self.iteration = 0
        self.number = number
        self.vbmf_weaken_factor = vbmf_weaken_factor

        if conv2d == "tucker2":
            self.conv2d = (conv2d, (None, None))
        elif conv2d == "cp3" or conv2d == "cp4":
            self.conv2d = (conv2d, None)
        else:
            raise CompressionError(
                "Wrong value for conv2d, use one of [\"tucker2\", \"cp4\", \"cp3\"]"
            )

        # Schedule every Conv2D layer for the chosen conv decomposition and
        # every Dense layer for SVD.
        for layer in self.model.layers:
            if isinstance(layer, keras.layers.Conv2D):
                self.layers.append([layer.name, self.conv2d])
            elif isinstance(layer, keras.layers.Dense):
                self.layers.append([layer.name, ("svd", None)])
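Every example below raises CompressionError, which is not defined in these excerpts. A minimal placeholder, assuming it is just an Exception subclass with an optional message:

class CompressionError(Exception):
    """Hypothetical stand-in for the exception used by these excerpts."""

    def __init__(self, message="Unable to compress the layer."):
        super().__init__(message)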
Example #2
def get_weights_and_bias(layer):
    """Returns weights and biases.

    :param layer: a source layer
    :return: If layer is a tf.keras.layers.Conv2D, layer.weights is returned as weights;
             otherwise a list of weight tensors is returned as weights.
             The second element returned is the bias tensor.
             Note that all weights are returned in PyTorch dimension order:
             [out_channels, in_channels, kernel_size[0]*kernel_size[1]]
    """

    weights = None
    bias = None

    if isinstance(layer, keras.Sequential):
        weights = layer.layers[1].get_weights()[0]
        bias = layer.layers[-1].get_weights()[-1]
    elif isinstance(layer, keras.layers.Conv2D):
        weights, bias = layer.get_weights()

    # Fail before touching the arrays if the layer type was not recognized above.
    if weights is None or bias is None:
        raise CompressionError()

    weights = to_pytorch_kernel_order(weights)
    weights = weights.reshape((*weights.shape[:2], -1))

    return weights, bias
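The to_pytorch_kernel_order helper is referenced but not shown in these excerpts. A minimal sketch, assuming it simply transposes a TensorFlow Conv2D kernel [kernel_h, kernel_w, in_channels, out_channels] into PyTorch order [out_channels, in_channels, kernel_h, kernel_w]:

import numpy as np

def to_pytorch_kernel_order(kernel):
    # Assumed mapping: [kh, kw, cin, cout] -> [cout, cin, kh, kw].
    return np.transpose(kernel, (3, 2, 0, 1))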
Example #3
def get_cp_factors(layer, rank, cin, cout, kernel_size, **kwargs):
    weights, bias = get_weights_and_bias(layer)
    w_cin = None
    w_z = None
    w_cout = None

    # A layer that was already decomposed (a keras.Sequential of factors) keeps
    # its CP factors, so they are only recompressed to the new rank.
    if isinstance(layer, keras.Sequential):
        w_cout, w_cin, w_z = recompress_ncpd_tensor(weights,
                                                    new_rank=rank,
                                                    max_cycle=500,
                                                    return_fit=False,
                                                    tensor_format="cpd")
    # A plain Conv2D kernel is decomposed from scratch with CP-ALS.
    elif isinstance(layer, keras.layers.Conv2D):
        P, _, _ = cp_als(dtensor(weights), rank, init="random")
        w_cin, w_cout, w_z = extract_weights_tensors(P)

    if w_cin is None or w_z is None or w_cout is None:
        raise CompressionError()

    # Reshape to the proper PyTorch shape order.
    w_cin = w_cin.T.reshape((rank, cin, 1, 1))
    w_z = w_z.T.reshape((rank, 1, *kernel_size))
    w_cout = w_cout.reshape((cout, rank, 1, 1))

    # Reorder to TensorFlow order.
    w_cin, w_z, w_cout = [to_tf_kernel_order(w) for w in [w_cin, w_z, w_cout]]

    return [w_cin, w_z, w_cout], [None, None, bias]
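Likewise, to_tf_kernel_order is assumed here to be the inverse transpose, mapping a PyTorch-ordered kernel back to the TensorFlow layout:

import numpy as np

def to_tf_kernel_order(kernel):
    # Assumed mapping: [cout, cin, kh, kw] -> [kh, kw, cin, cout].
    return np.transpose(kernel, (2, 3, 1, 0))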
Example #4
def get_conv_params(layer):
    cin = None
    cout = None
    kernel_size = None
    padding = None
    strides = None
    activation = None
    batch_input_shape = None

    if isinstance(layer, keras.Sequential):
        # If the layer has been decomposed at least once, then
        # the first layer in a sequence contains in_channels,
        # the second layer contains information about kernel_size, padding and strides,
        # the third layer contains information about out_channels.
        layer_1, layer_2, layer_3 = layer.layers
        conf_1, conf_2, conf_3 = (layer_1.get_config(), layer_2.get_config(),
                                  layer_3.get_config())

        if "batch_input_shape" in conf_1:
            batch_input_shape = conf_1["batch_input_shape"]

        cin = (layer_1.input_shape[-1]
               if layer_1.data_format == "channels_last"
               else layer_1.input_shape[0])
        cout = (layer_3.output_shape[-1]
                if layer_3.data_format == "channels_last"
                else layer_3.output_shape[0])
        kernel_size = conf_2["kernel_size"]
        padding = conf_2["padding"]
        strides = conf_2["strides"]
        activation = conf_3["activation"]
    elif isinstance(layer, keras.layers.Conv2D):
        cin = (layer.input_shape[-1]
               if layer.data_format == "channels_last"
               else layer.input_shape[0])
        cout = (layer.output_shape[-1]
                if layer.data_format == "channels_last"
                else layer.output_shape[0])
        layer_conf = layer.get_config()
        kernel_size = layer_conf["kernel_size"]
        padding = layer_conf["padding"]
        strides = layer_conf["strides"]
        activation = layer_conf["activation"]

        if "batch_input_shape" in layer_conf:
            batch_input_shape = layer_conf["batch_input_shape"]

    if (cin is None or cout is None or kernel_size is None or padding is None
            or strides is None or activation is None):
        raise CompressionError()

    return dict(cin=cin,
                cout=cout,
                kernel_size=kernel_size,
                padding=padding,
                strides=strides,
                batch_input_shape=batch_input_shape,
                activation=activation)
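A small usage sketch for the Conv2D branch of get_conv_params, assuming the function and CompressionError are in scope; the layer has to be built (here via input_shape) so that input_shape and output_shape are populated:

from tensorflow import keras

model = keras.Sequential([
    keras.layers.Conv2D(16, (3, 3), padding="same", activation="relu",
                        input_shape=(32, 32, 3)),
])
params = get_conv_params(model.layers[0])
print(params["cin"], params["cout"], params["kernel_size"])  # 3 16 (3, 3)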
Example #5
def get_weights_and_bias(layer):
    """Returns weights and biases.

    :param layer: a source layer
    :return: If layer is a tf.keras.layers.Conv2D, layer.weights is returned as weights;
             otherwise a list of weight tensors is returned as weights.
             The second element returned is the bias tensor.
             Note that all weights are returned in PyTorch dimension order:
             [out_channels, in_channels, kernel_size[0]*kernel_size[1]]
    """

    weights = None
    bias = None

    if isinstance(layer, keras.Sequential):
        w_cin, _, w_z, w_cout, bias = layer.get_weights()
        w_cin, w_z, w_cout = [
            to_pytorch_kernel_order(w) for w in [w_cin, w_z, w_cout]
        ]

        # Reshape the 4D factor tensors into 2D matrices:
        # w_cin and w_cout have two trailing dimensions of size 1,
        # and w_z has a second dimension equal to 1.
        w_cin = w_cin.reshape(w_cin.shape[:2]).T
        w_cout = w_cout.reshape(w_cout.shape[:2])
        w_z = w_z.reshape((w_z.shape[0], np.prod(w_z.shape[2:]))).T
        weights = [w_cout, w_cin, w_z]
    elif isinstance(layer, keras.layers.Conv2D):
        weights, bias = layer.get_weights()
        weights = to_pytorch_kernel_order(weights)
        weights = weights.reshape((*weights.shape[:2], -1))

    if weights is None or bias is None:
        raise CompressionError()

    return weights, bias
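A quick numpy sanity check of the reshapes in the Sequential branch above, using made-up factor shapes (rank 4, cin 16, cout 32, 3x3 kernel) already in the assumed PyTorch order:

import numpy as np

rank, cin, cout = 4, 16, 32
w_cin = np.zeros((rank, cin, 1, 1))
w_z = np.zeros((rank, 1, 3, 3))
w_cout = np.zeros((cout, rank, 1, 1))

print(w_cin.reshape(w_cin.shape[:2]).T.shape)                       # (16, 4)
print(w_z.reshape((w_z.shape[0], np.prod(w_z.shape[2:]))).T.shape)  # (9, 4)
print(w_cout.reshape(w_cout.shape[:2]).shape)                       # (32, 4)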
Example #6
def get_weights_and_bias(layer):
    """Returns weights and biases.

    :param layer: a source layer
    :return: If layer is a tf.keras.layers.Conv2D, layer.weights is returned as weights;
             otherwise a list of weight tensors is returned as weights.
             The second element returned is the bias tensor.
             Note that all weights are returned in PyTorch dimension order:
             [out_channels, in_channels, kernel_size[0]*kernel_size[1]]
    """

    weights = None
    bias = None

    if isinstance(layer, keras.Sequential):
        w_cin, _, w_h, w_w, w_cout, bias = layer.get_weights()
        w_cin, w_cout = to_pytorch_kernel_order(w_cin), to_pytorch_kernel_order(w_cout)

        # The middle layers are depthwise, so their kernels should end up in the order
        # [rank, 1, kernel_size, kernel_size]. A depthwise-specific reorder would be
        # w_h, w_w = depthwise_to_pytorch_kernel_order(w_h), depthwise_to_pytorch_kernel_order(w_w),
        # but the generic TensorFlow-to-PyTorch reorder is applied instead.
        w_h, w_w = to_pytorch_kernel_order(w_h), to_pytorch_kernel_order(w_w)
        w_cin = w_cin.reshape(w_cin.shape[:2]).T
        w_h = w_h.reshape((w_h.shape[0], w_h.shape[2])).T
        w_w = w_w.reshape((w_w.shape[0], w_w.shape[3])).T
        w_cout = w_cout.reshape(w_cout.shape[:2])
        weights = [w_cout, w_cin, w_h, w_w]
    elif isinstance(layer, keras.layers.Conv2D):
        weights, bias = layer.get_weights()
        weights = to_pytorch_kernel_order(weights)

    if weights is None or bias is None:
        raise CompressionError()

    return weights, bias
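A hedged usage sketch for the Conv2D branch of this variant, assuming get_weights_and_bias, to_pytorch_kernel_order, and CompressionError are in scope:

from tensorflow import keras

model = keras.Sequential([
    keras.layers.Conv2D(8, (3, 3), use_bias=True, input_shape=(32, 32, 3)),
])
weights, bias = get_weights_and_bias(model.layers[0])
print(weights.shape)  # (8, 3, 3, 3), i.e. [out_channels, in_channels, kh, kw]
print(bias.shape)     # (8,)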