Example 1
    def forward(self, inputs):
        # W = tl.act.sign(W)    # don't update ...
        alpha = compute_alpha(self.W)
        W_ = ternary_operation(self.W)
        W_ = tf.multiply(alpha, W_)
        # W = tf.Variable(W)

        outputs = tf.matmul(inputs, W_)
        # self.outputs = xnor_gemm(self.inputs, W) # TODO

        if self.b_init is not None:
            outputs = tf.nn.bias_add(outputs, self.b, name='bias_add')
        if self.act:
            outputs = self.act(outputs)
        return outputs
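
The helpers compute_alpha and ternary_operation are not defined in these snippets. Below is a minimal sketch of what they might look like, assuming the usual Ternary Weight Networks formulation (threshold Delta ≈ 0.7 * mean(|W|), weights quantized to {-1, 0, +1}, and alpha taken as the mean magnitude of the weights that survive the threshold); the helpers in the original project may differ in detail.

import tensorflow as tf

def _compute_threshold(x):
    # Delta = 0.7 * mean(|W|): the thresholding heuristic from Ternary Weight Networks.
    return 0.7 * tf.reduce_mean(tf.abs(x))

def ternary_operation(x):
    # Quantize weights to {-1, 0, +1}: +1 above +Delta, -1 below -Delta, 0 in between.
    threshold = _compute_threshold(x)
    return tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold))

def compute_alpha(x):
    # alpha = mean |W| over the weights that survive the threshold,
    # so that alpha * ternary_operation(W) approximates W.
    threshold = _compute_threshold(x)
    mask = tf.cast(tf.abs(x) > threshold, x.dtype)
    n_nonzero = tf.maximum(tf.reduce_sum(mask), 1.0)
    return tf.reduce_sum(tf.abs(x) * mask) / n_nonzero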
Example 2
    def forward(self, inputs):

        alpha = compute_alpha(self.W)

        W_ = ternary_operation(self.W)
        W_ = tf.multiply(alpha, W_)

        outputs = tf.nn.conv2d(inputs,
                               W_,
                               strides=self.strides,
                               padding=self.padding,
                               use_cudnn_on_gpu=self.use_cudnn_on_gpu,
                               data_format=self.data_format)

        if self.b_init:
            outputs = tf.nn.bias_add(outputs, self.b, name='bias_add')

        if self.act:
            outputs = self.act(outputs)

        return outputs
Example 3
    def forward(self, inputs):

        alpha = compute_alpha(self.W)

        W_ = ternary_operation(self.W)
        W_ = tf.multiply(alpha, W_)

        outputs = tf.nn.conv2d(
            input=inputs,
            filters=W_,
            strides=self._strides,
            padding=self.padding,
            data_format=self.data_format,
            dilations=self._dilation_rate,
            name=self.name
        )

        if self.b_init:
            outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add')
        if self.act:
            outputs = self.act(outputs)

        return outputs
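
Examples 2 and 3 show the TF1-style and TF2-style tf.nn.conv2d calls (the use_cudnn_on_gpu argument versus the input=/filters=/dilations= keywords). Below is a hypothetical standalone check of the scaled ternary kernel, reusing the sketched helpers above; the shapes and variable names are assumptions, not part of the original layers.

import tensorflow as tf

inputs = tf.random.normal([1, 8, 8, 3])      # NHWC input batch (assumed shape)
W = tf.random.normal([3, 3, 3, 16])          # HWIO kernel, standing in for self.W

alpha = compute_alpha(W)                     # from the sketch after Example 1
W_ = alpha * ternary_operation(W)            # scaled ternary kernel
outputs = tf.nn.conv2d(input=inputs, filters=W_, strides=[1, 1, 1, 1], padding='SAME')
print(outputs.shape)                         # (1, 8, 8, 16)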
Example 4
    def __init__(
            self,
            prev_layer,
            n_filter=32,
            filter_size=(3, 3),
            strides=(1, 1),
            act=None,
            padding='SAME',
            use_gemm=False,
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            use_cudnn_on_gpu=None,
            data_format=None,
            # act=None,
            # shape=(5, 5, 1, 100),
            # strides=(1, 1, 1, 1),
            # padding='SAME',
            # W_init=tf.truncated_normal_initializer(stddev=0.02),
            # b_init=tf.constant_initializer(value=0.0),
            # W_init_args=None,
            # b_init_args=None,
            # use_cudnn_on_gpu=None,
            # data_format=None,
            name='ternary_cnn2d',
    ):
        super(TernaryConv2d, self).__init__(
            prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name
        )

        logging.info(
            "TernaryConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s" % (
                self.name, n_filter, str(filter_size), str(strides), padding,
                self.act.__name__ if self.act is not None else 'No Activation'
            )
        )

        if len(strides) != 2:
            raise ValueError("len(strides) should be 2.")

        if use_gemm:
            raise Exception("TODO. The current version use tf.matmul for inferencing.")

        try:
            pre_channel = int(prev_layer.outputs.get_shape()[-1])
        except Exception:  # if pre_channel is ?, it happens when using Spatial Transformer Net
            pre_channel = 1
            logging.warning("unknow input channels, set to 1")

        shape = (filter_size[0], filter_size[1], pre_channel, n_filter)
        strides = (1, strides[0], strides[1], 1)

        with tf.variable_scope(name):

            W = tf.get_variable(
                name='W_conv2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **self.W_init_args
            )

            alpha = compute_alpha(W)

            W = ternary_operation(W)
            W = tf.multiply(alpha, W)

            self.outputs = tf.nn.conv2d(
                self.inputs, W, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu,
                data_format=data_format
            )

            if b_init:

                b = tf.get_variable(
                    name='b_conv2d', shape=(shape[-1], ), initializer=b_init, dtype=LayersConfig.tf_dtype,
                    **self.b_init_args
                )

                self.outputs = tf.nn.bias_add(self.outputs, b, name='bias_add')

            self.outputs = self._apply_activation(self.outputs)

        self._add_layers(self.outputs)

        if b_init:
            self._add_params([W, b])
        else:
            self._add_params(W)

    def __init__(
            self,
            prev_layer,
            n_units=100,
            act=None,
            use_gemm=False,
            W_init=tf.truncated_normal_initializer(stddev=0.1),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='ternary_dense',
    ):
        super(TernaryDenseLayer, self).__init__(
            prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name
        )

        logging.info(
            "TernaryDenseLayer  %s: %d %s" %
            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
        )

        if self.inputs.get_shape().ndims != 2:
            raise Exception("The input dimension must be rank 2, please reshape or flatten it")

        if use_gemm:
            raise Exception("TODO. The current version use tf.matmul for inferencing.")

        n_in = int(self.inputs.get_shape()[-1])

        self.n_units = n_units

        with tf.variable_scope(name):

            W = tf.get_variable(
                name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **self.W_init_args
            )

            # W = tl.act.sign(W)    # don't update ...
            alpha = compute_alpha(W)
            W = ternary_operation(W)
            W = tf.multiply(alpha, W)
            # W = tf.Variable(W)

            self.outputs = tf.matmul(self.inputs, W)
            # self.outputs = xnor_gemm(self.inputs, W) # TODO

            if b_init is not None:
                try:
                    b = tf.get_variable(
                        name='b', shape=(n_units, ), initializer=b_init, dtype=LayersConfig.tf_dtype, **self.b_init_args
                    )
                except Exception:  # If initializer is a constant, do not specify shape.
                    b = tf.get_variable(name='b', initializer=b_init, dtype=LayersConfig.tf_dtype, **self.b_init_args)

                self.outputs = tf.nn.bias_add(self.outputs, b, name='bias_add')

            self.outputs = self._apply_activation(self.outputs)

        self._add_layers(self.outputs)

        if b_init is not None:
            self._add_params([W, b])
        else:
            self._add_params(W)
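
The two __init__ methods above come from the older TensorLayer 1.x graph-building API, where each layer wraps a prev_layer and registers self.outputs. A hypothetical usage sketch under that API (the placeholder shape and layer names are assumptions):

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
net = tl.layers.InputLayer(x, name='input')
net = TernaryDenseLayer(net, n_units=256, act=tf.nn.relu, name='ternary_dense1')
net = TernaryDenseLayer(net, n_units=10, act=None, name='ternary_output')
y = net.outputs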