Code Example #1
File: dorefa_dense.py  Project: zsdonghao/tensorlayer
    def forward(self, inputs):
        # Quantize activations: clip |x| into [0, 1], then quantize to bitA bits.
        inputs = quantize_active(cabs(inputs), self.bitA)
        # Quantize the weight matrix to bitW bits (DoReFa-style).
        W_ = quantize_weight(self.W, self.bitW)
        outputs = tf.matmul(inputs, W_)
        # self.outputs = xnor_gemm(self.inputs, W) # TODO
        if self.b_init is not None:
            outputs = tf.nn.bias_add(outputs, self.b, name='bias_add')
            # self.outputs = xnor_gemm(self.inputs, W) + b # TODO
        if self.act:
            outputs = self.act(outputs)
        return outputs
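For context, below is a minimal sketch of what cabs, quantize_active, and quantize_weight compute in DoReFa-Net-style quantization. The straight-through estimator via tf.custom_gradient and the tanh-based weight mapping follow the DoReFa-Net paper; this is an illustrative assumption, not the library's verbatim implementation.

import tensorflow as tf

def _quantize(x, k):
    # Uniform k-bit quantizer on [0, 1] with a straight-through
    # estimator: round in the forward pass, identity gradient.
    n = float(2 ** k - 1)

    @tf.custom_gradient
    def q(x):
        return tf.round(x * n) / n, lambda dy: dy

    return q(x)

def cabs(x):
    # Clip activation magnitudes into [0, 1].
    return tf.minimum(1.0, tf.abs(x))

def quantize_active(x, bitA):
    # 32 bits means full precision; otherwise quantize to bitA bits.
    return x if bitA == 32 else _quantize(x, bitA)

def quantize_weight(w, bitW):
    # Map weights into [0, 1] via tanh, quantize, then rescale to [-1, 1].
    if bitW == 32:
        return w
    w = tf.tanh(w)
    w = w / (2.0 * tf.reduce_max(tf.abs(w))) + 0.5
    return 2.0 * _quantize(w, bitW) - 1.0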
Code Example #2
File: dorefa_conv.py  Project: zzyydtc/tensorlayer
    def forward(self, inputs):

        # Quantize activations to bitA bits after clipping |x| into [0, 1].
        inputs = quantize_active(cabs(inputs), self.bitA)  # Do not remove

        # Quantize the convolution kernel to bitW bits.
        W_ = quantize_weight(self.W, self.bitW)

        outputs = tf.nn.conv2d(
            input=inputs, filters=W_, strides=self._strides, padding=self.padding, data_format=self.data_format,
            dilations=self._dilation_rate, name=self.name
        )

        if self.b_init:
            outputs = tf.nn.bias_add(outputs, self.b, data_format=self.data_format, name='bias_add')
        if self.act:
            outputs = self.act(outputs)

        return outputs
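As a quick sanity check, the quantized kernel can be fed straight to tf.nn.conv2d. This hypothetical smoke test reuses the quantize_weight sketch above; shapes and values are arbitrary.

x = tf.random.uniform((1, 8, 8, 3))               # NHWC activations in [0, 1]
w = tf.random.normal((3, 3, 3, 16), stddev=0.02)  # HWIO kernel
w_q = quantize_weight(w, bitW=1)                  # quantized to {-1, +1}
y = tf.nn.conv2d(input=x, filters=w_q, strides=(1, 1, 1, 1), padding='SAME')
print(y.shape)  # (1, 8, 8, 16)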
Code Example #3
    def forward(self, inputs):

        inputs = quantize_active(cabs(inputs), self.bitA)  # Do not remove

        W_ = quantize_weight(self.W, self.bitW)

        # TF1-style tf.nn.conv2d signature (use_cudnn_on_gpu was removed in TF2).
        outputs = tf.nn.conv2d(
            inputs, W_, strides=self.strides, padding=self.padding, use_cudnn_on_gpu=self.use_cudnn_on_gpu,
            data_format=self.data_format
        )

        if self.b_init:
            outputs = tf.nn.bias_add(outputs, self.b, name='bias_add')
        if self.act:
            outputs = self.act(outputs)

        return outputs
Code Example #4
File: dorefa_conv.py  Project: zouqy/tensorlayer
    def __init__(
        self,
        prev_layer,
        bitW=1,
        bitA=3,
        n_filter=32,
        filter_size=(3, 3),
        strides=(1, 1),
        act=None,
        padding='SAME',
        use_gemm=False,
        W_init=tf.truncated_normal_initializer(stddev=0.02),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        use_cudnn_on_gpu=None,
        data_format=None,
        name='dorefa_cnn2d',
    ):
        super(DorefaConv2d, self).__init__(prev_layer=prev_layer,
                                           act=act,
                                           W_init_args=W_init_args,
                                           b_init_args=b_init_args,
                                           name=name)

        logging.info(
            "DorefaConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s"
            % (self.name, n_filter, str(filter_size), str(strides), padding,
               self.act.__name__ if self.act is not None else 'No Activation'))

        self.inputs = quantize_active(cabs(self.inputs), bitA)  # Do not remove

        if use_gemm:
            raise Exception(
                "TODO. The current version uses tf.nn.conv2d for inference.")

        if len(strides) != 2:
            raise ValueError("len(strides) should be 2.")

        try:
            pre_channel = int(prev_layer.outputs.get_shape()[-1])
        except Exception:  # pre_channel is unknown, e.g. with a Spatial Transformer Net
            pre_channel = 1
            logging.warning("unknown input channels, set to 1")

        shape = (filter_size[0], filter_size[1], pre_channel, n_filter)
        strides = (1, strides[0], strides[1], 1)

        with tf.variable_scope(name):
            W = tf.get_variable(name='W_conv2d',
                                shape=shape,
                                initializer=W_init,
                                dtype=LayersConfig.tf_dtype,
                                **self.W_init_args)

            W = quantize_weight(W, bitW)

            self.outputs = tf.nn.conv2d(self.inputs,
                                        W,
                                        strides=strides,
                                        padding=padding,
                                        use_cudnn_on_gpu=use_cudnn_on_gpu,
                                        data_format=data_format)

            if b_init:
                b = tf.get_variable(name='b_conv2d',
                                    shape=(shape[-1],),
                                    initializer=b_init,
                                    dtype=LayersConfig.tf_dtype,
                                    **self.b_init_args)

                self.outputs = tf.nn.bias_add(self.outputs, b, name='bias_add')

            self.outputs = self._apply_activation(self.outputs)

        self._add_layers(self.outputs)

        if b_init:
            self._add_params([W, b])
        else:
            self._add_params(W)
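Based on the constructor signature above, graph-mode usage might look like the following. The placeholder shape, InputLayer, and layer names are illustrative assumptions in TensorFlow 1.x / TensorLayer 1.x style, not code from the repository.

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.DorefaConv2d(net, bitW=1, bitA=3, n_filter=32,
                             filter_size=(5, 5), strides=(1, 1),
                             act=tf.nn.relu, padding='SAME',
                             name='dorefa_cnn1')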
Code Example #5
    def __init__(
        self,
        prev_layer,
        bitW=1,
        bitA=3,
        n_units=100,
        act=None,
        use_gemm=False,
        W_init=tf.truncated_normal_initializer(stddev=0.1),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        name='dorefa_dense',
    ):
        super(DorefaDenseLayer, self).__init__(prev_layer=prev_layer,
                                               act=act,
                                               W_init_args=W_init_args,
                                               b_init_args=b_init_args,
                                               name=name)

        logging.info(
            "DorefaDenseLayer %s: %d %s" %
            (self.name, n_units,
             self.act.__name__ if self.act is not None else 'No Activation'))

        if self.inputs.get_shape().ndims != 2:
            raise Exception(
                "The input dimension must be rank 2. Please reshape or flatten it."
            )
        if use_gemm:
            raise Exception(
                "TODO. The current version uses tf.matmul for inference.")

        n_in = int(self.inputs.get_shape()[-1])
        self.n_units = n_units

        self.inputs = quantize_active(cabs(self.inputs), bitA)

        with tf.variable_scope(name):

            W = tf.get_variable(name='W',
                                shape=(n_in, n_units),
                                initializer=W_init,
                                dtype=LayersConfig.tf_dtype,
                                **self.W_init_args)
            # W = tl.act.sign(W)  # alternative: binarize with sign (weights not updated)
            W = quantize_weight(W, bitW)

            self.outputs = tf.matmul(self.inputs, W)
            # self.outputs = xnor_gemm(self.inputs, W) # TODO

            if b_init is not None:
                try:
                    b = tf.get_variable(name='b',
                                        shape=(n_units,),
                                        initializer=b_init,
                                        dtype=LayersConfig.tf_dtype,
                                        **self.b_init_args)

                except Exception:  # If initializer is a constant, do not specify shape.
                    b = tf.get_variable(name='b',
                                        initializer=b_init,
                                        dtype=LayersConfig.tf_dtype,
                                        **self.b_init_args)

                self.outputs = tf.nn.bias_add(self.outputs, b, name='bias_add')
                # self.outputs = xnor_gemm(self.inputs, W) + b # TODO

            self.outputs = self._apply_activation(self.outputs)

        self._add_layers(self.outputs)
        if b_init is not None:
            self._add_params([W, b])
        else:
            self._add_params(W)
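A matching hypothetical usage for the dense variant, continuing the sketch after Code Example #4 and again assuming the TensorLayer 1.x graph API (FlattenLayer and all names are illustrative):

net = tl.layers.FlattenLayer(net, name='flatten')
net = tl.layers.DorefaDenseLayer(net, bitW=1, bitA=3, n_units=256,
                                 act=tf.nn.relu, name='dorefa_dense1')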