Example #1
File: bwn.py Project: wm901115nwpu/LLSQ
    def forward(self, x):
        if self.alpha is None:
            return F.conv2d(x, self.weight, self.bias, self.stride,
                            self.padding, self.dilation, self.groups)
        w_reshape = self.weight.reshape([self.weight.shape[0], -1]).transpose(0, 1)
        if self.training and self.init_state == 0:
            # self.alpha.data.copy_(torch.ones(1))
            if self.q_mode == Qmodes.layer_wise:
                alpha_fp = torch.mean(torch.abs(w_reshape.data))
                alpha_s = log_shift(alpha_fp)
                if alpha_s >= 1:
                    alpha_s /= 2
                print('{}==>{}'.format(alpha_fp.item(), alpha_s.item()))
            else:
                alpha_fp = torch.mean(torch.abs(w_reshape.data), dim=0)
                alpha_s = log_shift(alpha_fp)
                for i in range(len(alpha_s)):
                    if alpha_s[i] >= 1:
                        alpha_s[i] /= 2  # halve only the offending channel scale
            self.alpha.data.copy_(alpha_s)
            self.init_state.fill_(1)

        alpha = self.alpha.detach()
        pre_quantized_weight = w_reshape / alpha
        quantized_weight = alpha * FunSign.apply(pre_quantized_weight)
        w_q = quantized_weight.transpose(0, 1).reshape(self.weight.shape)
        return F.conv2d(x, w_q, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
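
FunSign is not defined in this snippet. A minimal sketch consistent with its use above — binary sign in the forward pass, straight-through gradient in the backward pass, the standard binary-weight-network trick — might look like the following (an assumption, not the repo's actual implementation):

import torch

class FunSign(torch.autograd.Function):
    # Sign with a clipped straight-through gradient (sketch only).
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return torch.sign(x)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        # Pass gradients through unchanged inside [-1, 1], zero outside.
        return grad_output * (x.abs() <= 1).to(grad_output.dtype)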
Example #2
 def forward(self, x):
     if self.alpha is None:
         return F.conv2d(x, self.weight, self.bias, self.stride,
                         self.padding, self.dilation, self.groups)
     Qn = -2**(self.nbits - 1)
     Qp = 2**(self.nbits - 1) - 1
     w_reshape = self.weight.reshape([self.weight.shape[0],
                                      -1]).transpose(0, 1)
     if self.training and self.init_state == 0:
         if self.q_mode == Qmodes.layer_wise:
             alpha_fp = w_reshape.detach().abs().max() / (Qp + 1)
             alpha_s = log_shift(alpha_fp)
             print('{}==>{}'.format(alpha_fp.item(), alpha_s.item()))
         else:
             alpha_fp = w_reshape.detach().abs().max(dim=0)[0] / Qp
             alpha_s = log_shift(alpha_fp)
         self.alpha.data.copy_(alpha_s)
         self.init_state.fill_(1)
     alpha = self.alpha.detach()
     w_reshape_q = (w_reshape / alpha).round().clamp(Qn, Qp) * alpha
     w_q = w_reshape_q.transpose(0, 1).reshape(self.weight.shape)
     return F.conv2d(x, w_q, self.bias, self.stride, self.padding,
                     self.dilation, self.groups)
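
log_shift is also external to this snippet. Judging by the printed alpha_fp ==> alpha_s pairs and the power-of-two halving fix-ups elsewhere, it plausibly rounds a positive scale to the nearest power of two so that scaling can be done with a bit shift. A hedged sketch, not a copy of the repo's version:

import torch

def log_shift(alpha_fp):
    # Round a positive scale (scalar or per-channel tensor) to the
    # nearest power of two. A guess at the intended behavior.
    return torch.pow(2.0, torch.round(torch.log2(alpha_fp)))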
Example #3
File: rnn_q.py Project: wm901115nwpu/LLSQ
    def forward(self, x, hx=None, prefix='', loop_id=-1, save=False):
        self.check_forward_input(x)
        if hx is None:
            hx = x.new_zeros(x.size(0), self.hidden_size, requires_grad=False)
            hx = (hx, hx)
        self.check_forward_hidden(x, hx[0], '[0]')
        self.check_forward_hidden(x, hx[1], '[1]')
        h_prev, c_prev = hx
        x_h_prev = torch.cat((x, h_prev), dim=1)
        x_h_prev_q = self.actq1(x_h_prev)
        self.save_inner_data(save, prefix, 'x_h_prev_q', loop_id, x_h_prev_q)
        weight_ih_hh = torch.cat((self.weight_ih, self.weight_hh), dim=1)
        bias_ih_hh = self.bias_ih + self.bias_hh
        if self.alpha is None:  # don't quantize weight and bias
            fc_gate = F.linear(x_h_prev_q, weight_ih_hh, bias_ih_hh)
        else:
            if self.training and self.init_state == 0:
                alpha_fp = torch.mean(torch.abs(weight_ih_hh))
                alpha_s = log_shift(alpha_fp)
                if alpha_s >= 1:
                    alpha_s /= 2
                print('{}==>{}'.format(alpha_fp.item(), alpha_s.item()))
                self.alpha.data.copy_(alpha_s)
                self.init_state.fill_(1)

            alpha = self.alpha.detach()
            self.save_inner_data(save, prefix, 'alpha', 0, alpha)
            weight_ih_hh_q = alpha * FunSign.apply(weight_ih_hh / alpha)
            self.save_inner_data(save, prefix, 'weight_ih_hh_q', 0,
                                 weight_ih_hh_q)
            # todo: quantize bias
            self.save_inner_data(save, prefix, 'bias_ih_hh', 0, bias_ih_hh)
            # todo: no bias for now; the quantized-bias variant is left disabled
            fc_gate = F.linear(x_h_prev_q, weight_ih_hh_q)
        fc_gate_q = self.actq4(fc_gate)
        i, f, g, o = torch.chunk(fc_gate_q, 4, dim=1)
        i, f, g, o = self.actq3(self.act_i(i)), self.actq3(self.act_f(f)), \
                     self.actq4(self.act_g(g)), self.actq3(self.act_o(o))
        self.save_inner_data(save, prefix, 'i', loop_id, i)
        self.save_inner_data(save, prefix, 'f', loop_id, f)
        self.save_inner_data(save, prefix, 'g', loop_id, g)
        self.save_inner_data(save, prefix, 'o', loop_id, o)
        self.save_inner_data(save, prefix, 'c_prev', loop_id, c_prev)
        ci, cf = self.actq2(self.eltwisemult_cell_input(i, g)), self.actq2(
            self.eltwisemult_cell_forget(f, c_prev))
        self.save_inner_data(save, prefix, 'ci', loop_id, ci)
        self.save_inner_data(save, prefix, 'cf', loop_id, cf)

        c = self.actq2(self.eltwiseadd_cell(cf, ci))
        h = self.actq1(self.eltwisemult_hidden(o, self.actq2(self.act_h(c))))
        self.save_inner_data(save, prefix, 'c', loop_id, c)
        self.save_inner_data(save, prefix, 'h', loop_id, h)

        self.save_inner_data(save, prefix, 'alpha1', 0, self.actq1.alpha)
        self.save_inner_data(save, prefix, 'alpha2', 0, self.actq2.alpha)
        self.save_inner_data(save, prefix, 'alpha3', 0, self.actq3.alpha)
        self.save_inner_data(save, prefix, 'alpha4', 0, self.actq4.alpha)

        return h, c
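
For context, a cell with this forward() signature would typically be stepped manually over a sequence. The class name and constructor below are assumptions for illustration only:

import torch

cell = QLSTMCell(input_size=64, hidden_size=128)  # hypothetical constructor
x_seq = torch.randn(10, 32, 64)  # (time, batch, features)
hx = None
for t in range(x_seq.size(0)):
    h, c = cell(x_seq[t], hx, prefix='dbg', loop_id=t, save=False)
    hx = (h, c)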
Example #4
 def forward(self, x):
     if self.alpha is None or x.max() < 1e-6:
         # nothing to quantize: alpha is unset or the input is numerically zero
         return x
     if self.training and self.init_state == 0:
         # Please select a init_rate for activation.
         # self.alpha.data.copy_(x.max() / 2 ** (self.nbits - 1) * self.init_rate)
         if self.signed:
             alpha_fp = x.detach().abs().max() / 2**(self.nbits - 1)
         else:
             alpha_fp = x.detach().abs().max() / 2**self.nbits
         alpha_s = log_shift(alpha_fp)
         print('{}==>{}'.format(alpha_fp.item(), alpha_s.item()))
         self.alpha.data.copy_(alpha_s)
         self.init_state.fill_(1)
     alpha = self.alpha.detach()
     if self.signed:
         x_clip = (x / alpha).clamp(-2**(self.nbits - 1),
                                    2**(self.nbits - 1) - 1)
     else:
         x_clip = (x / alpha).clamp(0, 2**self.nbits - 1)
     x_round = x_clip.round()
     x_round = x_round * alpha
     x_clip = x_clip * alpha
     # straight-through estimator: forward takes the rounded value,
     # gradients flow through the un-rounded clipped path
     x_q = x_clip - x_clip.detach() + x_round.detach()
     return x_q
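
The last three lines implement the straight-through estimator: the forward result equals the rounded value, while the gradient is that of the clipped-but-unrounded path. A quick standalone check with assumed values (alpha = 0.5, a 3-bit signed range):

import torch

alpha, Qn, Qp = 0.5, -4, 3
x = torch.tensor([0.3, 1.2], requires_grad=True)
x_clip = (x / alpha).clamp(Qn, Qp) * alpha
x_round = (x / alpha).clamp(Qn, Qp).round() * alpha
x_q = x_clip - x_clip.detach() + x_round.detach()
print(x_q)             # forward values are the rounded ones: 0.5 and 1.0
x_q.sum().backward()
print(x.grad)          # gradient of the clipped path: tensor([1., 1.])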
Example #5
 def forward(self, x):
     if self.alpha is None:
         return F.linear(x, self.weight, self.bias)
     Qn = -2**(self.nbits - 1)
     Qp = 2**(self.nbits - 1) - 1
     w_reshape = self.weight.transpose(0, 1)
     if self.training and self.init_state == 0:
         self.init_state.fill_(1)
         alpha_fp = w_reshape.detach().abs().max() / (Qp + 1)
         alpha_s = log_shift(alpha_fp)
         print('{}==>{}'.format(alpha_fp.item(), alpha_s.item()))
         self.alpha.data.copy_(alpha_s)
     alpha = self.alpha.detach()
     w_q = (w_reshape / alpha).clamp(Qn, Qp).round() * alpha
     w_q = w_q.transpose(0, 1)
     return F.linear(x, w_q, self.bias)
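
The quantization line maps each weight onto one of 2**nbits levels spaced alpha apart, saturating at the integer bounds. A standalone numeric illustration with an assumed nbits = 4 (so Qn = -8, Qp = 7) and alpha = 0.25:

import torch

alpha, Qn, Qp = 0.25, -8, 7
w = torch.tensor([[0.30, -1.10], [2.70, 0.05]])
w_q = (w / alpha).clamp(Qn, Qp).round() * alpha
print(w_q)  # tensor([[ 0.2500, -1.0000], [ 1.7500,  0.0000]])
# 2.70 saturates at Qp * alpha = 1.75; 0.05 rounds down to 0.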
Example #6
File: bwn.py Project: wm901115nwpu/LLSQ
 def forward(self, x, save=False):
     if self.alpha is None:
         return F.linear(x, self.weight, self.bias)
     if self.training and self.init_state == 0:
         alpha_fp = torch.mean(torch.abs(self.weight.data))
         alpha_s = log_shift(alpha_fp)
         if alpha_s >= 1:
             alpha_s /= 2
         print('{}==>{}'.format(alpha_fp.item(), alpha_s.item()))
         self.alpha.data.copy_(alpha_s)
         self.init_state.fill_(1)
     alpha = self.alpha.detach()
     pre_quantized_weight = self.weight / alpha
     quantized_weight = alpha * FunSign.apply(pre_quantized_weight)
     self.save_inner_data(save, 'fc', 'alpha', alpha)
     self.save_inner_data(save, 'fc', 'weight', quantized_weight)
     self.save_inner_data(save, 'fc', 'bias', self.bias)
     return F.linear(x, quantized_weight, self.bias)
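
Since alpha is positive, dividing by it before the sign does not change any sign: the forward result reduces to alpha * sign(W). A quick check using torch.sign (which maps exact zeros to 0; FunSign may treat zeros differently):

import torch

alpha = 0.125
w = torch.tensor([0.30, -0.02, 0.00, -1.10])
print(alpha * torch.sign(w / alpha))
# tensor([ 0.1250, -0.1250,  0.0000, -0.1250])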
Example #7
File: bwn.py Project: wm901115nwpu/LLSQ
    def forward(self, x):
        if self._bn.training:
            if self.alpha is not None and self.init_state != 0:
                w_reshape = self.weight.reshape([self.weight.shape[0], -1]).transpose(0, 1)
                alpha = self.alpha.detach()
                pre_quantized_weight = w_reshape / alpha
                quantized_weight = alpha * FunSign.apply(pre_quantized_weight)
                w_q = quantized_weight.transpose(0, 1).reshape(self.weight.shape)
            else:
                w_q = self.weight
            conv_out = F.conv2d(x, w_q, self.bias, self.stride,
                                self.padding, self.dilation, self.groups)
            # calculate mean and variance
            fake_out = self._bn(conv_out)
            conv_out = conv_out.transpose(1, 0).contiguous()
            conv_out = conv_out.view(conv_out.size(0), -1)
            mu = conv_out.mean(dim=1)  # the same as the mean computed inside _bn
            var = torch.var(conv_out, dim=1, unbiased=False)
        else:
            mu = self._bn.running_mean
            var = self._bn.running_var
        if self._bn.affine:
            gamma = self._bn.weight
            beta = self._bn.bias
        else:
            gamma = torch.ones(self.out_channels).to(var.device)
            beta = torch.zeros(self.out_channels).to(var.device)

        A = gamma.div(torch.sqrt(var + self._bn.eps))
        A_expand = A.expand_as(self.weight.transpose(0, -1)).transpose(0, -1)

        weight_fold = self.weight * A_expand
        if self.bias is None:
            bias_fold = (- mu) * A + beta
        else:
            bias_fold = (self.bias - mu) * A + beta
        if self.alpha is None:
            return F.conv2d(x, weight_fold, bias_fold, self.stride,
                            self.padding, self.dilation, self.groups)
        w_reshape = weight_fold.reshape([self.weight.shape[0], -1]).transpose(0, 1)
        if self.training and self.init_state == 0:
            # self.alpha.data.copy_(torch.ones(1))
            if self.q_mode == Qmodes.layer_wise:
                alpha_fp = torch.mean(torch.abs(w_reshape.data))
                alpha_s = log_shift(alpha_fp)
                if alpha_s >= 1:
                    alpha_s /= 2
                print('{}==>{}'.format(alpha_fp.item(), alpha_s.item()))
            else:
                alpha_fp = torch.mean(torch.abs(w_reshape.data), dim=0)
                alpha_s = log_shift(alpha_fp)
                for i in range(len(alpha_s)):
                    if alpha_s[i] >= 1:
                        alpha_s[i] /= 2  # halve only the offending channel scale
            self.alpha.data.copy_(alpha_s)
            self.init_state.fill_(1)

        alpha = self.alpha.detach()
        pre_quantized_weight = w_reshape / alpha
        quantized_weight = alpha * FunSign.apply(pre_quantized_weight)
        w_q = quantized_weight.transpose(0, 1).reshape(self.weight.shape)
        # use the folded bias so the quantized path matches the unquantized one above
        return F.conv2d(x, w_q, bias_fold, self.stride,
                        self.padding, self.dilation, self.groups)
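
The folding identity this example relies on — BN(conv(x)) equals a single conv with weight W * A and bias (b - mu) * A + beta, where A = gamma / sqrt(var + eps) — can be verified standalone in eval mode (this check is mine, not from the repo):

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
conv = nn.Conv2d(3, 8, 3, bias=True).eval()
bn = nn.BatchNorm2d(8).eval()
# give BN non-trivial statistics and affine parameters
bn.running_mean.uniform_(-1, 1)
bn.running_var.uniform_(0.5, 1.5)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-1, 1)

A = bn.weight / torch.sqrt(bn.running_var + bn.eps)
w_fold = conv.weight * A.reshape(-1, 1, 1, 1)
b_fold = (conv.bias - bn.running_mean) * A + bn.bias

x = torch.randn(2, 3, 16, 16)
ref = bn(conv(x))
out = F.conv2d(x, w_fold, b_fold, conv.stride, conv.padding)
print(torch.allclose(ref, out, atol=1e-5))  # True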