Example #1
0
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        """Combinational tanh unit; supports unipolar bitstreams only.

        Args:
            mode: bitstream coding; must be "unipolar".
            rng: RNG family name forwarded to RNG (e.g. "Sobol").
            rng_dim: base RNG dimension; dims rng_dim..rng_dim+3 feed the
                four constant bitstream generators.
            rng_width: bit width of the RNG and source generators.
            rtype: dtype of generated random numbers.
            stype: dtype of bitstreams.
            btype: dtype of internal buffers.

        Raises:
            ValueError: if mode is not "unipolar".
        """
        super(tanhP1, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype

        # Bug fix: the original used `assert mode is "unipolar"`. `is` tests
        # object identity, which is implementation-dependent for string
        # literals (SyntaxWarning since Python 3.8), and asserts are stripped
        # under `python -O`. Compare with `==` and raise instead.
        if mode != "unipolar":
            raise ValueError("Combinational tanhP1 needs unipolar mode.")

        # four decorrelated RNG streams on consecutive dimensions
        self.rng_2 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+0, rng=self.rng, rtype=self.rtype)()
        self.rng_3 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+1, rng=self.rng, rtype=self.rtype)()
        self.rng_4 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+2, rng=self.rng, rtype=self.rtype)()
        self.rng_5 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+3, rng=self.rng, rtype=self.rtype)()

        # constants used in computation
        self.n2_c = torch.tensor([62/153]).type(self.rtype)
        self.n3_c = torch.tensor([ 17/42]).type(self.rtype)
        self.n4_c = torch.tensor([   2/5]).type(self.rtype)
        self.n5_c = torch.tensor([   1/3]).type(self.rtype)

        # one binary source and one bitstream generator per constant
        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n5_c = SourceGen(self.n5_c, self.bitwidth, self.mode, self.rtype)()

        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)
        self.bs_n5_c = BSGen(self.sg_n5_c, self.rng_5, self.stype)

        # DFF chain on the input (8 stages despite the original "4 dff" note)
        self.input_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d4 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d5 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d6 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d7 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d8 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)

        # DFFs for the intermediate node n_1
        self.n_1_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)

        # shared cycle index for the constant bitstream generators
        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
Example #2
0
File: mul.py — Project: RuokaiYin/U-I-Sim
    def __init__(self,
                 bitwidth=8,
                 mode="bipolar",
                 static=True,
                 input_prob_1=None,
                 rtype=torch.float,
                 stype=torch.float):
        """Unary multiplier; only the static mode is implemented.

        Args:
            bitwidth: bit width of the RNG / source generator.
            mode: bitstream coding, "unipolar" or "bipolar".
            static: if True, input 1 has a fixed, known probability.
            input_prob_1: probability of input_1 used in static computation.
            rtype: dtype of generated random numbers.
            stype: dtype of bitstreams.

        Raises:
            ValueError: if static is not True (in-stream mode unimplemented).
        """
        super(UnaryMul, self).__init__()

        self.bitwidth = bitwidth
        self.mode = mode
        self.static = static
        self.stype = stype
        self.rtype = rtype
        # the probability of input_1 used in static computation
        self.input_prob_1 = input_prob_1

        # the random number generator used in computation
        self.rng = RNG(bitwidth=self.bitwidth,
                       dim=1,
                       rng="Sobol",
                       rtype=self.rtype)()

        # currently only support static mode
        if self.static is True:
            # directly create an unchanging bitstream generator for static computation
            self.source_gen = SourceGen(self.input_prob_1, self.bitwidth,
                                        self.mode, self.rtype)()
            self.bs = BSGen(self.source_gen, self.rng, torch.int8)
            # rng_idx is used later as an enable signal, updated every cycle
            self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                              requires_grad=False)

            # Bug fix: `self.mode is "bipolar"` compared string identity
            # (implementation-dependent; SyntaxWarning since Python 3.8);
            # use equality instead.
            # Generate a separate bitstream generator and enable signal for
            # the inverted path in bipolar mode.
            if self.mode == "bipolar":
                self.bs_inv = BSGen(self.source_gen, self.rng, torch.int8)
                self.rng_idx_inv = torch.nn.Parameter(torch.zeros(1).type(
                    torch.long),
                                                      requires_grad=False)
        else:
            raise ValueError("UnaryMul in-stream mode is not implemented.")
Example #3
0
    def __init__(self, 
                 in_features, 
                 out_features, 
                 binary_weight=None, 
                 binary_bias=None, 
                 bitwidth=8, 
                 bias=True, 
                 mode="bipolar", 
                 scaled=True, 
                 depth=8, 
                 rng_idx=1):
        """Gaines-style stochastic linear layer driven by LFSR RNGs.

        Args:
            in_features: input dimension of the linear kernel.
            out_features: output dimension of the linear kernel.
            binary_weight: binary weight tensor fed to SourceGen.
            binary_bias: binary bias tensor fed to SourceGen (if bias).
            bitwidth: data bit width.
            bias: whether the underlying linear kernel has a bias.
            mode: "unipolar" or "bipolar" bitstream coding.
            scaled: True for scaled accumulation, False for counter-based.
            depth: counter depth used in non-scaled mode.
            rng_idx: seed index for the scaling LFSR.

        Raises:
            ValueError: if mode is neither "unipolar" nor "bipolar".
        """
        super(GainesLinear4, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        
        # upper bound for accumulation counter in non-scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
            
        self.mode = mode
        self.scaled = scaled
        
        # accumulation offset.
        # Bug fix: the original compared strings with `is` (identity), which is
        # implementation-dependent for literals; use `==` instead.
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("UnaryLinear mode is not implemented.")
        
        # bias indication for original linear layer
        self.has_bias = bias
        
        # data bit width
        self.bitwidth = bitwidth
        
        # random sequences from LFSR RNGs (comment fixed: the original said
        # "sobol RNG" but the code uses "LFSR")
        self.rng = RNGMulti(self.bitwidth, in_features, "LFSR")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "LFSR")()
        
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        
        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)

        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        
        if self.scaled is True:
            # LFSR sized to the accumulation bound; presumably drives the
            # scaled-output selection — confirm against forward()
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "LFSR")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            # saturating counter state for non-scaled accumulation,
            # initialized at half range
            self.input_cnt = self.acc_bound.item()
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
Example #4
0
    def __init__(self, 
                 in_features, 
                 out_features, 
                 binary_weight=None, 
                 binary_bias=None, 
                 bitwidth=8, 
                 bias=True, 
                 mode="bipolar", 
                 scaled=True, 
                 btype=torch.float, 
                 rtype=torch.float, 
                 stype=torch.float):
        """Unary (stochastic) linear layer driven by a single Sobol RNG.

        Args:
            in_features: input dimension of the linear kernel.
            out_features: output dimension of the linear kernel.
            binary_weight: binary weight tensor fed to SourceGen.
            binary_bias: binary bias tensor fed to SourceGen (if bias).
            bitwidth: data bit width.
            bias: whether the underlying linear kernel has a bias.
            mode: "unipolar" or "bipolar" bitstream coding.
            scaled: True for scaled accumulation, False for unscaled.
            btype: dtype of internal buffers.
            rtype: dtype of generated random numbers.
            stype: dtype of bitstreams.

        Raises:
            ValueError: if mode is neither "unipolar" nor "bipolar".
        """
        super(UnaryLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.stype = stype
        self.btype = btype
        self.rtype = rtype
        
        # upper bound for accumulation counter in scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
            
        self.mode = mode
        self.scaled = scaled
        
        # accumulation offset.
        # Bug fix: the original compared strings with `is` (identity), which is
        # implementation-dependent for literals; use `==` instead.
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("UnaryLinear mode is not implemented.")
        
        # bias indication for original linear layer
        self.has_bias = bias
        
        # data bit width
        self.bitwidth = bitwidth
        
        # random_sequence from sobol RNG
        self.rng = RNG(self.bitwidth, 1, "Sobol")()
        
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()

        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGen(self.buf_wght, self.rng, stype=stype)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng, stype=stype)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
            self.buf_wght_bs_inv = BSGen(self.buf_wght, self.rng, stype=stype)
            self.rng_wght_idx_inv = torch.nn.Parameter(torch.zeros_like(self.kernel_inv.weight, dtype=torch.long), requires_grad=False)

        self.accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if self.scaled is False:
            self.out_accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
Example #5
0
    def __init__(self,
                 bitwidth=8,
                 mode="bipolar",
                 normstability=0.5,
                 threshold=0.05,
                 value=None,
                 rng_dim=1,
                 rng="Sobol",
                 rtype=torch.float,
                 stype=torch.float):
        """Build a stability-controlled bitstream generator.

        Splits the stream of length 2**bitwidth into a "stable" segment and a
        "non-stable" segment sized from the requested normalized stability,
        then prepares one source/bitstream generator per segment.

        Args:
            bitwidth: bitstream length is 2**bitwidth.
            mode: "unipolar" or "bipolar" value coding.
            normstability: requested normalized stability.
            threshold: stability tolerance around the target value.
            value: tensor of target values.
            rng_dim: RNG dimension.
            rng: RNG family name (e.g. "Sobol").
            rtype: dtype of generated random numbers.
            stype: dtype of output bitstreams.

        Raises:
            ValueError: if mode is neither "unipolar" nor "bipolar".
        """
        super(NSbuilder, self).__init__()
        
        self.bitwidth = bitwidth
        self.normstb = normstability
        # Bug fix: the original compared strings with `is` (identity, which is
        # implementation-dependent for literals) and had no else branch, so an
        # unknown mode silently left self.val unset and crashed later with an
        # AttributeError. Compare with `==` and fail fast.
        if mode == "bipolar":
            # map bipolar value/threshold into unipolar probability space
            self.val = (value + 1) /2
            self.T = threshold / 2
        elif mode == "unipolar":
            self.val = value
            self.T = threshold
        else:
            raise ValueError("NSbuilder mode is not implemented.")
        self.mode = mode
        self.val_shape = self.val.size()
        self.val_dim = len(self.val_shape)

        self.stype = stype
        self.rtype = rtype
        
        # total bitstream length
        self.L = torch.nn.Parameter(torch.tensor([2**self.bitwidth]).type(self.val.dtype), requires_grad=False)
        self.lp = torch.zeros_like(self.val)
        self.R = torch.ones_like(self.val)

        self.P_low = torch.zeros_like(self.val)
        self.P_up = torch.zeros_like(self.val)

        self.max_stable = torch.zeros_like(self.val)
        self.max_st_len = torch.zeros_like(self.val)
        self.new_st_len = torch.zeros_like(self.val)
        self.new_ns_len = torch.zeros_like(self.val)
        
        self.new_ns_val = torch.zeros_like(self.val)
        self.new_st_val = torch.zeros_like(self.val)
        self.new_ns_one = torch.zeros_like(self.val)
        self.new_st_one = torch.zeros_like(self.val)

        self.rng = RNG(
            bitwidth=bitwidth,
            dim=rng_dim,
            rng=rng,
            rtype=rtype)()

        # phase flags: generation starts in the non-stable segment
        self.ns_gen = torch.ones_like(self.val).type(torch.bool)
        self.st_gen = torch.zeros_like(self.val).type(torch.bool)
        
        self.out_cnt_ns = torch.zeros_like(self.val).type(torch.int32)
        self.out_cnt_st = torch.zeros_like(self.val).type(torch.int32)

        self.output = torch.zeros_like(self.val).type(stype)

        ## INIT:
        # Stage to calculate several essential params
        self.P_low = torch.max(self.val - self.T, torch.zeros_like(self.val))
        self.P_up = torch.min(torch.ones_like(self.val), self.val + self.T)
        upper = torch.min(torch.ceil(self.L * self.P_up), self.L)
        lower = torch.max(torch.floor(self.L * self.P_low), torch.zeros_like(self.L))
        
        # number of candidate positions to search (typo fixed: was seach_range)
        search_range = (self.T * 2 * self.L + 1).type(torch.int32)
        
        max_stab_len, max_stab_R, max_stab_l_p = search_best_stab_parallel_numpy(lower.type(torch.int32).numpy(), 
                                                                                 upper.type(torch.int32).numpy(), 
                                                                                 self.L.type(torch.int32).numpy(), 
                                                                                 search_range.numpy())
        
        self.max_stable = torch.from_numpy(max_stab_len)
        self.lp = torch.from_numpy(max_stab_l_p)
        self.R = torch.from_numpy(max_stab_R)
        
        # split the stream: new_st_len stable bits, new_ns_len non-stable bits
        self.max_st_len = self.L - (self.max_stable)
        self.new_st_len = torch.ceil(self.max_st_len * self.normstb)
        self.new_ns_len = (self.L - self.new_st_len)
        
        # ones budget for the non-stable segment, chosen by value side
        val_gt_half = (self.val > 0.5).type(torch.float)
        self.new_ns_one = val_gt_half * (self.P_up * (self.new_ns_len + 1)) \
                        + (1 - val_gt_half) * torch.max((self.P_low * (self.new_ns_len + 1) - 1), torch.zeros_like(self.L))

        self.new_st_one = (self.val * self.L - self.new_ns_one)
        self.new_ns_val = self.new_ns_one / self.new_ns_len
        self.new_st_val = self.new_st_one / self.new_st_len

        # per-segment sources and bitstream generators (unipolar space)
        self.src_st = SourceGen(self.new_st_val, self.bitwidth, "unipolar", self.rtype)()
        self.src_ns = SourceGen(self.new_ns_val, self.bitwidth, "unipolar", self.rtype)()
        self.bs_st = BSGen(self.src_st, self.rng)
        self.bs_ns = BSGen(self.src_ns, self.rng)
Example #6
0
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        """Combinational exp unit.

        Builds four RNG streams on consecutive dimensions, one constant
        bitstream generator per stage (n1..n4), a shared cycle index,
        and four DFF buffers that break input correlation.
        """
        super(expComb, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype

        # If a unit is named n_x, it is used in the calculation of n_(x+1).
        # For each stage k: rng_k, constant nk_c, source sg_nk_c and
        # bitstream generator bs_nk_c.
        for offset, const in enumerate((0.2000, 0.2500, 0.3333, 0.5000)):
            stage = offset + 1
            stage_rng = RNG(bitwidth=self.bitwidth,
                            dim=self.rng_dim + offset,
                            rng=self.rng,
                            rtype=self.rtype)()
            setattr(self, "rng_%d" % stage, stage_rng)

            const_t = torch.tensor([const]).type(self.rtype)
            setattr(self, "n%d_c" % stage, const_t)
            stage_sg = SourceGen(const_t, self.bitwidth, self.mode,
                                 self.rtype)()
            setattr(self, "sg_n%d_c" % stage, stage_sg)
            setattr(self, "bs_n%d_c" % stage,
                    BSGen(stage_sg, stage_rng, self.stype))

        # shared cycle index for the constant bitstream generators
        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                         requires_grad=False)

        # dff buffers to prevent correlation
        for k in range(1, 5):
            setattr(self, "input_d%d" % k,
                    torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                       requires_grad=False))
Example #7
0
File: exp.py — Project: RuokaiYin/U-I-Sim
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        """Combinational exp(-x)-style unit; supports unipolar bitstreams only.

        Args:
            mode: bitstream coding; must be "unipolar".
            rng: RNG family name forwarded to RNG (e.g. "Sobol").
            rng_dim: base RNG dimension; dims rng_dim..rng_dim+3 feed the
                four constant bitstream generators.
            rng_width: bit width of the RNG and source generators.
            rtype: dtype of generated random numbers.
            stype: dtype of bitstreams.
            btype: dtype of internal buffers.

        Raises:
            ValueError: if mode is not "unipolar".
        """
        super(expN1, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype

        # Bug fix: the original used `assert mode is "unipolar"`. `is` tests
        # object identity, which is implementation-dependent for string
        # literals (SyntaxWarning since Python 3.8), and asserts are stripped
        # under `python -O`. Compare with `==` and raise instead.
        if mode != "unipolar":
            raise ValueError("Combinational expN1 needs unipolar mode.")
        self.rng_1 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 0,
                         rng=self.rng,
                         rtype=self.rtype)()
        self.rng_2 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 1,
                         rng=self.rng,
                         rtype=self.rtype)()
        self.rng_3 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 2,
                         rng=self.rng,
                         rtype=self.rtype)()
        self.rng_4 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 3,
                         rng=self.rng,
                         rtype=self.rtype)()

        # constants used in computation
        self.n1_c = torch.tensor([0.2000]).type(self.rtype)
        self.n2_c = torch.tensor([0.2500]).type(self.rtype)
        self.n3_c = torch.tensor([0.3333]).type(self.rtype)
        self.n4_c = torch.tensor([0.5000]).type(self.rtype)

        # one binary source and one bitstream generator per constant
        self.sg_n1_c = SourceGen(self.n1_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode,
                                 self.rtype)()

        self.bs_n1_c = BSGen(self.sg_n1_c, self.rng_1, self.stype)
        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)

        # dff as isolation to mitigate correlation
        self.input_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)
        self.input_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)
        self.input_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)
        self.input_d4 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)

        # shared cycle index for the constant bitstream generators
        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                         requires_grad=False)
Example #8
0
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        """Combinational tanh unit.

        Builds four RNG streams on consecutive dimensions, one constant
        bitstream generator per stage (n2..n5), a shared cycle index, and
        DFF buffers (two chains of four plus three singles) used to break
        correlation between paths.
        """
        super(tanhComb, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype

        # If a unit is named n_x, it is used in the calculation of n_(x+1).
        # For each stage k: rng_k, constant nk_c, source sg_nk_c and
        # bitstream generator bs_nk_c.
        for offset, const in enumerate((62 / 153, 17 / 42, 2 / 5, 1 / 3)):
            stage = offset + 2
            stage_rng = RNG(bitwidth=self.bitwidth,
                            dim=self.rng_dim + offset,
                            rng=self.rng,
                            rtype=self.rtype)()
            setattr(self, "rng_%d" % stage, stage_rng)

            const_t = torch.tensor([const]).type(self.rtype)
            setattr(self, "n%d_c" % stage, const_t)
            stage_sg = SourceGen(const_t, self.bitwidth, self.mode,
                                 self.rtype)()
            setattr(self, "sg_n%d_c" % stage, stage_sg)
            setattr(self, "bs_n%d_c" % stage,
                    BSGen(stage_sg, stage_rng, self.stype))

        # shared cycle index for the constant bitstream generators
        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                         requires_grad=False)

        # dff buffers to prevent correlation

        # first chain: 4 dff in series
        for k in range(1, 5):
            setattr(self, "input_4d%d_1" % k,
                    torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                       requires_grad=False))

        # three standalone dffs
        for k in range(1, 4):
            setattr(self, "d%d" % k,
                    torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                       requires_grad=False))

        # second chain: 4 dff in series
        for k in range(1, 5):
            setattr(self, "input_4d%d_2" % k,
                    torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                       requires_grad=False))
Example #9
0
    def __init__(self, 
                 in_features, 
                 out_features, 
                 weight_r=None, 
                 weight_i=None, 
                 bitwidth=8, 
                 scaled=False,
                 btype=torch.float,
                 rtype=torch.float,
                 stype=torch.float):
        """Unary linear layer over complex inputs (bipolar coding).

        Creates one kernel group per (feature part, weight part)
        combination — fr_wr, fr_wi, fi_wr, fi_wi — each with two paired
        sub-kernels (tags 1 and 0), plus accumulators for the real and
        imaginary output parts.
        """
        super(UnaryLinearComplex, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.scaled = scaled
        self.stype = stype

        # upper bound for the accumulation counter in scaled mode:
        # real + imaginary contributions give 2 * in_features inputs
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features*2)

        # accumulation offset for bipolar coding
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.offset.add_((in_features*2-1)/2)

        # data bit width
        self.bitwidth = bitwidth

        # a single Sobol RNG serves every bitstream generator
        self.rng = RNG(self.bitwidth, 1, "Sobol", rtype)()

        # binary sources for the real and imaginary weights
        self.buf_wght_r = SourceGen(weight_r, bitwidth=self.bitwidth, mode="bipolar", rtype=rtype)()
        self.buf_wght_i = SourceGen(weight_i, bitwidth=self.bitwidth, mode="bipolar", rtype=rtype)()

        # kernel groups: each entry pairs a part name with the weight source
        # it draws bits from (real weights for *_wr, imaginary for *_wi)
        part_sources = (("fr_wr", self.buf_wght_r),
                        ("fr_wi", self.buf_wght_i),
                        ("fi_wr", self.buf_wght_r),
                        ("fi_wi", self.buf_wght_i))
        for part, wght_buf in part_sources:
            for tag in (1, 0):
                kernel = torch.nn.Linear(self.in_features, self.out_features, bias=False)
                setattr(self, "kernel_%d_%s" % (tag, part), kernel)
                setattr(self, "buf_wght_bs_%d_%s" % (tag, part),
                        BSGen(wght_buf, self.rng, stype=stype))
                setattr(self, "rng_wght_idx_%d_%s" % (tag, part),
                        torch.nn.Parameter(torch.zeros_like(kernel.weight, dtype=torch.long),
                                           requires_grad=False))

        # accumulators for the real ("r") and imaginary ("i") output parts
        for part in ("r", "i"):
            setattr(self, "accumulator_%s" % part,
                    torch.nn.Parameter(torch.zeros(1), requires_grad=False))
            setattr(self, "out_accumulator_%s" % part,
                    torch.nn.Parameter(torch.zeros(1), requires_grad=False))