Example #1
    def __init__(self,
                 mode="bipolar",
                 scaled=True,
                 acc_dim=0,
                 rng="Sobol",
                 rng_dim=5,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float):
        super(GainesAdd, self).__init__()

        # data representation
        self.mode = mode
        # whether it is scaled addition
        self.scaled = scaled
        if self.mode == "bipolar" and self.scaled is False:
            raise ValueError(
                "Non-scaled addition for bipolar data is not supported in the Gaines approach."
            )
        # dimension to do reduce sum
        self.acc_dim = torch.nn.Parameter(torch.zeros(1).type(torch.int8),
                                          requires_grad=False)
        self.acc_dim.fill_(acc_dim)
        self.stype = stype
        self.rng = RNG(rng_width, rng_dim, rng, rtype=rtype)()
        self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                          requires_grad=False)
Example #2
    def __init__(self,  
                 mode="unipolar", 
                 rng="Sobol", 
                 rng_dim=1,
                 rng_width=8, 
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(tanhP1, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype
        
        assert mode == "unipolar", "Combinational tanhP1 needs unipolar mode."
        self.rng_2 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+0, rng=self.rng, rtype=self.rtype)()
        self.rng_3 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+1, rng=self.rng, rtype=self.rtype)()
        self.rng_4 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+2, rng=self.rng, rtype=self.rtype)()
        self.rng_5 = RNG(bitwidth=self.bitwidth, dim=self.rng_dim+3, rng=self.rng, rtype=self.rtype)()    
        
        # constants used in computation
        self.n2_c = torch.tensor([62/153]).type(self.rtype)
        self.n3_c = torch.tensor([ 17/42]).type(self.rtype)
        self.n4_c = torch.tensor([   2/5]).type(self.rtype)
        self.n5_c = torch.tensor([   1/3]).type(self.rtype)

        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode, self.rtype)()
        self.sg_n5_c = SourceGen(self.n5_c, self.bitwidth, self.mode, self.rtype)()

        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)
        self.bs_n5_c = BSGen(self.sg_n5_c, self.rng_5, self.stype)

        # 8 dff in series
        self.input_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d4 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d5 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d6 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d7 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.input_d8 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)

        self.n_1_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        self.n_1_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype), requires_grad=False)
        
        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
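
The constants above are 62/153, 17/42, 2/5, and 1/3 (the same set reappears in tanhComb, Example #15). They match the coefficients of a Horner-style factorization of the Maclaurin series of tanh truncated at the ninth order; this reading is an interpretation of the constants, not something stated in the code:

\[
\tanh(x) \approx x\Bigl(1 - \tfrac{1}{3}x^2\bigl(1 - \tfrac{2}{5}x^2\bigl(1 - \tfrac{17}{42}x^2\bigl(1 - \tfrac{62}{153}x^2\bigr)\bigr)\bigr)\Bigr) = x - \tfrac{x^3}{3} + \tfrac{2x^5}{15} - \tfrac{17x^7}{315} + \tfrac{62x^9}{2835} - \dots
\]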
Example #3
 def __init__(self, 
              mode="bipolar", 
              jk_trace=True, 
              depth_kernel=1, 
              rng="Sobol", 
              rng_dim=4, 
              emit=True, 
              depth_sr=2, 
              stype=torch.float):
     super(UnarySqrt, self).__init__()
     
     assert math.ceil(math.log2(depth_kernel)) == math.floor(math.log2(depth_kernel)), "Input depth_kernel needs to be a power of 2."
     assert math.ceil(math.log2(depth_sr)) == math.floor(math.log2(depth_sr)), "Input depth_sr needs to be a power of 2."
     self.mode = mode
     self.stype = stype
     self.jk_trace = jk_trace
     self.emit = emit
     if emit is True:
         self.emit_out = torch.nn.Parameter(torch.zeros(1).type(torch.int8), requires_grad=False)
         self.nsadd = UnaryAdd(mode="unipolar", scaled=False, acc_dim=0, stype=torch.int8)
         self.sr = ShiftReg(depth_sr, stype=torch.int8)
         self.depth_sr = depth_sr
         self.rng = RNG(int(math.log2(depth_sr)), rng_dim, rng, torch.long)()
         self.idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
         if mode == "bipolar":
             self.bi2uni_emit = Bi2Uni(stype=torch.int8)
     else:
         self.trace = torch.nn.Parameter(torch.zeros(1).type(torch.int8), requires_grad=False)
         if mode == "bipolar":
             self.bi2uni = Bi2Uni(stype=torch.int8)
         if jk_trace is True:
             self.jkff = JKFF(stype=torch.int8)
         else:
             self.cordiv_kernel = CORDIV_kernel(depth=depth_kernel, rng=rng, rng_dim=rng_dim, stype=torch.int8)
             self.dff = torch.nn.Parameter(torch.zeros(1).type(torch.int8), requires_grad=False)
Example #4
 def __init__(self, 
              depth=5, 
              mode="bipolar", 
              rng="Sobol", 
              rng_dim=1, 
              stype=torch.float):
     super(GainesSqrt, self).__init__()
     
     # data representation
     self.mode = mode
     self.scnt_max = torch.nn.Parameter(torch.tensor([2**depth-1]).type(torch.float), requires_grad=False)
     self.scnt = torch.nn.Parameter(torch.tensor([2**(depth-1)]).type(torch.float), requires_grad=False)
     self.rng = RNG(depth, rng_dim, rng, torch.float)()
     self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
     self.out_d = torch.nn.Parameter(torch.zeros(1).type(torch.int8), requires_grad=False)
     self.stype = stype
Example #5
class GainesDiv(torch.nn.Module):
    """
    This module is for Gaines division.
    """
    def __init__(self,
                 depth=5,
                 mode="bipolar",
                 rng="Sobol",
                 rng_dim=1,
                 stype=torch.float):
        super(GainesDiv, self).__init__()

        # data representation
        self.mode = mode
        self.scnt_max = torch.nn.Parameter(torch.tensor([2**depth - 1]).type(torch.float), requires_grad=False)
        self.scnt = torch.nn.Parameter(torch.tensor([2**(depth - 1)]).type(torch.float), requires_grad=False)
        self.rng = RNG(depth, rng_dim, rng, torch.float)()
        self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                          requires_grad=False)
        self.divisor_d = torch.nn.Parameter(torch.zeros(1).type(torch.int8),
                                            requires_grad=False)
        self.stype = stype

    def forward(self, dividend, divisor):
        # output is the same for both bipolar and unipolar
        output = torch.gt(self.scnt,
                          self.rng[self.rng_idx % self.rng.numel()]).type(
                              torch.int8)
        self.rng_idx.data = self.rng_idx + 1
        output = output + torch.zeros_like(dividend, dtype=torch.int8)

        if self.mode == "unipolar":
            inc = dividend.type(torch.float)
            dec = (output & divisor.type(torch.int8)).type(torch.float)
        else:
            dd_ds = 1 - (dividend.type(torch.int8) ^ divisor.type(torch.int8))
            ds_ds = 1 - (self.divisor_d ^ divisor.type(torch.int8))
            self.divisor_d.data = divisor.type(torch.int8)
            ds_ds_out = 1 - (ds_ds ^ (1 - output))
            inc = (dd_ds & ds_ds_out).type(torch.float)
            dec = ((1 - dd_ds) & (1 - ds_ds_out)).type(torch.float)

            # The following implementation degrades accuracy due to fluctuation of the negative output.
            # inc = dividend.type(torch.float)
            # dec = (1 - output ^ divisor.type(torch.int8)).type(torch.float)

        # scnt is also the same in terms of the up/down behavior and comparison
        self.scnt.data = (inc * (self.scnt + 1) + (1 - inc) * self.scnt).view(
            dividend.size())
        self.scnt.data = (dec * (self.scnt - 1) + (1 - dec) * self.scnt)
        self.scnt.data = self.scnt.clamp(0, self.scnt_max.item())

        return output.type(self.stype)
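
A minimal unipolar usage sketch for the GainesDiv class above. It assumes GainesDiv and its RNG dependency are importable, and it encodes each probability as one independent Bernoulli bit per clock cycle; the probabilities and the 256-cycle stream length are illustrative choices, not part of the original module.

import torch

torch.manual_seed(0)
div = GainesDiv(depth=5, mode="unipolar", rng="Sobol", rng_dim=1, stype=torch.float)

dividend_p, divisor_p = 0.3, 0.6    # target quotient 0.3 / 0.6 = 0.5
n_cycles = 256
out_sum = torch.zeros(1)
for _ in range(n_cycles):
    # one stochastic bit per operand per cycle
    dividend_bit = torch.bernoulli(torch.tensor([dividend_p]))
    divisor_bit = torch.bernoulli(torch.tensor([divisor_p]))
    out_sum = out_sum + div(dividend_bit, divisor_bit)
print(out_sum.item() / n_cycles)    # expected to land near 0.5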
Example #6
 def __init__(self, depth=4, rng="Sobol", rng_dim=4, stype=torch.float):
     super(CORDIV_kernel, self).__init__()
     self.depth = depth
     self.sr = ShiftReg(depth, stype)
     self.rng = RNG(int(math.log2(depth)), rng_dim, rng, torch.long)()
     self.idx = torch.nn.Parameter(torch.zeros(1).type(torch.float),
                                   requires_grad=False)
     self.stype = stype
     self.init = torch.nn.Parameter(torch.ones(1).type(torch.bool),
                                    requires_grad=False)
     self.historic_q = torch.nn.Parameter(torch.ones(1).type(stype),
                                          requires_grad=False)
Example #7
class GainesAdd(torch.nn.Module):
    """
    This module is for Gaines addition.
    1) MUX for scaled addition
    2) OR gate for non-scaled addition
    """
    def __init__(self,
                 mode="bipolar",
                 scaled=True,
                 acc_dim=0,
                 rng="Sobol",
                 rng_dim=5,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float):
        super(GainesAdd, self).__init__()

        # data representation
        self.mode = mode
        # whether it is scaled addition
        self.scaled = scaled
        if self.mode == "bipolar" and self.scaled is False:
            raise ValueError(
                "Non-scaled addition for bipolar data is not supported in the Gaines approach."
            )
        # dimension to do reduce sum
        self.acc_dim = torch.nn.Parameter(torch.zeros(1).type(torch.int8),
                                          requires_grad=False)
        self.acc_dim.fill_(acc_dim)
        self.stype = stype
        self.rng = RNG(rng_width, rng_dim, rng, rtype=rtype)()
        self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                          requires_grad=False)

    def forward(self, input):
        if self.scaled is True:
            randNum = self.rng[self.rng_idx.item()]
            assert randNum.item() < input.size()[self.acc_dim.item()], \
                "randNum should be smaller than the dimension size of addition."
            # using a MUX for both unipolar and bipolar
            output = torch.unbind(
                torch.index_select(input, self.acc_dim.item(),
                                   randNum.type(torch.long).view(1)),
                self.acc_dim.item())[0]
            self.rng_idx.data = self.rng_idx.add(1) % self.rng.numel()
        else:
            # only support unipolar data using an OR gate
            output = torch.gt(torch.sum(input, self.acc_dim.item()), 0)

        return output.type(self.stype)
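
A brief sketch of the scaled (MUX) path of GainesAdd. Input bits arrive stacked along acc_dim, and the select MUX makes the output rate approximate the average of the input rates, i.e. the sum scaled by 1/n. It assumes the class above and its RNG dependency are available and that the RNG emits values in [0, 2**rng_width), so rng_width is set to log2 of the number of inputs to keep the select index in range; the values are illustrative.

import torch

torch.manual_seed(0)
probs = torch.tensor([0.2, 0.4, 0.6, 0.8])    # four unipolar inputs, mean = 0.5
add = GainesAdd(mode="unipolar", scaled=True, acc_dim=0,
                rng="Sobol", rng_dim=5, rng_width=2)

n_cycles = 256
acc = torch.zeros(1)
for _ in range(n_cycles):
    bits = torch.bernoulli(probs)    # one bit per input per cycle, stacked along dim 0
    acc = acc + add(bits)
print(acc.item() / n_cycles)         # expected to land near mean(probs) = 0.5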
Example #8
class GainesSqrt(torch.nn.Module):
    """
    This module is for Gaines square root.
    """
    def __init__(self, 
                 depth=5, 
                 mode="bipolar", 
                 rng="Sobol", 
                 rng_dim=1, 
                 stype=torch.float):
        super(GainesSqrt, self).__init__()
        
        # data representation
        self.mode = mode
        self.scnt_max = torch.nn.Parameter(torch.tensor([2**depth-1]).type(torch.float), requires_grad=False)
        self.scnt = torch.nn.Parameter(torch.tensor([2**(depth-1)]).type(torch.float), requires_grad=False)
        self.rng = RNG(depth, rng_dim, rng, torch.float)()
        self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long), requires_grad=False)
        self.out_d = torch.nn.Parameter(torch.zeros(1).type(torch.int8), requires_grad=False)
        self.stype = stype
        
    def forward(self, input):
        # output is the same for both bipolar and unipolar
        output = torch.gt(self.scnt, self.rng[self.rng_idx%self.rng.numel()]).type(torch.int8)
        self.rng_idx.data = self.rng_idx + 1
        output = output + torch.zeros_like(input, dtype=torch.int8)
        
        if self.mode == "unipolar":
            inc = input.type(torch.float)
            dec = (output & self.out_d).type(torch.float)
            self.out_d.data = output.type(torch.int8)
        else:
            # the commented-out implementation below is less accurate
            # prod = 1 - output ^ self.out_d
            # inc = (input.type(torch.int8) & prod).type(torch.float)
            # dec = ((1 - input).type(torch.int8) & (1 - prod)).type(torch.float)
            # self.out_d.data = output.type(torch.int8)
            
            inc = input.type(torch.float)
            dec = (1 - output ^ self.out_d).type(torch.float)
            self.out_d.data = output.type(torch.int8)
            
        # scnt is also the same in terms of the up/down behavior and comparison
        self.scnt.data = (inc * (self.scnt + 1) + (1 - inc) * self.scnt).view(input.size())
        self.scnt.data = (dec * (self.scnt - 1) + (1 - dec) * self.scnt)
        self.scnt.data = self.scnt.clamp(0, self.scnt_max.item())
        
        return output.type(self.stype)
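
The same driving pattern as the division sketch in Example #5 applies here: in unipolar mode the decrement term (the output ANDed with its delayed copy) approximates the squared output rate, so the counter settles where the output rate is roughly the square root of the input rate. A compact check under the same Bernoulli-encoding assumption:

sqrt_unit = GainesSqrt(depth=5, mode="unipolar", rng="Sobol", rng_dim=1, stype=torch.float)
x_p, n_cycles = 0.25, 256
ones = sum(sqrt_unit(torch.bernoulli(torch.tensor([x_p]))) for _ in range(n_cycles))
print(ones.item() / n_cycles)    # expected to land near sqrt(0.25) = 0.5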
Example #9
    def __init__(self,
                 bitwidth=8,
                 mode="bipolar",
                 static=True,
                 input_prob_1=None,
                 rtype=torch.float,
                 stype=torch.float):
        super(UnaryMul, self).__init__()

        self.bitwidth = bitwidth
        self.mode = mode
        self.static = static
        self.stype = stype
        self.rtype = rtype
        # the probability of input_1 used in static computation
        self.input_prob_1 = input_prob_1

        # the random number generator used in computation
        self.rng = RNG(bitwidth=self.bitwidth,
                       dim=1,
                       rng="Sobol",
                       rtype=self.rtype)()

        # currently only support static mode
        if self.static is True:
            # directly create an unchanging bitstream generator for static computation
            self.source_gen = SourceGen(self.input_prob_1, self.bitwidth,
                                        self.mode, self.rtype)()
            self.bs = BSGen(self.source_gen, self.rng, torch.int8)
            # rng_idx is used later as an enable signal; it is updated every cycle
            self.rng_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                              requires_grad=False)

            # Generate two separate bitstream generators and two enable signals for bipolar mode
            if self.mode == "bipolar":
                self.bs_inv = BSGen(self.source_gen, self.rng, torch.int8)
                self.rng_idx_inv = torch.nn.Parameter(torch.zeros(1).type(
                    torch.long),
                                                      requires_grad=False)
        else:
            raise ValueError("UnaryMul in-stream mode is not implemented.")
Example #10
    def __init__(self, 
                 in_features, 
                 out_features, 
                 binary_weight=None, 
                 binary_bias=None, 
                 bitwidth=8, 
                 bias=True, 
                 mode="bipolar", 
                 scaled=True, 
                 depth=8, 
                 rng_idx=1):
        super(GainesLinear4, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        
        # upper bound for accumulation counter in non-scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
            
        self.mode = mode
        self.scaled = scaled
        
        # accumulation offset
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("GainesLinear4 mode is not implemented.")
        
        # bias indication for original linear layer
        self.has_bias = bias
        
        # data bit width
        self.bitwidth = bitwidth
        
        # random sequences from LFSR RNG
        self.rng = RNGMulti(self.bitwidth, in_features, "LFSR")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "LFSR")()
        
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        
        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)

        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        
        if self.scaled is True:
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "LFSR")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            self.input_cnt = self.acc_bound.item()
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
Example #11
    def __init__(self, 
                 in_features, 
                 out_features, 
                 binary_weight=None, 
                 binary_bias=None, 
                 bitwidth=8, 
                 bias=True, 
                 mode="bipolar", 
                 scaled=True, 
                 btype=torch.float, 
                 rtype=torch.float, 
                 stype=torch.float):
        super(UnaryLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.stype = stype
        self.btype = btype
        self.rtype = rtype
        
        # upper bound for accumulation counter in scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
            
        self.mode = mode
        self.scaled = scaled
        
        # accumulation offset
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            raise ValueError("UnaryLinear mode is not implemented.")
        
        # bias indication for original linear layer
        self.has_bias = bias
        
        # data bit width
        self.bitwidth = bitwidth
        
        # random_sequence from sobol RNG
        self.rng = RNG(self.bitwidth, 1, "Sobol")()
        
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()

        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGen(self.buf_wght, self.rng, stype=stype)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng, stype=stype)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
            self.buf_wght_bs_inv = BSGen(self.buf_wght, self.rng, stype=stype)
            self.rng_wght_idx_inv = torch.nn.Parameter(torch.zeros_like(self.kernel_inv.weight, dtype=torch.long), requires_grad=False)

        self.accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if self.scaled is False:
            self.out_accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
Example #12
    def __init__(self,
                 bitwidth=8,
                 mode="bipolar",
                 normstability=0.5,
                 threshold=0.05,
                 value=None,
                 rng_dim=1,
                 rng="Sobol",
                 rtype=torch.float,
                 stype=torch.float):
        super(NSbuilder, self).__init__()
        
        self.bitwidth = bitwidth
        self.normstb = normstability
        if mode == "bipolar":
            self.val = (value + 1) / 2
            self.T = threshold / 2
        elif mode == "unipolar":
            self.val = value
            self.T = threshold
        self.mode = mode
        self.val_shape = self.val.size()
        self.val_dim = len(self.val_shape)

        self.stype = stype
        self.rtype = rtype
        
        self.L = torch.nn.Parameter(torch.tensor([2**self.bitwidth]).type(self.val.dtype), requires_grad=False)
        self.lp = torch.zeros_like(self.val)
        self.R = torch.ones_like(self.val)

        self.P_low = torch.zeros_like(self.val)
        self.P_up = torch.zeros_like(self.val)

        self.max_stable = torch.zeros_like(self.val)
        self.max_st_len = torch.zeros_like(self.val)
        self.new_st_len = torch.zeros_like(self.val)
        self.new_ns_len = torch.zeros_like(self.val)
        
        self.new_ns_val = torch.zeros_like(self.val)
        self.new_st_val = torch.zeros_like(self.val)
        self.new_ns_one = torch.zeros_like(self.val)
        self.new_st_one = torch.zeros_like(self.val)

        self.rng = RNG(
            bitwidth=bitwidth,
            dim=rng_dim,
            rng=rng,
            rtype=rtype)()

        self.ns_gen = torch.ones_like(self.val).type(torch.bool)
        self.st_gen = torch.zeros_like(self.val).type(torch.bool)
        
        self.out_cnt_ns = torch.zeros_like(self.val).type(torch.int32)
        self.out_cnt_st = torch.zeros_like(self.val).type(torch.int32)

        self.output = torch.zeros_like(self.val).type(stype)

        ## INIT:
        # Stage to calculate several essential params
        self.P_low = torch.max(self.val - self.T, torch.zeros_like(self.val))
        self.P_up = torch.min(torch.ones_like(self.val), self.val + self.T)
        upper = torch.min(torch.ceil(self.L * self.P_up), self.L)
        lower = torch.max(torch.floor(self.L * self.P_low), torch.zeros_like(self.L))
        
        search_range = (self.T * 2 * self.L + 1).type(torch.int32)
        
        max_stab_len, max_stab_R, max_stab_l_p = search_best_stab_parallel_numpy(lower.type(torch.int32).numpy(), 
                                                                                 upper.type(torch.int32).numpy(), 
                                                                                 self.L.type(torch.int32).numpy(), 
                                                                                 search_range.numpy())
        
        self.max_stable = torch.from_numpy(max_stab_len)
        self.lp = torch.from_numpy(max_stab_l_p)
        self.R = torch.from_numpy(max_stab_R)
        
        self.max_st_len = self.L - (self.max_stable)
        self.new_st_len = torch.ceil(self.max_st_len * self.normstb)
        self.new_ns_len = (self.L - self.new_st_len)
        
        val_gt_half = (self.val > 0.5).type(torch.float)
        self.new_ns_one = val_gt_half * (self.P_up * (self.new_ns_len + 1)) \
                        + (1 - val_gt_half) * torch.max((self.P_low * (self.new_ns_len + 1) - 1), torch.zeros_like(self.L))

        self.new_st_one = (self.val * self.L - self.new_ns_one)
        self.new_ns_val = self.new_ns_one / self.new_ns_len
        self.new_st_val = self.new_st_one / self.new_st_len

        self.src_st = SourceGen(self.new_st_val, self.bitwidth, "unipolar", self.rtype)()
        self.src_ns = SourceGen(self.new_ns_val, self.bitwidth, "unipolar", self.rtype)()
        self.bs_st = BSGen(self.src_st, self.rng)
        self.bs_ns = BSGen(self.src_ns, self.rng)
Example #13
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(expComb, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype

        # If a unit is named n_x, it is used in the calculation of n_(x+1).
        self.rng_1 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim,
                         rng=self.rng,
                         rtype=self.rtype)()

        self.rng_2 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 1,
                         rng=self.rng,
                         rtype=self.rtype)()

        self.rng_3 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 2,
                         rng=self.rng,
                         rtype=self.rtype)()

        self.rng_4 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 3,
                         rng=self.rng,
                         rtype=self.rtype)()

        # constants used in computation
        self.n1_c = torch.tensor([0.2000]).type(self.rtype)
        self.sg_n1_c = SourceGen(self.n1_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n1_c = BSGen(self.sg_n1_c, self.rng_1, self.stype)

        self.n2_c = torch.tensor([0.2500]).type(self.rtype)
        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)

        self.n3_c = torch.tensor([0.3333]).type(self.rtype)
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)

        self.n4_c = torch.tensor([0.5000]).type(self.rtype)
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)

        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                         requires_grad=False)

        # dff to prevent correlation
        self.input_d1 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                           requires_grad=False)
        self.input_d2 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                           requires_grad=False)
        self.input_d3 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                           requires_grad=False)
        self.input_d4 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                           requires_grad=False)
Example #14
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(expN1, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype

        assert mode == "unipolar", "Combinational expN1 needs unipolar mode."
        self.rng_1 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 0,
                         rng=self.rng,
                         rtype=self.rtype)()
        self.rng_2 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 1,
                         rng=self.rng,
                         rtype=self.rtype)()
        self.rng_3 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 2,
                         rng=self.rng,
                         rtype=self.rtype)()
        self.rng_4 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 3,
                         rng=self.rng,
                         rtype=self.rtype)()

        # constants used in computation
        self.n1_c = torch.tensor([0.2000]).type(self.rtype)
        self.n2_c = torch.tensor([0.2500]).type(self.rtype)
        self.n3_c = torch.tensor([0.3333]).type(self.rtype)
        self.n4_c = torch.tensor([0.5000]).type(self.rtype)

        self.sg_n1_c = SourceGen(self.n1_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode,
                                 self.rtype)()

        self.bs_n1_c = BSGen(self.sg_n1_c, self.rng_1, self.stype)
        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)

        # dff as isolation to mitigate correlation
        self.input_d1 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)
        self.input_d2 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)
        self.input_d3 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)
        self.input_d4 = torch.nn.Parameter(torch.zeros(1).type(self.stype),
                                           requires_grad=False)

        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                         requires_grad=False)
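
The constants 0.2000, 0.2500, 0.3333, and 0.5000 shared by expComb (Example #13) and expN1 (Example #14) are 1/5, 1/4, 1/3, and 1/2, which is consistent with a Horner-style factorization of the exponential Maclaurin series truncated at the fifth order; this reading is an interpretation of the constants, not something stated in the code. For a negated argument the factorization is:

\[
e^{-x} \approx 1 - x\Bigl(1 - \tfrac{x}{2}\Bigl(1 - \tfrac{x}{3}\Bigl(1 - \tfrac{x}{4}\Bigl(1 - \tfrac{x}{5}\Bigr)\Bigr)\Bigr)\Bigr) = 1 - x + \tfrac{x^2}{2} - \tfrac{x^3}{6} + \tfrac{x^4}{24} - \tfrac{x^5}{120}
\]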
Example #15
    def __init__(self,
                 mode="unipolar",
                 rng="Sobol",
                 rng_dim=1,
                 rng_width=8,
                 rtype=torch.float,
                 stype=torch.float,
                 btype=torch.float):
        super(tanhComb, self).__init__()

        self.bitwidth = rng_width
        self.mode = mode
        self.rng = rng
        self.rng_dim = rng_dim
        self.rtype = rtype
        self.stype = stype
        self.btype = btype

        # If a unit is named n_x, it is used in the calculation of n_(x+1).
        self.rng_2 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim,
                         rng=self.rng,
                         rtype=self.rtype)()

        self.rng_3 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 1,
                         rng=self.rng,
                         rtype=self.rtype)()

        self.rng_4 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 2,
                         rng=self.rng,
                         rtype=self.rtype)()

        self.rng_5 = RNG(bitwidth=self.bitwidth,
                         dim=self.rng_dim + 3,
                         rng=self.rng,
                         rtype=self.rtype)()

        # constants used in computation
        self.n2_c = torch.tensor([62 / 153]).type(self.rtype)
        self.sg_n2_c = SourceGen(self.n2_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n2_c = BSGen(self.sg_n2_c, self.rng_2, self.stype)

        self.n3_c = torch.tensor([17 / 42]).type(self.rtype)
        self.sg_n3_c = SourceGen(self.n3_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n3_c = BSGen(self.sg_n3_c, self.rng_3, self.stype)

        self.n4_c = torch.tensor([2 / 5]).type(self.rtype)
        self.sg_n4_c = SourceGen(self.n4_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n4_c = BSGen(self.sg_n4_c, self.rng_4, self.stype)

        self.n5_c = torch.tensor([1 / 3]).type(self.rtype)
        self.sg_n5_c = SourceGen(self.n5_c, self.bitwidth, self.mode,
                                 self.rtype)()
        self.bs_n5_c = BSGen(self.sg_n5_c, self.rng_5, self.stype)

        self.bs_idx = torch.nn.Parameter(torch.zeros(1).type(torch.long),
                                         requires_grad=False)

        # dff to prevent correlation

        # 4 dff in series
        self.input_4d1_1 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)
        self.input_4d2_1 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)
        self.input_4d3_1 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)
        self.input_4d4_1 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)

        self.d1 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                     requires_grad=False)
        self.d2 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                     requires_grad=False)
        self.d3 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                     requires_grad=False)

        # 4 dff in series
        self.input_4d1_2 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)
        self.input_4d2_2 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)
        self.input_4d3_2 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)
        self.input_4d4_2 = torch.nn.Parameter(torch.zeros(1).type(self.btype),
                                              requires_grad=False)
Example #16
    def __init__(self, 
                 in_features, 
                 out_features, 
                 weight_r=None, 
                 weight_i=None, 
                 bitwidth=8, 
                 scaled=False,
                 btype=torch.float,
                 rtype=torch.float,
                 stype=torch.float):
        super(UnaryLinearComplex, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.scaled = scaled
        self.stype = stype
        
        # upper bound for accumulation counter in scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features*2)
        
        # accumulation offset for bipolar
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.offset.add_((in_features*2-1)/2)
        
        # data bit width
        self.bitwidth = bitwidth
        
        # random sequence from Sobol RNG; only one type of RNG is required
        self.rng = RNG(self.bitwidth, 1, "Sobol", rtype)()
        
        # define the convolution weight and bias
        self.buf_wght_r = SourceGen(weight_r, bitwidth=self.bitwidth, mode="bipolar", rtype=rtype)()
        self.buf_wght_i = SourceGen(weight_i, bitwidth=self.bitwidth, mode="bipolar", rtype=rtype)()

        # define the kernel linear for different parts
        # 1. real feature and real weight
        self.kernel_1_fr_wr       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_1_fr_wr  = BSGen(self.buf_wght_r, self.rng, stype=stype)
        self.rng_wght_idx_1_fr_wr = torch.nn.Parameter(torch.zeros_like(self.kernel_1_fr_wr.weight, 
                                                                        dtype=torch.long), requires_grad=False)

        self.kernel_0_fr_wr       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_0_fr_wr  = BSGen(self.buf_wght_r, self.rng, stype=stype)
        self.rng_wght_idx_0_fr_wr = torch.nn.Parameter(torch.zeros_like(self.kernel_0_fr_wr.weight, 
                                                                        dtype=torch.long), requires_grad=False)
        
        # 2. real feature and imaginary weight
        self.kernel_1_fr_wi       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_1_fr_wi  = BSGen(self.buf_wght_i, self.rng, stype=stype)
        self.rng_wght_idx_1_fr_wi = torch.nn.Parameter(torch.zeros_like(self.kernel_1_fr_wi.weight, 
                                                                        dtype=torch.long), requires_grad=False)

        self.kernel_0_fr_wi       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_0_fr_wi  = BSGen(self.buf_wght_i, self.rng, stype=stype)
        self.rng_wght_idx_0_fr_wi = torch.nn.Parameter(torch.zeros_like(self.kernel_0_fr_wi.weight, 
                                                                        dtype=torch.long), requires_grad=False)
        
        # 3. imaginary feature and real weight
        self.kernel_1_fi_wr       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_1_fi_wr  = BSGen(self.buf_wght_r, self.rng, stype=stype)
        self.rng_wght_idx_1_fi_wr = torch.nn.Parameter(torch.zeros_like(self.kernel_1_fi_wr.weight, 
                                                                        dtype=torch.long), requires_grad=False)

        self.kernel_0_fi_wr       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_0_fi_wr  = BSGen(self.buf_wght_r, self.rng, stype=stype)
        self.rng_wght_idx_0_fi_wr = torch.nn.Parameter(torch.zeros_like(self.kernel_0_fi_wr.weight, 
                                                                        dtype=torch.long), requires_grad=False)
        
        # 4. imaginary feature and imaginary weight
        self.kernel_1_fi_wi       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_1_fi_wi  = BSGen(self.buf_wght_i, self.rng, stype=stype)
        self.rng_wght_idx_1_fi_wi = torch.nn.Parameter(torch.zeros_like(self.kernel_1_fi_wi.weight, 
                                                                        dtype=torch.long), requires_grad=False)

        self.kernel_0_fi_wi       = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.buf_wght_bs_0_fi_wi  = BSGen(self.buf_wght_i, self.rng, stype=stype)
        self.rng_wght_idx_0_fi_wi = torch.nn.Parameter(torch.zeros_like(self.kernel_0_fi_wi.weight, 
                                                                        dtype=torch.long), requires_grad=False)
        
        # define the accumulators for the real and imaginary parts of the output
        # real
        self.accumulator_r = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.out_accumulator_r = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        # imaginary
        self.accumulator_i = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.out_accumulator_i = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
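
The four kernel groups above implement the standard complex multiply, which is why separate accumulators are kept for the real and imaginary parts of the output; the pairing of a "_1_" and a "_0_" kernel per term presumably serves the input bit and its complement in the bipolar encoding (a reading of the naming, not stated in the code):

\[
(f_r + j f_i)(w_r + j w_i) = (f_r w_r - f_i w_i) + j\,(f_r w_i + f_i w_r)
\]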