Example #1
def round_shift_weights(model, clone=False):

    # for name, param in model.named_parameters():
    #     if param.is_leaf:
    #         print(name)
    model.eval()
    if(clone):
        model = copy.deepcopy(model)

    for name, module in reversed(model._modules.items()):
        if len(list(module.children())) > 0:
            # recurse
            model._modules[name] = round_shift_weights(model=module)

        if type(module) == deepshift.modules.LinearShift or type(module) == deepshift.modules.Conv2dShift:
            module.shift.data = module.shift.round()
            module.sign.data = module.sign.round().sign()

            if (module.bias is not None):
                module.bias.data = utils.round_to_fixed(module.bias, fraction=16, integer=16)
        elif type(module) == deepshift.modules_q.LinearShiftQ or type(module) == deepshift.modules_q.Conv2dShiftQ:
            module.weight.data = utils.round_power_of_2(module.weight)

            if (module.bias is not None):
                module.bias.data = utils.round_to_fixed(module.bias, fraction=16, integer=16)

    return model
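A brief usage sketch (hedged): `trained_model` below is a placeholder for a network whose layers were already converted to the DeepShift LinearShift/Conv2dShift modules; round_shift_weights then snaps the continuous shift/sign parameters to their discrete values.

# Usage sketch: round on a deep copy so the trained parameters stay untouched.
rounded = round_shift_weights(trained_model, clone=True)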
Example #2
def round_shift_weights(model, shift_base=2, clone=False):
    if (clone):
        model = copy.deepcopy(model)

    for name, module in reversed(model._modules.items()):
        if len(list(module.children())) > 0:
            # recurse
            model._modules[name] = round_shift_weights(model=module,
                                                       shift_base=shift_base)

        if type(module) == deepshift.modules.LinearShift or type(
                module) == deepshift.modules.Conv2dShift:
            module.shift.data = module.shift.round()
            module.sign.data = module.sign.round().sign()

            if (module.bias is not None):
                module.bias.data = utils.round_to_fixed(module.bias,
                                                        fraction_bits=16,
                                                        integer_bits=16)
        elif type(module) == deepshift.modules_q.LinearShiftQ or type(
                module) == deepshift.modules_q.Conv2dShiftQ:
            module.weight.data = utils.round_power_of_2(
                module.weight, shift_base)

            if (module.bias is not None):
                module.bias.data = utils.round_to_fixed(module.bias,
                                                        fraction_bits=16,
                                                        integer_bits=16)

    return model
Example #3
    def forward(ctx, input, shift, sign, shift_base=2, bias=None, conc_weight=None, use_kernel=False, use_cuda=True, rounding='deterministic', shift_range=(-14, 0)):
        fraction_bits = 16
        integer_bit = 16
        if use_kernel:
            input_fixed_point = (input * (2 ** fraction_bits)).int()
            if bias is not None:
                bias_fixed_point = (bias * (2 ** fraction_bits)).int()
            else:
                # avoid an undefined name when the layer has no bias
                bias_fixed_point = None

            out = deepshift.kernels.linear(input_fixed_point, shift, sign, bias_fixed_point, conc_weight, use_cuda)
            out = out.float()
            out = out / (2**fraction_bits)
        else:
            sign = sign.clamp(-1,1)
            shift = shift.clamp(*shift_range)
            input.data = utils.round_to_fixed(input.data, fraction_bits, integer_bit)
            if bias is not None:
                bias.data = utils.round_to_fixed(bias.data, fraction_bits, integer_bit)

            v = shift_base**shift.round() * sign.round().sign()
            out = input.mm(v.t())
            if bias is not None:
                out += bias.unsqueeze(0).expand_as(out)
            
            ctx.shift_base = shift_base  # non-tensor args cannot go through save_for_backward
            ctx.save_for_backward(input, shift, sign, bias, v)
        return out
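To make the power-of-two emulation in the else branch concrete, here is a small illustrative snippet (values are arbitrary, not taken from the project):

import torch

shift = torch.tensor([-3.0, -1.2, -5.7])
sign = torch.tensor([1.0, -1.0, 1.0])
# Each weight becomes a signed power of two, so multiplying by it
# amounts to a bit shift plus a sign flip in fixed-point arithmetic.
v = 2 ** shift.round() * sign.round().sign()
print(v)  # tensor([ 0.1250, -0.5000,  0.0156])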
Example #4
    def forward(ctx,
                input,
                shift,
                sign,
                bias=None,
                conc_weight=None,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                use_kernel=False,
                use_cuda=False,
                rounding='deterministic',
                shift_range=(-14, 0)):
        fraction_bits = 16
        integer_bits = 16

        # start_time = time.time()
        if use_kernel:
            input_fixed_point = (input * (2**fraction_bits)).int()
            if bias is not None:
                bias_fixed_point = (bias * (2**fraction_bits)).int()
            else:
                bias_fixed_point = None

            out = deepshift.kernels.conv2d(input_fixed_point, shift, sign,
                                           bias_fixed_point, conc_weight,
                                           stride, padding, dilation, groups,
                                           use_cuda)

            out = out.float()
            out = out / (2**fraction_bits)
        else:
            shift = shift.clamp(*shift_range)
            sign = sign.clamp(-1, 1)
            input.data = utils.round_to_fixed(input.data, fraction_bits,
                                              integer_bits)

            if bias is not None:
                bias.data = utils.round_to_fixed(bias.data, fraction_bits,
                                                 integer_bits)

            # shift_rounded = utils.round(self.shift, stochastic=False)
            # sign_rounded_signed = torch.sign(utils.round(self.sign, stochastic=False))

            shift_rounded = utils.round(shift, stochastic=False)
            sign_rounded_signed = torch.sign(
                utils.round(sign, stochastic=False))
            v = 2**shift_rounded * sign_rounded_signed
            out = F.conv2d(input, v, bias, stride, padding, dilation, groups)

            ctx.save_for_backward(input, shift, sign, bias, v)
            ctx.stride = stride
            ctx.padding = padding
            ctx.dilation = dilation
            ctx.groups = groups

        return out
Example #5
    def forward(ctx,
                input,
                shift,
                sign,
                bias=None,
                conc_weight=None,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                use_kernel=False,
                use_cuda=False):
        fraction_bits = 16
        integer_bits = 16

        # start_time = time.time()
        if use_kernel:
            input_fixed_point = (input * (2**fraction_bits)).int()
            if bias is not None:
                bias_fixed_point = (bias * (2**fraction_bits)).int()
            else:
                bias_fixed_point = None

            out = deepshift.kernels.conv2d(input_fixed_point, shift, sign,
                                           bias_fixed_point, conc_weight,
                                           stride, padding, dilation, groups,
                                           use_cuda)

            out = out.float()
            out = out / (2**fraction_bits)
        else:
            sign = sign.clamp(-1, 1)
            input.data = utils.round_to_fixed(input.data, fraction_bits,
                                              integer_bits)

            if bias is not None:
                bias.data = utils.round_to_fixed(bias.data, fraction_bits,
                                                 integer_bits)

            v = 2**shift.round() * sign.round().sign()
            out = F.conv2d(input, v, bias, stride, padding, dilation, groups)

            ctx.save_for_backward(input, shift, sign, bias, v)
            ctx.stride = stride
            ctx.padding = padding
            ctx.dilation = dilation
            ctx.groups = groups

        return out
Example #6
    def forward(ctx,
                input,
                shift,
                sign,
                bias=None,
                conc_weight=None,
                use_kernel=False,
                use_cuda=True,
                rounding='deterministic',
                shift_range=(-14, 0),
                act_integer_bits=16,
                act_fraction_bits=16):
        if use_kernel:
            input_fixed_point = (input * (2**act_fraction_bits)).int()
            if bias is not None:
                bias_fixed_point = (bias * (2**act_fraction_bits)).int()
            else:
                # avoid an undefined name when the layer has no bias
                bias_fixed_point = None

            out = deepshift.kernels.linear(input_fixed_point, shift, sign,
                                           bias_fixed_point, conc_weight,
                                           use_cuda)
            out = out.float()
            out = out / (2**act_fraction_bits)
        else:
            sign = sign.clamp(-1, 1)
            shift = shift.clamp(*shift_range)
            input.data = utils.round_to_fixed(input.data, act_integer_bits,
                                              act_fraction_bits)
            # FIXME: don't modify bias.data, but instead create a new tensor?
            if bias is not None:
                bias.data = utils.round_to_fixed(bias.data, act_integer_bits,
                                                 act_fraction_bits)

            v = 2**shift.round() * sign.round().sign()
            out = input.mm(v.t())
            if bias is not None:
                out += bias.unsqueeze(0).expand_as(out)

            ctx.save_for_backward(input, shift, sign, bias, v)

        return out
Example #7
def round_shift_weights(model, clone=False, weight_bits=5, act_integer_bits=16, act_fraction_bits=16):
    if(clone):
        model = copy.deepcopy(model)

    for name, module in reversed(model._modules.items()):
        if len(list(module.children())) > 0:
            # recurse
            model._modules[name] = round_shift_weights(model=module, weight_bits=weight_bits, act_integer_bits=act_integer_bits, act_fraction_bits=act_fraction_bits)

        if type(module) == deepshift.modules.LinearShift or type(module) == deepshift.modules.Conv2dShift:
            module.shift.data = module.shift.round()
            module.sign.data = module.sign.round().sign()

            if (module.bias is not None):
                module.bias.data = utils.round_to_fixed(module.bias, integer_bits=act_integer_bits, fraction_bits=act_fraction_bits)
        elif type(module) == deepshift.modules_q.LinearShiftQ or type(module) == deepshift.modules_q.Conv2dShiftQ:
            module.weight.data = utils.clampabs(module.weight.data, 2**module.shift_range[0], 2**module.shift_range[1]) 
            module.weight.data = utils.round_power_of_2(module.weight)

            if (module.bias is not None):
                module.bias.data = utils.round_to_fixed(module.bias, integer_bits=act_integer_bits, fraction_bits=act_fraction_bits)

    return model
Example #8
def convert_to_shift(model,
                     shift_depth,
                     shift_type,
                     shift_base=2,
                     convert_all_linear=True,
                     convert_weights=False,
                     freeze_sign=False,
                     use_kernel=False,
                     use_cuda=True,
                     rounding='deterministic',
                     weight_bits=5):
    conversion_count = 0
    for name, module in reversed(model._modules.items()):
        if len(list(module.children())) > 0:
            # recurse
            model._modules[name], num_converted = convert_to_shift(
                model=module,
                shift_depth=shift_depth - conversion_count,
                shift_type=shift_type,
                shift_base=shift_base,
                convert_all_linear=convert_all_linear,
                convert_weights=convert_weights,
                freeze_sign=freeze_sign,
                use_kernel=use_kernel,
                use_cuda=use_cuda,
                rounding=rounding,
                weight_bits=weight_bits)
            conversion_count += num_converted
        if type(module) == nn.Linear and (convert_all_linear == True
                                          or conversion_count < shift_depth):
            linear = module

            if shift_type == 'Q':
                shift_linear = deepshift.modules_q.LinearShiftQ(
                    module.in_features,
                    module.out_features,
                    shift_base,
                    module.bias is not None,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)
                shift_linear.weight = linear.weight
                if linear.bias is not None:
                    shift_linear.bias.data = utils.round_to_fixed(
                        linear.bias, fraction_bits=16, integer_bits=16)

                if use_cuda == True and use_kernel == True:
                    shift_linear.conc_weight = utils.compress_bits(
                        *utils.get_shift_and_sign(linear.weight, shift_base))
            elif shift_type == 'PS':
                shift_linear = deepshift.modules.LinearShift(
                    module.in_features,
                    module.out_features,
                    shift_base,
                    module.bias is not None,
                    freeze_sign=freeze_sign,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)

                if convert_weights == True:
                    shift_linear.shift.data, shift_linear.sign.data = utils.get_shift_and_sign(
                        linear.weight, shift_base)
                    shift_linear.bias = linear.bias

                    if use_cuda == True and use_kernel == True:
                        shift_linear.conc_weight = utils.compress_bits(
                            shift_linear.shift.data, shift_linear.sign.data)
            else:
                raise ValueError('Unsupported shift_type argument: ',
                                 shift_type)

            model._modules[name] = shift_linear
            if convert_all_linear == False:
                conversion_count += 1

        if type(module) == nn.Conv2d and conversion_count < shift_depth:
            conv2d = module

            if shift_type == 'Q':
                shift_conv2d = deepshift.modules_q.Conv2dShiftQ(
                    module.in_channels,
                    module.out_channels,
                    shift_base,
                    module.kernel_size,
                    module.stride,
                    module.padding,
                    module.dilation,
                    module.groups,
                    module.bias is not None,
                    module.padding_mode,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)
                shift_conv2d.weight = conv2d.weight
                if conv2d.bias is not None:
                    shift_conv2d.bias.data = utils.round_to_fixed(
                        conv2d.bias, fraction_bits=16, integer_bits=16)

                if use_cuda == True and use_kernel == True:
                    shift_conv2d.conc_weight = utils.compress_bits(
                        *utils.get_shift_and_sign(conv2d.weight))

            elif shift_type == 'PS':
                shift_conv2d = deepshift.modules.Conv2dShift(
                    module.in_channels,
                    module.out_channels,
                    shift_base,
                    module.kernel_size,
                    module.stride,
                    module.padding,
                    module.dilation,
                    module.groups,
                    module.bias is not None,
                    module.padding_mode,
                    freeze_sign=freeze_sign,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)

                if convert_weights == True:
                    shift_conv2d.shift.data, shift_conv2d.sign.data = utils.get_shift_and_sign(
                        conv2d.weight, shift_base)
                    shift_conv2d.bias = conv2d.bias

                if use_cuda == True and use_kernel == True:
                    shift_conv2d.conc_weight = utils.compress_bits(
                        shift_conv2d.shift.data, shift_conv2d.sign.data)

            model._modules[name] = shift_conv2d
            conversion_count += 1

    return model, conversion_count
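An end-to-end usage sketch combining the two helpers above (hedged: the keyword sets differ slightly between the repository versions shown, and `net` is a placeholder nn.Module):

# Sketch: swap nn.Linear/nn.Conv2d layers for shift layers, fine-tune, then
# round the shift/sign parameters before export.
net, num_converted = convert_to_shift(net, shift_depth=1000, shift_type='PS',
                                      convert_weights=True, use_kernel=False)
# ... fine-tune `net` here ...
net = round_shift_weights(net)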
Example #9
File: ste.py  Project: saisuryap/DeepShift
    def forward(ctx, input):
        return utils.round_to_fixed(input)
Example #10
    def forward(ctx, input, quant_bits):
        return utils.round_to_fixed(input, fraction=quant_bits)
Example #11
    def forward(ctx, input, act_integer_bits=16, act_fraction_bits=16):
        return utils.round_to_fixed(input, act_integer_bits, act_fraction_bits)
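The body of utils.round_to_fixed is not shown in these examples; conceptually it quantizes a tensor onto a signed fixed-point grid. A minimal sketch of that idea follows (an assumption about the behaviour, not the project's actual code):

import torch

def round_to_fixed_sketch(x, integer_bits=16, fraction_bits=16):
    # Snap to multiples of 2**-fraction_bits and clamp to the signed range
    # representable with `integer_bits` integer bits.
    delta = 2.0 ** -fraction_bits
    bound = 2.0 ** (integer_bits - 1)
    return torch.clamp(torch.round(x / delta) * delta, -bound, bound - delta)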