Code example #1
    def forward(ctx, input, weight, bias=None, conc_weight=None, use_kernel=False, use_cuda=True):
        # Q16.16 fixed point: 16 fraction bits, 16 integer bits
        fraction_bits = 16
        integer_bits = 16

        # Express each weight as a signed power of two: weight ~ sign * 2**shift
        shift, sign = utils.get_shift_and_sign(weight)

        if use_kernel:
            # Convert activations (and bias, if any) to fixed point for the custom kernel
            input_fixed_point = (input * (2 ** fraction_bits)).int()
            if bias is not None:
                bias_fixed_point = (bias * (2 ** fraction_bits)).int()
            else:
                bias_fixed_point = None

            out = deepshift.kernels.linear(input_fixed_point, shift, sign, bias_fixed_point, conc_weight, use_cuda)
            # Scale back from fixed point to float
            out = out.float() / (2 ** fraction_bits)
        else:
            # Emulate fixed-point arithmetic by rounding the inputs in place
            input.data = round_to_fixed(input.data, fraction_bits, integer_bits)
            if bias is not None:
                bias.data = round_to_fixed(bias.data, fraction_bits, integer_bits)

            # Reconstruct the power-of-two weights and run a standard matmul
            weight_s = (2.0 ** shift) * sign
            out = input.mm(weight_s.t())
            if bias is not None:
                out += bias.unsqueeze(0).expand_as(out)

            ctx.save_for_backward(input, weight_s, bias)

        return out
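
The forward pass above relies on utils.get_shift_and_sign to express each weight as a signed power of two, which turns the multiply into a bit shift. A minimal sketch of what that helper plausibly computes (the name comes from the snippet; this implementation is an assumption, and the real DeepShift code may round differently):

    import torch

    # Hypothetical stand-in for utils.get_shift_and_sign: round each weight
    # to a nearby signed power of two, so weight ~ sign * 2**shift.
    def get_shift_and_sign_sketch(weight: torch.Tensor):
        sign = torch.sign(weight)                     # -1, 0, or +1 per weight
        shift = torch.round(torch.log2(weight.abs() + 1e-12))
        return shift, sign

    w = torch.tensor([0.30, -0.90, 0.06])
    shift, sign = get_shift_and_sign_sketch(w)
    print((2.0 ** shift) * sign)                      # tensor([ 0.2500, -1.0000,  0.0625])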
Code example #2
    def forward(ctx, input, weight, bias=None, conc_weight=None, stride=1, padding=0, dilation=1, groups=1, use_kernel=False, use_cuda=False):
        # Q16.16 fixed point: 16 fraction bits, 16 integer bits
        fraction_bits = 16
        integer_bits = 16

        shift, sign = utils.get_shift_and_sign(weight)

        if use_kernel:
            # Convert activations (and bias, if any) to fixed point for the custom kernel
            input_fixed_point = (input * (2 ** fraction_bits)).int()
            if bias is not None:
                bias_fixed_point = (bias * (2 ** fraction_bits)).int()
            else:
                bias_fixed_point = None

            out = deepshift.kernels.conv2d(input_fixed_point, shift, sign, bias_fixed_point, conc_weight, stride, padding, dilation, groups, use_cuda)

            # Scale back from fixed point to float
            out = out.float() / (2 ** fraction_bits)
        else:
            # Reconstruct the power-of-two weights and fall back to a standard conv
            weight_s = (2.0 ** shift) * sign
            out = F.conv2d(input, weight_s, bias, stride, padding, dilation, groups)

            # Save tensors and conv hyperparameters for the backward pass
            ctx.save_for_backward(input, weight_s, bias)
            ctx.stride = stride
            ctx.padding = padding
            ctx.dilation = dilation
            ctx.groups = groups

        return out
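
Both forwards quantize tensors into a Q16.16 fixed-point format (fraction_bits = 16, integer_bits = 16). The round_to_fixed helper called in example #1 is not shown on this page; the following is one plausible implementation, offered only as a sketch of the assumed behavior:

    import torch

    # Hypothetical round_to_fixed: snap to the fixed-point grid, then clamp
    # to the signed range spanned by the integer bits.
    def round_to_fixed_sketch(x, fraction_bits=16, integer_bits=16):
        scale = 2.0 ** fraction_bits
        x_q = torch.round(x * scale) / scale          # nearest multiple of 2**-16
        bound = 2.0 ** (integer_bits - 1)
        return torch.clamp(x_q, -bound, bound - 1.0 / scale)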
Code example #3
File: convert.py Project: saisuryap/DeepShift
import torch.nn as nn

import deepshift.modules
import deepshift.modules_q
import deepshift.utils as utils


def convert_to_shift(model,
                     shift_depth,
                     shift_type,
                     shift_base=2,
                     convert_all_linear=True,
                     convert_weights=False,
                     freeze_sign=False,
                     use_kernel=False,
                     use_cuda=True,
                     rounding='deterministic',
                     weight_bits=5):
    conversion_count = 0
    # Walk submodules in reverse registration order (last layers first)
    for name, module in reversed(model._modules.items()):
        if len(list(module.children())) > 0:
            # recurse
            model._modules[name], num_converted = convert_to_shift(
                model=module,
                shift_depth=shift_depth - conversion_count,
                shift_type=shift_type,
                shift_base=shift_base,
                convert_all_linear=convert_all_linear,
                convert_weights=convert_weights,
                freeze_sign=freeze_sign,
                use_kernel=use_kernel,
                use_cuda=use_cuda,
                rounding=rounding,
                weight_bits=weight_bits)
            conversion_count += num_converted
        if type(module) == nn.Linear and (convert_all_linear
                                          or conversion_count < shift_depth):
            linear = module

            # DeepShift-Q: reuse the float weights, rounded to powers of two
            if shift_type == 'Q':
                shift_linear = deepshift.modules_q.LinearShiftQ(
                    module.in_features,
                    module.out_features,
                    shift_base,
                    module.bias is not None,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)
                shift_linear.weight = linear.weight
                if linear.bias is not None:
                    shift_linear.bias.data = utils.round_to_fixed(
                        linear.bias, fraction_bits=16, integer_bits=16)

                if use_cuda and use_kernel:
                    shift_linear.conc_weight = utils.compress_bits(
                        *utils.get_shift_and_sign(linear.weight, shift_base))
            # DeepShift-PS: train the shift and sign values directly as parameters
            elif shift_type == 'PS':
                shift_linear = deepshift.modules.LinearShift(
                    module.in_features,
                    module.out_features,
                    shift_base,
                    module.bias is not None,
                    freeze_sign=freeze_sign,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)

                if convert_weights:
                    shift_linear.shift.data, shift_linear.sign.data = utils.get_shift_and_sign(
                        linear.weight, shift_base)
                    shift_linear.bias = linear.bias

                    if use_cuda and use_kernel:
                        shift_linear.conc_weight = utils.compress_bits(
                            shift_linear.shift.data, shift_linear.sign.data)
            else:
                raise ValueError(
                    f'Unsupported shift_type argument: {shift_type}')

            model._modules[name] = shift_linear
            if not convert_all_linear:
                conversion_count += 1

        if type(module) == nn.Conv2d and conversion_count < shift_depth:
            conv2d = module

            if shift_type == 'Q':
                shift_conv2d = deepshift.modules_q.Conv2dShiftQ(
                    module.in_channels,
                    module.out_channels,
                    shift_base,
                    module.kernel_size,
                    module.stride,
                    module.padding,
                    module.dilation,
                    module.groups,
                    module.bias is not None,
                    module.padding_mode,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)
                shift_conv2d.weight = conv2d.weight
                if conv2d.bias is not None:
                    shift_conv2d.bias.data = utils.round_to_fixed(
                        conv2d.bias, fraction_bits=16, integer_bits=16)

                if use_cuda and use_kernel:
                    shift_conv2d.conc_weight = utils.compress_bits(
                        *utils.get_shift_and_sign(conv2d.weight, shift_base))

            elif shift_type == 'PS':
                shift_conv2d = deepshift.modules.Conv2dShift(
                    module.in_channels,
                    module.out_channels,
                    shift_base,
                    module.kernel_size,
                    module.stride,
                    module.padding,
                    module.dilation,
                    module.groups,
                    module.bias is not None,
                    module.padding_mode,
                    freeze_sign=freeze_sign,
                    use_kernel=use_kernel,
                    use_cuda=use_cuda,
                    rounding=rounding,
                    weight_bits=weight_bits)

                if convert_weights:
                    shift_conv2d.shift.data, shift_conv2d.sign.data = utils.get_shift_and_sign(
                        conv2d.weight, shift_base)
                    shift_conv2d.bias = conv2d.bias

                    if use_cuda and use_kernel:
                        shift_conv2d.conc_weight = utils.compress_bits(
                            shift_conv2d.shift.data, shift_conv2d.sign.data)

            model._modules[name] = shift_conv2d
            conversion_count += 1

    return model, conversion_count
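
A usage sketch for convert_to_shift on a toy model; only the signature above is taken from the source, and the surrounding setup is illustrative:

    import torch.nn as nn

    model = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Flatten(),
        nn.Linear(16 * 32 * 32, 10),
    )

    # Convert up to shift_depth non-linear layers, plus every nn.Linear
    # (convert_all_linear defaults to True), into DeepShift-PS modules.
    model, num_converted = convert_to_shift(
        model,
        shift_depth=2,
        shift_type='PS',
        convert_weights=True,   # initialize shift/sign from the float weights
        use_kernel=False,
        use_cuda=False,
    )
    print(f'{num_converted} layer(s) counted toward shift_depth')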