Code Example #1
File: Conv1d.py, Project: louisprimeau/MemTorch
    def __init__(self, convolutional_layer, memristor_model, memristor_model_params, mapping_routine=naive_map, transistor=False, programming_routine=None, p_l=None, scheme=memtorch.bh.Scheme.DoubleColumn, *args, **kwargs):
        assert isinstance(convolutional_layer, nn.Conv1d), 'convolutional_layer is not an instance of nn.Conv1d.'
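        # Choose the device from the installed MemTorch build: CPU-only builds include 'cpu' in memtorch.__version__.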
        self.device = torch.device('cpu' if 'cpu' in memtorch.__version__ else 'cuda')
        super(Conv1d, self).__init__(convolutional_layer.in_channels, convolutional_layer.out_channels, convolutional_layer.kernel_size, **kwargs)
        self.padding = convolutional_layer.padding
        self.stride = convolutional_layer.stride
        self.weight.data = convolutional_layer.weight.data
        if convolutional_layer.bias is not None:
            self.bias.data = convolutional_layer.bias.data

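        # Freeze the copied parameters; from here on they are emulated by the crossbars rather than trained.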
        self.zero_grad()
        self.weight.requires_grad = False
        if convolutional_layer.bias is not None:
            self.bias.requires_grad = False

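        # Map the frozen weights onto simulated memristive crossbars.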
        self.crossbars, self.crossbar_operation = init_crossbar(weights=self.weight,
                                                               memristor_model=memristor_model,
                                                               memristor_model_params=memristor_model_params,
                                                               transistor=transistor,
                                                               mapping_routine=mapping_routine,
                                                               programming_routine=programming_routine,
                                                               p_l=p_l,
                                                               scheme=scheme)
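        # Identity placeholder; layer tuning later replaces this with a learned linear transform of the raw crossbar output.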
        self.transform_output = lambda x: x
        print('Patched %s -> %s' % (convolutional_layer, self))
Code Example #2
File: Linear.py, Project: jeshraghian/MemTorch
    def __init__(self, linear_layer, memristor_model, memristor_model_params, mapping_routine=naive_map, transistor=True, programming_routine=None, programming_routine_params={}, p_l=None, scheme=memtorch.bh.Scheme.DoubleColumn, **kwargs):
        assert isinstance(linear_layer, nn.Linear), 'linear_layer is not an instance of nn.Linear.'
        self.device = torch.device('cpu' if 'cpu' in memtorch.__version__ else 'cuda')
        self.scheme = scheme
        self.forward_legacy_enabled = True
        super(Linear, self).__init__(linear_layer.in_features, linear_layer.out_features, **kwargs)
        self.weight.data = linear_layer.weight.data
        if linear_layer.bias is not None:
            self.bias.data = linear_layer.bias.data
        else:
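            # nn.Linear registers a bias Parameter by default; clear it to mirror the bias-free source layer.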
            self.bias = None

        self.zero_grad()
        self.weight.requires_grad = False
        if linear_layer.bias is not None:
            self.bias.requires_grad = False

        self.crossbars, self.crossbar_operation = init_crossbar(weights=self.weight,
                                                               memristor_model=memristor_model,
                                                               memristor_model_params=memristor_model_params,
                                                               transistor=transistor,
                                                               mapping_routine=mapping_routine,
                                                               programming_routine=programming_routine,
                                                               programming_routine_params=programming_routine_params,
                                                               p_l=p_l,
                                                               scheme=scheme)
        self.transform_output = lambda x: x
        print('Patched %s -> %s' % (linear_layer, self))
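
For reference, a layer like this is constructed directly from an existing nn.Linear. A minimal usage sketch, assuming the class above is exposed as memtorch.mn.Linear.Linear (the upstream MemTorch layout), using the built-in VTEAM memristor model with illustrative parameters, and assuming the forward pass honors the forward_legacy_enabled flag set in the constructor:

import torch
import torch.nn as nn
import memtorch
from memtorch.map.Parameter import naive_map
from memtorch.mn.Linear import Linear

fc = nn.Linear(64, 10)  # pretrained layer to be replaced

m_fc = Linear(fc,
              memristor_model=memtorch.bh.memristor.VTEAM,
              memristor_model_params={'time_series_resolution': 1e-10},
              mapping_routine=naive_map,
              transistor=True,
              scheme=memtorch.bh.Scheme.DoubleColumn)

m_fc.forward_legacy_enabled = False  # route subsequent calls through the crossbar simulation
out = m_fc(torch.randn(1, 64))
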
Code Example #3
    def __init__(self,
                 convolutional_layer,
                 memristor_model,
                 memristor_model_params,
                 mapping_routine=naive_map,
                 transistor=True,
                 programming_routine=None,
                 programming_routine_params={},
                 p_l=None,
                 scheme=memtorch.bh.Scheme.DoubleColumn,
                 tile_shape=None,
                 max_input_voltage=None,
                 ADC_resolution=None,
                 ADC_overflow_rate=0.,
                 quant_method=None,
                 verbose=True,
                 *args,
                 **kwargs):
        assert isinstance(
            convolutional_layer,
            nn.Conv3d), 'convolutional_layer is not an instance of nn.Conv3d.'
        self.device = torch.device('cpu' if 'cpu' in
                                   memtorch.__version__ else 'cuda')
        self.scheme = scheme
        self.tile_shape = tile_shape
        self.max_input_voltage = max_input_voltage
        self.ADC_resolution = ADC_resolution
        self.ADC_overflow_rate = ADC_overflow_rate
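        # Fall back to no output quantization if the requested method is unrecognized.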
        if quant_method in memtorch.bh.Quantize.quant_methods:
            self.quant_method = quant_method
        else:
            self.quant_method = None

        if quant_method is not None:
            assert ADC_resolution is not None and type(
                ADC_resolution
            ) == int and ADC_resolution > 0, 'ADC resolution is invalid.'
            assert ADC_overflow_rate is not None, 'ADC_overflow_rate must be specified if quant_method is not None.'

        self.verbose = verbose
        self.forward_legacy_enabled = True
        super(Conv3d, self).__init__(convolutional_layer.in_channels,
                                     convolutional_layer.out_channels,
                                     convolutional_layer.kernel_size, **kwargs)
        self.padding = convolutional_layer.padding
        self.stride = convolutional_layer.stride
        self.weight.data = convolutional_layer.weight.data
        if convolutional_layer.bias is not None:
            self.bias.data = convolutional_layer.bias.data

        self.zero_grad()
        self.weight.requires_grad = False
        if convolutional_layer.bias is not None:
            self.bias.requires_grad = False

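        # tile_shape, when set, partitions each crossbar into smaller modular tiles.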
        self.crossbars, self.crossbar_operation = init_crossbar(
            weights=self.weight,
            memristor_model=memristor_model,
            memristor_model_params=memristor_model_params,
            transistor=transistor,
            mapping_routine=mapping_routine,
            programming_routine=programming_routine,
            programming_routine_params=programming_routine_params,
            p_l=p_l,
            scheme=scheme,
            tile_shape=tile_shape)
        self.transform_output = lambda x: x
        if verbose:
            print('Patched %s -> %s' % (convolutional_layer, self))
Code Example #4
    def __init__(self,
                 linear_layer,
                 memristor_model,
                 memristor_model_params,
                 mapping_routine=naive_map,
                 transistor=True,
                 programming_routine=None,
                 programming_routine_params={},
                 p_l=None,
                 scheme=memtorch.bh.Scheme.DoubleColumn,
                 tile_shape=None,
                 max_input_voltage=None,
                 ADC_resolution=None,
                 ADC_overflow_rate=0.0,
                 quant_method=None,
                 verbose=True,
                 *args,
                 **kwargs):
        assert isinstance(
            linear_layer,
            nn.Linear), "linear_layer is not an instance of nn.Linear."
        self.device = torch.device("cpu" if "cpu" in
                                   memtorch.__version__ else "cuda")
        self.scheme = scheme
        self.tile_shape = tile_shape
        self.max_input_voltage = max_input_voltage
        self.ADC_resolution = ADC_resolution
        self.ADC_overflow_rate = ADC_overflow_rate
        if quant_method in memtorch.bh.Quantize.quant_methods:
            self.quant_method = quant_method
        else:
            self.quant_method = None

        if quant_method is not None:
            assert (ADC_resolution is not None and type(ADC_resolution) == int
                    and ADC_resolution > 0), "ADC resolution is invalid."
            assert (
                ADC_overflow_rate is not None
            ), "ADC_overflow_rate must be specified if quant_method is not None."

        self.verbose = verbose
        self.forward_legacy_enabled = True
        super(Linear, self).__init__(linear_layer.in_features,
                                     linear_layer.out_features, **kwargs)
        self.weight.data = linear_layer.weight.data
        if linear_layer.bias is not None:
            self.bias.data = linear_layer.bias.data
        else:
            self.bias = None

        self.zero_grad()
        self.weight.requires_grad = False
        if linear_layer.bias is not None:
            self.bias.requires_grad = False

        self.crossbars, self.crossbar_operation = init_crossbar(
            weights=self.weight,
            memristor_model=memristor_model,
            memristor_model_params=memristor_model_params,
            transistor=transistor,
            mapping_routine=mapping_routine,
            programming_routine=programming_routine,
            programming_routine_params=programming_routine_params,
            p_l=p_l,
            scheme=scheme,
            tile_shape=tile_shape,
        )
        self.transform_output = lambda x: x
        if verbose:
            print("Patched %s -> %s" % (linear_layer, self))
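
In practice, individual layers are rarely constructed by hand: whole networks are converted with memtorch.mn.Module.patch_model, which swaps each matched module for its memristive counterpart and forwards the keyword arguments seen above. A minimal sketch against the upstream MemTorch API; the toy model, memristor parameters, and tile/ADC settings are illustrative, and patch_model is assumed to handle a flat container like nn.Sequential:

import copy
import torch.nn as nn
import memtorch
from memtorch.mn.Module import patch_model
from memtorch.map.Parameter import naive_map

model = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))

patched_model = patch_model(copy.deepcopy(model),
                            memristor_model=memtorch.bh.memristor.VTEAM,
                            memristor_model_params={'time_series_resolution': 1e-10},
                            module_parameters_to_patch=[nn.Linear],
                            mapping_routine=naive_map,
                            transistor=True,
                            programming_routine=None,
                            scheme=memtorch.bh.Scheme.DoubleColumn,
                            tile_shape=(128, 128),
                            max_input_voltage=0.3,
                            ADC_resolution=8,
                            ADC_overflow_rate=0.0,
                            quant_method='linear')
patched_model.tune_()  # fit transform_output for each patched layer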