Example #1
def test_crossbar(shape):
    memristor_model = memtorch.bh.memristor.LinearIonDrift
    memristor_model_params = {'time_series_resolution': 1e-3}
    crossbar = memtorch.bh.crossbar.Crossbar(memristor_model, memristor_model_params, shape)
    conductance_matrix = naive_map(torch.zeros(shape).uniform_(0, 1),
                                   memristor_model().r_on, memristor_model().r_off,
                                   memtorch.bh.crossbar.Scheme.SingleColumn)
    crossbar.write_conductance_matrix(conductance_matrix)
    if sys.version_info > (3, 6):
        crossbar.update(from_devices=False, parallelize=True)
        assert torch.all(torch.isclose(conductance_matrix.T[:, :], crossbar.conductance_matrix.cpu()[:, :], atol=1e-5))
        assert crossbar.devices[0][0].g == crossbar.conductance_matrix[0][0].item()

    crossbar.update(from_devices=False, parallelize=False)
    assert crossbar.devices[0][0].g == crossbar.conductance_matrix[0][0].item()
    inputs = torch.zeros(shape).uniform_(0, 1)
    assert torch.all(torch.isclose(simulate_matmul(inputs, crossbar.devices).float(),
                     torch.matmul(inputs, conductance_matrix.T).float(), rtol=1e-2))
    assert torch.all(torch.isclose(simulate_matmul(inputs, crossbar.devices, parallelize=True).float(),
                     torch.matmul(inputs, conductance_matrix.T).float(), rtol=1e-2))
    programming_signal = gen_programming_signal(1, 1e-2, 1e-2, 1, memristor_model_params['time_series_resolution'])
    assert type(programming_signal) == tuple
    with pytest.raises(AssertionError):
        gen_programming_signal(1, 1e-4, 1e-4, 1, memristor_model_params['time_series_resolution'])

    point = (0, 0)
    row, column = point
    conductance_to_program = random.uniform(1 / crossbar.devices[row][column].r_off, 1 / crossbar.devices[row][column].r_on)
    crossbar.devices = naive_program(crossbar, (row, column), conductance_to_program, rel_tol=0.01)
    assert math.isclose(conductance_to_program, crossbar.devices[row][column].g, abs_tol=1e-4)
    with pytest.raises(AssertionError):
        naive_program(crossbar, (row, column), -1)
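
The test above uses several names it never imports. A plausible import block is sketched below; the exact module paths are assumptions inferred from how the names are used and from MemTorch's package layout, and should be checked against the installed version.

import math
import random
import sys

import pytest
import torch

import memtorch
# Assumed locations of the helpers used above; verify before running.
from memtorch.bh.crossbar.Crossbar import simulate_matmul
from memtorch.bh.crossbar.Program import gen_programming_signal, naive_program
from memtorch.map.Parameter import naive_map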
Example #2
    def forward(self, input):
        """Method to perform forward propagations.

            Parameters
            ----------
            input : torch.Tensor
                Input tensor.

            Returns
            -------
            torch.Tensor
                Output tensor.
        """
        if self.forward_legacy_enabled:
            out = torch.matmul(input.to(self.device),
                               self.weight.data.T.to(self.device))
            if self.bias is not None:
                out += self.bias.view(1, -1).expand_as(out)

            return out
        else:
            if hasattr(self, 'non_linear'):
                input = convert_range(input, input.min(), input.max(), -1, 1)
                input = input.cpu().detach().numpy()
                if hasattr(self, 'simulate'):
                    out = torch.tensor(
                        self.transform_output(
                            self.crossbar_operation(
                                self.crossbars,
                                lambda crossbar, input: simulate_matmul(
                                    input, crossbar.devices, nl=False),
                                input))).to(self.device)
                else:
                    out = torch.tensor(
                        self.transform_output(
                            self.crossbar_operation(
                                self.crossbars,
                                lambda crossbar, input: simulate_matmul(
                                    input, crossbar.devices, nl=True),
                                input))).to(self.device)
            else:
                out = torch.matmul(
                    input.to(self.device),
                    self.crossbar_operation(
                        self.crossbars,
                        lambda crossbar: crossbar.conductance_matrix))

            out = self.transform_output(out)
            if self.bias is not None:
                out += self.bias.view(1, -1).expand_as(out)

            return out
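
The forward_legacy_enabled branch above is an ordinary dense multiply against the stored weights. A minimal torch-only sketch of that equivalence (illustrative only, not MemTorch code):

import torch

linear = torch.nn.Linear(8, 4)
x = torch.randn(2, 8)
# Reproduce the legacy branch: x @ W.T plus a broadcast bias.
manual = torch.matmul(x, linear.weight.data.T)
manual += linear.bias.view(1, -1).expand_as(manual)
assert torch.allclose(manual, linear(x), atol=1e-5)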
Example #3
    def forward(self, input):
        """Method to perform forward propagations.

            Parameters
            ----------
            input : torch.Tensor
                Input tensor.

            Returns
            -------
            torch.Tensor
                Output tensor.
        """
        if self.forward_legacy_enabled:
            return torch.nn.functional.conv3d(input.to(self.device),
                                              self.weight.to(self.device),
                                              bias=self.bias,
                                              stride=self.stride,
                                              padding=self.padding)
        else:
            output_dim = [0, 0, 0]
            output_dim[0] = int(
                (input.shape[2] - self.kernel_size[0] + 2 * self.padding[0]) /
                self.stride[0]) + 1
            output_dim[1] = int(
                (input.shape[3] - self.kernel_size[1] + 2 * self.padding[1]) /
                self.stride[1]) + 1
            output_dim[2] = int(
                (input.shape[4] - self.kernel_size[2] + 2 * self.padding[2]) /
                self.stride[2]) + 1
            out = torch.zeros(
                (input.shape[0], self.out_channels, output_dim[0],
                 output_dim[1], output_dim[2])).to(self.device)
            for batch in range(input.shape[0]):
                if not all(item == 0 for item in self.padding):
                    batch_input = nn.functional.pad(
                        input[batch],
                        pad=(self.padding[2], self.padding[2], self.padding[1],
                             self.padding[1], self.padding[0],
                             self.padding[0]))
                else:
                    batch_input = input[batch]

                if self.max_input_voltage is not None:
                    assert (
                        type(self.max_input_voltage) == int
                        or type(self.max_input_voltage) == float
                    ) and self.max_input_voltage > 0, 'The maximum input voltage (max_input_voltage) must be >0.'
                    batch_input = convert_range(
                        batch_input, batch_input.min(), batch_input.max(),
                        -self.max_input_voltage, self.max_input_voltage)

                unfolded_batch_input = batch_input.unfold(1, self.kernel_size[0], self.stride[0]).unfold(2, self.kernel_size[1], self.stride[1]).unfold(3, self.kernel_size[2], self.stride[2]) \
                    .permute(1, 2, 3, 0, 4, 5, 6).reshape(-1, self.in_channels * self.kernel_size[0] * self.kernel_size[1] * self.kernel_size[2])
                unfolded_batch_input_shape = unfolded_batch_input.shape
                if hasattr(self, 'non_linear'):
                    if self.tile_shape is not None:
                        tiles_map = self.crossbars[0].tiles_map
                        crossbar_shape = (self.crossbars[0].rows,
                                          self.crossbars[0].columns)
                    else:
                        tiles_map = None
                        crossbar_shape = None

                    if hasattr(self, 'simulate'):
                        nl = False
                    else:
                        nl = True

                    out_ = self.crossbar_operation(self.crossbars, lambda crossbar, input_: simulate_matmul(unfolded_batch_input, crossbar, nl=nl, \
                                                   tiles_map=tiles_map, crossbar_shape=crossbar_shape, max_input_voltage=self.max_input_voltage,
                                                   ADC_resolution=self.ADC_resolution, ADC_overflow_rate=self.ADC_overflow_rate,
                                                   quant_method=self.quant_method), input_=unfolded_batch_input).to(self.device).T
                else:
                    if self.tile_shape is not None:
                        unfolded_batch_input_tiles, unfolded_batch_input_tiles_map = gen_tiles(
                            unfolded_batch_input, self.tile_shape, input=True)
                        crossbar_shape = (self.crossbars[0].rows,
                                          self.crossbars[0].columns)
                        tiles_map = self.crossbars[0].tiles_map
                        out_ = tile_matmul(unfolded_batch_input_tiles, unfolded_batch_input_tiles_map, unfolded_batch_input_shape, \
                                self.crossbar_operation(self.crossbars, lambda crossbar: crossbar.conductance_matrix), tiles_map, crossbar_shape,
                                self.ADC_resolution, self.ADC_overflow_rate, self.quant_method).T
                    else:
                        out_ = torch.matmul(
                            unfolded_batch_input,
                            self.crossbar_operation(
                                self.crossbars, lambda crossbar: crossbar.
                                conductance_matrix)).T
                        if self.quant_method is not None:
                            out_ = memtorch.bh.Quantize.quantize(
                                out_,
                                bits=self.ADC_resolution,
                                overflow_rate=self.ADC_overflow_rate,
                                quant_method=self.quant_method)

                out[batch] = out_.view(size=(1, self.out_channels,
                                             *output_dim))

            out = self.transform_output(out)
            if self.bias is not None:
                out += self.bias.data.view(-1, 1, 1, 1).expand_as(out)

            return out
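
The unfold/permute/reshape chain above turns every 3-D receptive field into a row, so the convolution becomes one matrix multiply per batch against the flattened-kernel conductance matrix. A torch-only sanity check of that rewrite, using plain weights instead of a crossbar (stride 1, no padding; sizes are arbitrary):

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 6, 6, 6)                    # (N, C_in, D, H, W)
W = torch.randn(4, 3, 3, 3, 3)                    # (C_out, C_in, kD, kH, kW)
reference = F.conv3d(x, W)
d_out = 6 - 3 + 1

out = torch.zeros_like(reference)
for b in range(x.shape[0]):
    unfolded = (x[b].unfold(1, 3, 1).unfold(2, 3, 1).unfold(3, 3, 1)
                    .permute(1, 2, 3, 0, 4, 5, 6)
                    .reshape(-1, 3 * 3 * 3 * 3))   # (d_out**3, C_in*kD*kH*kW)
    out[b] = (unfolded @ W.view(4, -1).T).T.view(4, d_out, d_out, d_out)
assert torch.allclose(out, reference, atol=1e-4)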
Example #4
    def forward(self, input):
        """Method to perform forward propagations.

        Parameters
        ----------
        input : torch.Tensor
            Input tensor.

        Returns
        -------
        torch.Tensor
            Output tensor.
        """
        if self.forward_legacy_enabled:
            return torch.nn.functional.conv1d(
                input.to(self.device),
                self.weight.to(self.device),
                bias=self.bias,
                stride=self.stride,
                padding=self.padding,
            )
        else:
            output_dim = (
                int(
                    (input.shape[2] - self.kernel_size[0] + 2 * self.padding[0])
                    / self.stride[0]
                )
                + 1
            )
            out = torch.zeros((input.shape[0], self.out_channels, output_dim)).to(
                self.device
            )
            if not all(item == 0 for item in self.padding):
                input = nn.functional.pad(input, pad=(self.padding[0], self.padding[0]))

            if self.max_input_voltage is not None:
                assert (
                    type(self.max_input_voltage) == int
                    or type(self.max_input_voltage) == float
                ) and self.max_input_voltage > 0, (
                    "The maximum input voltage (max_input_voltage) must be >0."
                )
                input = convert_range(
                    input,
                    input.min(),
                    input.max(),
                    -self.max_input_voltage,
                    self.max_input_voltage,
                )

            for batch in range(input.shape[0]):
                unfolded_batch_input = (
                    input[batch]
                    .unfold(-1, size=self.kernel_size[0], step=self.stride[0])
                    .permute(1, 0, 2)
                    .reshape(-1, self.in_channels * self.kernel_size[0])
                )
                unfolded_batch_input_shape = unfolded_batch_input.shape
                if hasattr(self, "non_linear"):
                    if self.tile_shape is not None:
                        tiles_map = self.crossbars[0].tiles_map
                        crossbar_shape = (
                            self.crossbars[0].rows,
                            self.crossbars[0].columns,
                        )
                    else:
                        tiles_map = None
                        crossbar_shape = None

                    if hasattr(self, "simulate"):
                        nl = False
                    else:
                        nl = True

                    out_ = (
                        self.crossbar_operation(
                            self.crossbars,
                            lambda crossbar, input_: simulate_matmul(
                                unfolded_batch_input,
                                crossbar,
                                nl=nl,
                                tiles_map=tiles_map,
                                crossbar_shape=crossbar_shape,
                                max_input_voltage=self.max_input_voltage,
                                ADC_resolution=self.ADC_resolution,
                                ADC_overflow_rate=self.ADC_overflow_rate,
                                quant_method=self.quant_method,
                            ),
                            input_=unfolded_batch_input,
                        )
                        .to(self.device)
                        .T
                    )
                else:
                    if self.tile_shape is not None:
                        (
                            unfolded_batch_input_tiles,
                            unfolded_batch_input_tiles_map,
                        ) = gen_tiles(unfolded_batch_input, self.tile_shape, input=True)
                        crossbar_shape = (
                            self.crossbars[0].rows,
                            self.crossbars[0].columns,
                        )
                        tiles_map = self.crossbars[0].tiles_map
                        out_ = tile_matmul(
                            unfolded_batch_input_tiles,
                            unfolded_batch_input_tiles_map,
                            unfolded_batch_input_shape,
                            self.crossbar_operation(
                                self.crossbars,
                                lambda crossbar: crossbar.conductance_matrix,
                            ),
                            tiles_map,
                            crossbar_shape,
                            self.ADC_resolution,
                            self.ADC_overflow_rate,
                            self.quant_method,
                        ).T
                    else:
                        out_ = torch.matmul(
                            unfolded_batch_input,
                            self.crossbar_operation(
                                self.crossbars,
                                lambda crossbar: crossbar.conductance_matrix,
                            ),
                        ).T
                        if self.quant_method is not None:
                            out_ = memtorch.bh.Quantize.quantize(
                                out_,
                                quant=self.ADC_resolution,
                                overflow_rate=self.ADC_overflow_rate,
                                quant_method=self.quant_method,
                            )

                out[batch] = out_.view(size=(1, self.out_channels, output_dim))

            out = self.transform_output(out)
            if self.bias is not None:
                out += self.bias.view(-1, 1).expand_as(out)

            return out
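
As in the 3-D case, the Conv1d path relies on unfold turning each sliding window into a row of a matrix. A torch-only sketch showing that this reproduces torch.nn.functional.conv1d when the conductance matrix is replaced by the flattened weights (illustrative sizes):

import torch
import torch.nn.functional as F

n, c_in, c_out, length, k, stride = 2, 3, 4, 10, 3, 1
x = torch.randn(n, c_in, length)
W = torch.randn(c_out, c_in, k)
reference = F.conv1d(x, W, stride=stride)
l_out = (length - k) // stride + 1

out = torch.zeros(n, c_out, l_out)
for b in range(n):
    unfolded = (x[b].unfold(1, k, stride)          # (C_in, L_out, k)
                    .permute(1, 0, 2)              # (L_out, C_in, k)
                    .reshape(-1, c_in * k))        # (L_out, C_in * k)
    out[b] = (unfolded @ W.view(c_out, -1).T).T    # (C_out, L_out)
assert torch.allclose(out, reference, atol=1e-5)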
Example #5
    def forward(self, input):
        """Method to perform forward propagations.

            Parameters
            ----------
            input : torch.Tensor
                Input tensor.

            Returns
            -------
            torch.Tensor
                Output tensor.
        """
        if self.forward_legacy_enabled:
            out = torch.matmul(input.to(self.device),
                               self.weight.data.T.to(self.device))
            if self.bias is not None:
                out += self.bias.view(1, -1).expand_as(out)

            return out
        else:
            input_shape = input.shape
            if self.max_input_voltage is not None:
                assert (
                    type(self.max_input_voltage) == int
                    or type(self.max_input_voltage) == float
                ) and self.max_input_voltage > 0, 'The maximum input voltage (max_input_voltage) must be >0.'
                input = convert_range(input, input.min(), input.max(),
                                      -self.max_input_voltage,
                                      self.max_input_voltage)

            if hasattr(self, 'non_linear'):
                if self.tile_shape is not None:
                    tiles_map = self.crossbars[0].tiles_map
                    crossbar_shape = self.weight.data.shape
                else:
                    tiles_map = None
                    crossbar_shape = None

                if hasattr(self, 'simulate'):
                    nl = False
                else:
                    nl = True

                out = self.crossbar_operation(self.crossbars, lambda crossbar, input_: simulate_matmul(input, crossbar, nl=nl, \
                                              tiles_map=tiles_map, crossbar_shape=crossbar_shape, max_input_voltage=self.max_input_voltage,
                                              ADC_resolution=self.ADC_resolution, ADC_overflow_rate=self.ADC_overflow_rate,
                                              quant_method=self.quant_method), input_=input).to(self.device)
            else:
                if self.tile_shape is not None:
                    input_tiles, input_tiles_map = gen_tiles(input,
                                                             self.tile_shape,
                                                             input=True)
                    crossbar_shape = (self.crossbars[0].rows,
                                      self.crossbars[0].columns)
                    tiles_map = self.crossbars[0].tiles_map
                    out = tile_matmul(input_tiles, input_tiles_map, input_shape, self.crossbar_operation(self.crossbars, \
                        lambda crossbar: crossbar.conductance_matrix), tiles_map, crossbar_shape,
                        self.ADC_resolution, self.ADC_overflow_rate, self.quant_method)
                else:
                    out = torch.matmul(
                        input.to(self.device),
                        self.crossbar_operation(
                            self.crossbars,
                            lambda crossbar: crossbar.conductance_matrix))
                    if self.quant_method is not None:
                        out = memtorch.bh.Quantize.quantize(
                            out,
                            bits=self.ADC_resolution,
                            overflow_rate=self.ADC_overflow_rate,
                            quant_method=self.quant_method)

            out = self.transform_output(out)
            if self.bias is not None:
                out += self.bias.data.view(1, -1).expand_as(out)

            return out
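
When max_input_voltage is set, the input is linearly rescaled into the range [-max_input_voltage, max_input_voltage] before it reaches the crossbar. A small sketch of that scaling step; the local helper below only mirrors what convert_range is assumed to do and is not MemTorch code:

import torch

def linear_rescale(x, old_min, old_max, new_min, new_max):
    # Map [old_min, old_max] linearly onto [new_min, new_max].
    return (x - old_min) / (old_max - old_min) * (new_max - new_min) + new_min

v_max = 0.3                                        # hypothetical max_input_voltage
x = torch.randn(4, 8)
x_scaled = linear_rescale(x, x.min(), x.max(), -v_max, v_max)
assert torch.isclose(x_scaled.min(), torch.tensor(-v_max))
assert torch.isclose(x_scaled.max(), torch.tensor(v_max))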
Example #6
    def forward(self, input):
        """Method to perform forward propagations.

            Parameters
            ----------
            input : torch.Tensor
                Input tensor.

            Returns
            -------
            torch.Tensor
                Output tensor.
        """
        if self.forward_legacy_enabled:
            return torch.nn.functional.conv3d(input.to(self.device),
                                              self.weight.to(self.device),
                                              bias=self.bias,
                                              stride=self.stride,
                                              padding=self.padding)
        else:
            output_dim = [0, 0, 0]
            output_dim[0] = int(
                (input.shape[2] - self.kernel_size[0] + 2 * self.padding[0]) /
                self.stride[0]) + 1
            output_dim[1] = int(
                (input.shape[3] - self.kernel_size[1] + 2 * self.padding[1]) /
                self.stride[1]) + 1
            output_dim[2] = int(
                (input.shape[4] - self.kernel_size[2] + 2 * self.padding[2]) /
                self.stride[2]) + 1
            out = torch.zeros(
                (input.shape[0], self.out_channels, output_dim[0],
                 output_dim[1], output_dim[2])).to(self.device)
            for batch in range(input.shape[0]):
                if all(item == 0 for item in self.padding):
                    batch_input = input[batch]
                else:
                    batch_input = nn.functional.pad(
                        input[batch],
                        pad=(self.padding[2], self.padding[2], self.padding[1],
                             self.padding[1], self.padding[0],
                             self.padding[0]),
                        mode="constant",
                        value=0)

                channel_idx = 0
                for channel in range(self.in_channels):
                    batch_channel_input = batch_input[channel].unsqueeze(
                        0).unsqueeze(0)
                    unfolded_batch_channel_input = batch_channel_input.unfold(
                        2, self.kernel_size[0], self.stride[0]).unfold(
                            3, self.kernel_size[1],
                            self.stride[1]).unfold(4, self.kernel_size[2],
                                                   self.stride[2])
                    unfolded_batch_channel_input = unfolded_batch_channel_input.reshape(
                        -1, self.kernel_size[0] * self.kernel_size[1] *
                        self.kernel_size[2])
                    if hasattr(self, 'non_linear'):
                        unfolded_batch_channel_input = convert_range(
                            unfolded_batch_channel_input,
                            unfolded_batch_channel_input.min(),
                            unfolded_batch_channel_input.max(), -1,
                            1).squeeze(0)
                        unfolded_batch_channel_input = unfolded_batch_channel_input.transpose(
                            1, 0).cpu().detach().numpy()
                        if hasattr(self, 'simulate'):
                            nl = False
                        else:
                            nl = True

                        if self.scheme == memtorch.bh.Scheme.DoubleColumn:
                            out[batch, :, :, :, :] += self.transform_output(
                                self.crossbar_operation(
                                    self.crossbars,
                                    lambda crossbar, input_: simulate_matmul(
                                        input_,
                                        crossbar.devices.transpose(1, 0),
                                        nl=nl),
                                    input_=unfolded_batch_channel_input.T,
                                    idx=(channel_idx, channel_idx + 1))).view(
                                        self.out_channels, output_dim[0],
                                        output_dim[1],
                                        output_dim[2]).to(self.device)
                        elif self.scheme == memtorch.bh.Scheme.SingleColumn:
                            out[batch, :, :, :, :] += self.transform_output(
                                self.crossbar_operation(
                                    self.crossbars,
                                    lambda crossbar, input_: simulate_matmul(
                                        input_,
                                        crossbar.devices.transpose(1, 0),
                                        nl=nl),
                                    input_=unfolded_batch_channel_input.T,
                                    idx=channel_idx)).view(
                                        self.out_channels, output_dim[0],
                                        output_dim[1],
                                        output_dim[2]).to(self.device)
                        else:
                            raise Exception('Scheme is currently unsupported.')
                    else:
                        if self.scheme == memtorch.bh.Scheme.DoubleColumn:
                            out[batch, :, :, :, :] += self.transform_output(
                                torch.matmul(
                                    self.crossbar_operation(
                                        self.crossbars, lambda crossbar:
                                        crossbar.conductance_matrix,
                                        (channel_idx, channel_idx + 1)),
                                    unfolded_batch_channel_input.T)).view(
                                        self.out_channels, output_dim[0],
                                        output_dim[1], output_dim[2])
                            channel_idx += 2
                        elif self.scheme == memtorch.bh.Scheme.SingleColumn:
                            out[batch, :, :, :, :] += self.transform_output(
                                torch.matmul(
                                    self.crossbar_operation(
                                        self.crossbars, lambda crossbar:
                                        crossbar.conductance_matrix,
                                        channel_idx),
                                    unfolded_batch_channel_input.T)).view(
                                        self.out_channels, output_dim[0],
                                        output_dim[1], output_dim[2])
                            channel_idx += 1
                        else:
                            raise Exception('Scheme is currently unsupported.')

                if self.bias is not None:
                    out[batch] += self.bias.view(-1, 1, 1, 1).expand_as(out[batch])

            return out
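
Example #6 processes one input channel at a time and accumulates the partial products into out[batch], stepping channel_idx by two columns per channel under the double-column scheme and by one under the single-column scheme. The decomposition it relies on, checked with plain torch ops:

import torch
import torch.nn.functional as F

x = torch.randn(1, 2, 6, 6, 6)                     # (N, C_in, D, H, W)
W = torch.randn(3, 2, 3, 3, 3)                     # (C_out, C_in, kD, kH, kW)
reference = F.conv3d(x, W)

# A full convolution is the sum of single-input-channel convolutions,
# which is why the loop above may accumulate per-channel partial results.
out = torch.zeros_like(reference)
for c in range(x.shape[1]):
    out += F.conv3d(x[:, c:c + 1], W[:, c:c + 1])
assert torch.allclose(out, reference, atol=1e-4)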
Example #7
    def forward(self, input):
        """Method to perform forward propagations.

            Parameters
            ----------
            input : torch.Tensor
                Input tensor.

            Returns
            -------
            torch.Tensor
                Output tensor.
        """
        output_dim = int(
            (input.shape[2] - self.kernel_size[0] + 2 * self.padding[0]) /
            self.stride[0]) + 1
        out = torch.zeros((input.shape[0], self.out_channels, output_dim,
                           output_dim)).to(self.device)
        for batch in range(input.shape[0]):
            unfolded_batch_input = torch.nn.functional.unfold(
                input[batch, :, :, :].unsqueeze(0),
                kernel_size=self.kernel_size,
                padding=self.padding)
            if hasattr(self, 'non_linear'):
                unfolded_batch_input = convert_range(
                    unfolded_batch_input, unfolded_batch_input.min(),
                    unfolded_batch_input.max(), -1, 1).squeeze(0)
                unfolded_batch_input = unfolded_batch_input.transpose(
                    1, 0).cpu().detach().numpy()
                if hasattr(self, 'simulate'):
                    out_ = torch.tensor(
                        self.transform_output(
                            self.crossbar_operation(
                                self.crossbars,
                                lambda crossbar, input: simulate_matmul(
                                    input,
                                    crossbar.devices.transpose(1, 0),
                                    nl=False),
                                unfolded_batch_input))).to(self.device)
                else:
                    out_ = torch.tensor(
                        self.transform_output(
                            self.crossbar_operation(
                                self.crossbars,
                                lambda crossbar, input: simulate_matmul(
                                    input,
                                    crossbar.devices.transpose(1, 0),
                                    nl=True),
                                unfolded_batch_input))).to(self.device)
            else:
                out_ = self.transform_output(
                    torch.matmul(
                        self.crossbar_operation(
                            self.crossbars,
                            lambda crossbar: crossbar.conductance_matrix),
                        unfolded_batch_input))

            if self.bias is not None:
                out_ += self.bias.view(-1, 1).expand_as(out_)

            out[batch] = out_.view(size=(1, self.out_channels, output_dim,
                                         output_dim))

        return out
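
Example #7 uses torch.nn.functional.unfold to flatten each 2-D patch into a column before the crossbar multiply. The same rewrite with ordinary weights, as a torch-only check (illustrative sizes):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8)
W = torch.randn(5, 3, 3, 3)
reference = F.conv2d(x, W, padding=1)

unfolded = F.unfold(x, kernel_size=(3, 3), padding=1)   # (1, C_in*kH*kW, H*W)
out = (W.view(5, -1) @ unfolded[0]).view(1, 5, 8, 8)
assert torch.allclose(out, reference, atol=1e-4)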
Example #8
def test_crossbar(shape):
    device = torch.device("cpu" if "cpu" in memtorch.__version__ else "cuda")
    memristor_model = memtorch.bh.memristor.LinearIonDrift
    memristor_model_params = {"time_series_resolution": 1e-3}
    pos_crossbar = memtorch.bh.crossbar.Crossbar(
        memristor_model, memristor_model_params, shape
    )
    pos_crossbar.devices[0][0].g = 1 / pos_crossbar.r_off_mean
    pos_crossbar.devices[0][1].g = 1 / pos_crossbar.r_on_mean
    neg_crossbar = copy.deepcopy(pos_crossbar)
    pos_conductance_matrix, neg_conductance_matrix = naive_map(
        torch.zeros(shape).uniform_(0, 1),
        memristor_model().r_on,
        memristor_model().r_off,
        memtorch.bh.crossbar.Scheme.DoubleColumn,
    )
    pos_crossbar.write_conductance_matrix(
        pos_conductance_matrix, transistor=False, programming_routine=naive_program
    )
    neg_crossbar.write_conductance_matrix(
        neg_conductance_matrix, transistor=False, programming_routine=naive_program
    )

    crossbar = memtorch.bh.crossbar.Crossbar(
        memristor_model, memristor_model_params, shape
    )
    conductance_matrix = naive_map(
        torch.zeros(shape).uniform_(0, 1),
        memristor_model().r_on,
        memristor_model().r_off,
        memtorch.bh.crossbar.Scheme.SingleColumn,
    )
    crossbar.write_conductance_matrix(conductance_matrix)
    if sys.version_info > (3, 6):
        crossbar.update(from_devices=False, parallelize=True)
        assert torch.all(
            torch.isclose(
                conductance_matrix.T[:, :],
                crossbar.conductance_matrix.cpu()[:, :],
                atol=1e-5,
            )
        )
        assert crossbar.devices[0][0].g == crossbar.conductance_matrix[0][0].item()

    crossbar.update(from_devices=False, parallelize=False)
    assert crossbar.devices[0][0].g == crossbar.conductance_matrix[0][0].item()
    inputs = torch.zeros(shape).uniform_(0, 1)
    assert torch.all(
        torch.isclose(
            simulate_matmul(inputs, crossbar).float(),
            torch.matmul(inputs, conductance_matrix.T).float().to(device),
            rtol=1e-1,
        )
    )
    programming_signal = gen_programming_signal(
        1, 1e-3, 0, 1, memristor_model_params["time_series_resolution"]
    )
    assert type(programming_signal) == tuple
    with pytest.raises(AssertionError):
        gen_programming_signal(
            1, 1e-4, 0, 1, memristor_model_params["time_series_resolution"]
        )

    point = (0, 0)
    row, column = point
    conductance_to_program = random.uniform(
        1 / crossbar.devices[row][column].r_off, 1 / crossbar.devices[row][column].r_on
    )
    crossbar.devices = naive_program(
        crossbar, (row, column), conductance_to_program, rel_tol=0.01
    )
    assert math.isclose(
        conductance_to_program, crossbar.devices[row][column].g, abs_tol=1e-4
    )
    with pytest.raises(AssertionError):
        naive_program(crossbar, (row, column), -1)
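
Both crossbar tests take shape as an argument; in a pytest suite they would normally be parametrized. A hypothetical decorator is shown below; the shapes actually exercised upstream may differ.

import pytest

@pytest.mark.parametrize("shape", [(2, 2), (10, 5)])
def test_crossbar(shape):
    ...  # body as in Example #8 above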