Example No. 1
    def apply_drift_noise_to_conductance(self, g_prog: Tensor,
                                         nu_drift: Tensor,
                                         t_inference: float) -> Tensor:
        """Apply the noise and drift up to the assumed inference time
        point based on PCM measurements."""
        t = t_inference + self.t_0

        # drift
        if t > self.t_0:
            g_drift = g_prog * ((t / self.t_0)**(-nu_drift))
        else:
            g_drift = g_prog

        # expected accumulated 1/f noise since start of programming at t=0
        if t > 0:
            q_s = (0.0088 / (
                (torch_abs(g_prog) / self.g_max)**0.65).clamp(min=1e-3)).clamp(
                    max=0.2)
            sig_noise = q_s * sqrt(
                numpy_log((t + self.t_read) / (2 * self.t_read)))
            g_final = g_drift + torch_abs(g_drift) * self.read_noise_scale \
                * sig_noise * randn_like(g_prog)
        else:
            g_final = g_prog

        return g_final.clamp(min=0.0)
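
A rough, self-contained sketch of the same drift-plus-noise model as a standalone function (the default values for t_0, t_read, g_max and read_noise_scale below are illustrative placeholders, not constants taken from the snippet):

import math

import torch


def pcm_drift_and_noise_sketch(g_prog: torch.Tensor, nu_drift: torch.Tensor,
                               t_inference: float, t_0: float = 20.0,
                               t_read: float = 250.0e-9, g_max: float = 25.0,
                               read_noise_scale: float = 1.0) -> torch.Tensor:
    """Sketch only: power-law conductance drift plus accumulated 1/f read noise."""
    t = t_inference + t_0
    # conductance decays as (t / t_0)**(-nu) once past the programming time t_0
    g_drift = g_prog * (t / t_0)**(-nu_drift) if t > t_0 else g_prog
    # noise amplitude grows with the log of the elapsed time since programming
    q_s = (0.0088 / (g_prog.abs() / g_max).pow(0.65).clamp(min=1e-3)).clamp(max=0.2)
    sig_noise = q_s * math.sqrt(math.log((t + t_read) / (2 * t_read)))
    g_final = g_drift + g_drift.abs() * read_noise_scale * sig_noise * torch.randn_like(g_prog)
    return g_final.clamp(min=0.0)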
Example No. 2
    def generate_drift_coefficients(self, g_target: Tensor) -> Tensor:
        """Return drift coefficients ``nu`` based on PCM measurements."""
        g_relative = clamp(torch_abs(g_target / self.g_max), min=_ZERO_CLIP)

        # the fitted coefficients below expect g_target normalized wrt g_max
        mu_drift = (-0.0155 * log(g_relative) + 0.0244).clamp(min=0.049,
                                                              max=0.1)
        sig_drift = (-0.0125 * log(g_relative) - 0.0059).clamp(min=0.008,
                                                               max=0.045)
        nu_drift = torch_abs(mu_drift +
                             sig_drift * randn_like(g_relative)).clamp(min=0.0)

        return nu_drift * self.drift_scale
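
Hypothetical usage tying the two methods above together (the noise_model object and the example values are assumed for illustration, not taken from the source):

# assumed: noise_model exposes the two methods shown above
g_target = torch.tensor([5.0, 10.0, 20.0])               # programmed conductances
nu = noise_model.generate_drift_coefficients(g_target)    # per-element drift exponents
g_t = noise_model.apply_drift_noise_to_conductance(g_target, nu, t_inference=3600.0)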
Example No. 3
    def generate_drift_coefficients(self, g_target: Tensor) -> Tensor:
        """Return drift coefficients ``nu``."""

        mu_drift = self.drift_nu_mean
        sig_drift = self.drift_nu_std
        nu_drift = torch_abs(mu_drift +
                             sig_drift * randn_like(g_target)).clamp(min=0.0)
        return nu_drift * self.drift_scale
Example No. 4
    def lossMAE(self, v, t):
        """
        Calculate the MAE-style loss as a sum of absolute errors.
        :param v: predicted values
        :param t: target values
        :return: sum of the absolute differences between ``v`` and ``t``
        """
        return torch_sum(torch_abs(v - t))
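
Note that this returns the sum of the absolute errors rather than their mean; a quick sanity check with arbitrary values:

v = torch.tensor([1.0, 2.0, 3.0])
t = torch.tensor([1.5, 2.0, 2.5])
torch.sum(torch.abs(v - t))   # tensor(1.), i.e. 0.5 + 0.0 + 0.5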
Example No. 5
    def convert_to_conductances(self,
                                weights: Tensor) -> Tuple[List[Tensor], Dict]:
        abs_max = torch_abs(weights).max()
        scale_ratio = (self.g_max - self.g_min) / abs_max.clamp(min=_ZERO_CLIP)
        scaled_weights = weights * scale_ratio

        conductances = [
            scaled_weights.clamp(min=0.0, max=self.g_max) + self.g_min,
            (-scaled_weights).clamp(min=0.0, max=self.g_max) + self.g_min
        ]
        params = {'scale_ratio': scale_ratio}

        return conductances, params
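
A worked numeric sketch of the differential-pair encoding above, assuming g_min = 0.0 and g_max = 25.0 (values chosen only for illustration); the original weight is recovered as (g_plus - g_minus) / scale_ratio:

weights = torch.tensor([-1.0, 0.5, 2.0])
scale = (25.0 - 0.0) / weights.abs().max()              # 12.5
g_plus = (weights * scale).clamp(min=0.0, max=25.0)     # [ 0.00, 6.25, 25.00]
g_minus = (-weights * scale).clamp(min=0.0, max=25.0)   # [12.50, 0.00,  0.00]
(g_plus - g_minus) / scale                              # back to [-1.0, 0.5, 2.0]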
Example No. 6
    def readout(self, out_tensor: Tensor) -> Tensor:
        """Reads out the absolute max, clamped away from zero."""
        return clamp(torch_abs(out_tensor).max(), min=0.0001)
    def test_gradients_and_parameter_updates(self):
        """
        Test that all parameters undergo loss gradient computation with
        respect to them and are subsequently updated.
        """
        # switching to training mode so that all parameters can undergo
        # backpropagation:
        self.layer.train()

        # defining an optimizer for updating all parameters of the layer -
        # learning rate is exaggerated to have meaningful updates for all
        # parameters even where their gradient is very weak:
        learning_rate = 1e12
        optimizer = SGD(self.layer.parameters(), lr=learning_rate)

        # making sure no gradient is accumulated for any parameter, so that
        # each parameter's gradient is not defined yet:
        optimizer.zero_grad(set_to_none=True)

        # taking an initial snapshot of all parameters before any
        # backpropagation pass:
        initial_parameter_dict = {
            name: deepcopy(parameter_vector)
            for name, parameter_vector in self.layer.named_parameters()
        }

        # computing the layer outputs after a forward propagation pass:
        outputs = self.layer(**self.forward_propagation_kwargs)

        # computing a hypothetical loss - averaging outputs for convenience:
        loss = outputs.mean()

        # computing loss gradients with respect to all layer parameters that
        # require gradient computation:
        loss.backward()

        # asserting that every parameter that requires gradient computation
        # has undergone loss gradient computation:

        subtest_base_name = "gradients"
        # for every parameter vector:
        for name, parameter_vector in self.layer.named_parameters():
            subtest_name = subtest_base_name + ' - ' + name
            with self.subTest(subtest_name):
                # only parameters that require gradient computation are
                # considered:
                if parameter_vector.requires_grad:
                    gradients = parameter_vector.grad
                    self.assertIsNotNone(gradients)
                    # asserting that at least a single parameter gradient in
                    # the vector of parameters is different from zero:
                    self.assertNotEqual(0., torch_sum(torch_abs(gradients)))

        # updating all layer parameters based on their gradients:
        optimizer.step()

        # asserting that every parameter has been updated:

        subtest_base_name = "parameter updates"
        # for every parameter vector:
        for name, updated_parameter_vector in self.layer.named_parameters():
            subtest_name = subtest_base_name + ' - ' + name
            with self.subTest(subtest_name):
                # only parameters that require gradient computation, i.e.
                # adjustment, are considered:
                if updated_parameter_vector.requires_grad:
                    self.assertFalse(
                        torch_equal(
                            initial_parameter_dict[name],  # initial values
                            updated_parameter_vector  # updated values
                        ))
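
For context, a minimal hypothetical fixture that would make the test above runnable against a plain linear layer; the attribute names mirror those used in the test body, while the layer, shapes and class name are assumed:

import unittest
from copy import deepcopy

import torch
from torch import abs as torch_abs, equal as torch_equal, sum as torch_sum
from torch.nn import Linear
from torch.optim import SGD


class LinearLayerGradientTest(unittest.TestCase):
    """Hypothetical test case wiring up the attributes used by the test above."""

    def setUp(self):
        # any differentiable layer and matching forward kwargs would do here
        self.layer = Linear(in_features=4, out_features=2)
        self.forward_propagation_kwargs = {'input': torch.randn(8, 4)}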