    def add_param_DEPRECATED(self, param, key=None, shape=None, length=None):
        logging.warning("add_param method is DEPRECATED")
        # Validate the input before mutating any bookkeeping state.
        if not isinstance(param, core.BlobReference):
            raise ValueError("Param %s must be a BlobReference!" % str(param))
        self._update_param_info_deprecated()
        self.AddParameter(param)
        # Resolve `key` against the net's input record, if one is set.
        if key is not None and self.net.input_record() is not None:
            idx = self.net.input_record().field_blobs().index(key)
            key = self.net.input_record().field_names()[idx]
        shape = shape if shape is not None else self._infer_param_shape(param)
        self._param_info_deprecated.append(
            parameter_info.ParameterInfo(
                param_id=len(self._param_info_deprecated),
                param=param,
                shape=shape,
                key=key,
                length=length,
            ))
        return self._param_info_deprecated[-1]
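
    # A minimal usage sketch for the method above (assumption: `model` is a
    # ModelHelper instance whose param_init_net has created the blob; the
    # names `model` and `w` are hypothetical):
    #
    #     w = model.param_init_net.ConstantFill([], 'w', shape=[10], value=0.0)
    #     info = model.add_param_DEPRECATED(w, shape=[10])
    #     assert info.shape == [10]
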
    def AddGradientOperators(self, *args, **kwargs):
        if self.gradient_ops_added:
            raise RuntimeError("You cannot run AddGradientOperators twice.")
        self.gradient_ops_added = True
        self.grad_map = self.net.AddGradientOperators(*args, **kwargs)
        self.param_to_grad = self.get_param_to_grad(self.params)

        # Populate a ParameterInfo entry for any parameter that lacks one, and
        # record each parameter's gradient blob so that optimizers can use it.
        for param, grad in self.param_to_grad.items():
            param_info = self.get_param_info(param)
            if param_info:
                param_info.grad = grad
            else:
                self._parameters_info[param] = parameter_info.ParameterInfo(
                    param_id=None,
                    param=param,
                    grad=grad,
                )

        return self.grad_map
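
    # A minimal usage sketch for AddGradientOperators (assumption: `model` is
    # a ModelHelper whose net computes a scalar `loss` blob; the names `model`
    # and `loss` are hypothetical):
    #
    #     grad_map = model.AddGradientOperators([loss])
    #     for param, grad in model.param_to_grad.items():
    #         print(param, '->', grad)  # each param's gradient blob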