Example #1
    def _calcResidual(self, step_out, tmp_results, step_in, data):

        f_new = clarray.vdot(tmp_results["DADA"], tmp_results["DAd"]) + clarray.sum(
            self.lambd
            * clmath.log(1 + clarray.vdot(tmp_results["gradx"], tmp_results["gradx"]))
        )

        # TODO: calculate on GPU
        f_new = np.linalg.norm(f_new.get())

        grad_f = np.linalg.norm(tmp_results["gradFx"].get())

        # TODO: compute the data cost here or take it from the caller
        # datacost = 0  # self._fval_init
        # TODO: calculate on GPU
        datacost = 2 * np.linalg.norm(tmp_results["Ax"] - data) ** 2
        # datacost = 2 * np.linalg.norm(data - b) ** 2
        # self._FT.FFT(b, clarray.to_device(
        #       self._queue[0], (self._step_val[:, None, ...] *
        #          self.par["C"]))).wait()
        # b = b.get()
        # datacost = 2 * np.linalg.norm(data - b) ** 2
        # TODO: calculate on GPU
        L2Cost = np.linalg.norm(step_out["x"].get()) / (2.0 * self.delta)
        regcost = self.lambd * np.sum(
            np.abs(
                clmath.log(
                    1 + clarray.vdot(tmp_results["gradx"], tmp_results["gradx"])
                ).get()
            )
        )
        costs = datacost + L2Cost + regcost
        return costs, f_new, grad_f
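
The "calculate on GPU" TODOs above ask for the host-side np.linalg.norm(... .get()) calls to be moved onto the device. A minimal standalone sketch of that idea, not taken from the project and assuming only a pyopencl context and queue, reduces the L2 norm with clarray.vdot so that just one scalar is copied back to the host:

import numpy as np
import pyopencl as cl
import pyopencl.array as clarray

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

def gpu_l2norm(x):
    # ||x||_2 reduced on the device; vdot conjugates its first argument,
    # so the result is real and non-negative for real or complex x.
    return np.sqrt(np.abs(clarray.vdot(x, x).get()))

x = clarray.to_device(queue, np.random.randn(1024).astype(np.float32))
assert np.allclose(gpu_l2norm(x), np.linalg.norm(x.get()), rtol=1e-5)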
Example #2
    def _calcResidual(self, step_out, tmp_results, step_in, data):

        temp_fwd_data = self.normkrnldiff(tmp_results["Ax"], data)

        regcost = self.lambd * np.sum(
            np.abs(
                clmath.log(
                    1 + clarray.vdot(tmp_results["gradx"], tmp_results["gradx"])
                ).get()
            )
        )

        f = (
            temp_fwd_data
            + 1 / (2 * self.delta) * self.normkrnldiff(step_out["x"], step_in["xk"])
            + regcost
        )

        f_new = np.linalg.norm(f.get())

        self.normkernl(tmp_results["gradFx"], tmp_results["gradFx"])
        grad_f = np.linalg.norm(tmp_results["gradFx"].get())

        datacost = 2 * temp_fwd_data ** 2
        # L2Cost =  np.linalg.norm(self.normkrnldiff(step_out["x"], step_in["xk"]).get()) / (2.0 * self.delta)
        # L2Cost = np.linalg.norm(step_out["x"].get()) / (2.0 * self.delta)

        costs = datacost + regcost
        return costs.get(), f_new, grad_f
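
normkrnldiff and normkernl are project-specific reduction kernels whose definitions are not shown here. A minimal sketch of how such a squared-difference reduction can be built with pyopencl's ReductionKernel, assuming real-valued float32 arrays and using hypothetical names:

import numpy as np
import pyopencl as cl
import pyopencl.array as clarray
from pyopencl.reduction import ReductionKernel

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# Sums (x[i] - y[i])**2 entirely on the device; only the scalar result
# is transferred back to the host.
sqdiff_krnl = ReductionKernel(
    ctx, np.float32, neutral="0",
    reduce_expr="a+b",
    map_expr="(x[i] - y[i]) * (x[i] - y[i])",
    arguments="__global const float *x, __global const float *y",
)

x = clarray.to_device(queue, np.random.rand(4096).astype(np.float32))
y = clarray.to_device(queue, np.random.rand(4096).astype(np.float32))
gpu = sqdiff_krnl(x, y).get()
cpu = np.sum((x.get() - y.get()) ** 2)
assert np.allclose(gpu, cpu, rtol=1e-4)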
Example #3
    def log(t: Tensor) -> Tensor:
        """Returns a natural logarithm of a tensor."""

        if t.gpu:
            return Tensor(clmath.log(t._data), gpu=True)

        return Tensor(np.log(t._data))
Example #4
    def _rev_grad(self, valuation, adjoint, gradient, cache):
        base = cache[id(self.ops[0])]
        exp = cache[id(self.ops[1])]
        self.ops[0]._rev_grad(valuation, adjoint * exp * base**(exp - 1),
                              gradient, cache)
        self.ops[1]._rev_grad(valuation,
                              adjoint * clmath.log(base) * base**exp, gradient,
                              cache)
Example #5
    def _fwd_grad(self, wrt, valuation, cache):
        base = cache[id(self.ops[0])]
        exp = cache[id(self.ops[1])]
        dbase = self.ops[0]._fwd_grad(wrt, valuation, cache)
        dexp = self.ops[1]._fwd_grad(wrt, valuation, cache)

        # TODO div by zero check (base == 0)
        return base**(exp - 1) * (exp * dbase + base * dexp * clmath.log(base))
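
Examples #4 and #5 implement the two partial derivatives of base**exp, namely exp * base**(exp - 1) with respect to the base and base**exp * log(base) with respect to the exponent; the forward rule above combines them into base**(exp - 1) * (exp * dbase + base * dexp * log(base)). A quick standalone finite-difference check of that formula in plain NumPy, independent of the project's operator classes:

import numpy as np

def fwd_grad_pow(base, exp, dbase, dexp):
    # Forward-mode tangent of base**exp, mirroring the snippet above
    # (assumes base > 0 so that log(base) is defined).
    return base ** (exp - 1) * (exp * dbase + base * dexp * np.log(base))

base, exp = 2.3, 1.7
dbase, dexp = 1.0, 0.5       # direction of the perturbation
eps = 1e-6

numeric = ((base + eps * dbase) ** (exp + eps * dexp) - base ** exp) / eps
analytic = fwd_grad_pow(base, exp, dbase, dexp)
assert abs(numeric - analytic) < 1e-4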
Example #6
File: mynp.py  Project: ixtel/neurolabcl
def log(arr):
    res = clmath.log(arr, queue=queue)
    res.__class__ = myclArray
    res.reinit()
    return res
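
Here clmath.log serves as a device-side drop-in for np.log; queue and myclArray come from the surrounding project and are not shown. A minimal standalone check that the two elementwise logarithms agree, assuming only a pyopencl context:

import numpy as np
import pyopencl as cl
import pyopencl.array as clarray
import pyopencl.clmath as clmath

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

host = np.linspace(0.5, 8.0, 16).astype(np.float32)
dev = clarray.to_device(queue, host)

# clmath.log computes the elementwise natural logarithm of the device array;
# the explicit queue keyword mirrors its use in the wrapper above.
dev_log = clmath.log(dev, queue=queue)
assert np.allclose(dev_log.get(), np.log(host), rtol=1e-5)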
Example #7
    def _evaluate(self, valuation, cache):
        if id(self) not in cache:
            cache[id(self)] = clmath.log(self.ops[0]._evaluate(
                valuation, cache))
        return cache[id(self)]
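
The cache dict keyed by id(self) memoizes each node's value, so shared sub-expressions are evaluated only once per pass. A minimal standalone illustration of the same pattern, using NumPy in place of clmath and hypothetical Const/Log classes rather than the project's API:

import numpy as np

class Const:
    def __init__(self, value):
        self.value = np.asarray(value, dtype=np.float64)

    def _evaluate(self, valuation, cache):
        return self.value

class Log:
    def __init__(self, op):
        self.ops = [op]

    def _evaluate(self, valuation, cache):
        # Memoize by object identity so shared subgraphs are computed once.
        if id(self) not in cache:
            cache[id(self)] = np.log(self.ops[0]._evaluate(valuation, cache))
        return cache[id(self)]

node = Log(Const([1.0, np.e, np.e ** 2]))
cache = {}
print(node._evaluate({}, cache))   # [0. 1. 2.]
print(id(node) in cache)           # True: the value is now cached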