Example No. 1
    def learn(self, net, grad):
        print("TrainRprop learn here")
        print(
            "TrainRprop before:\n self.rate = {0}\nself.x = {1}\nself.grad_prev = {2}".format(
                self.rate, self.x, self.grad_prev
            )
        )
        prod = grad * self.grad_prev
        # Gradient sign did not change: grow the step size
        ind = prod > 0
        print(
            "Parameters is:\ngrad = {0}\nind={1}\nself.rate_inc = {2}\nself.rate_dec = {3}\nself.rate_max = {4}\nself.rate_min = {5}".format(
                grad, ind, self.rate_inc, self.rate_dec, self.rate_max, self.rate_min
            )
        )
        self.rate[ind] *= self.rate_inc
        # Gradient sign changed: shrink the step size
        ind = prod < 0
        self.rate[ind] *= self.rate_dec

        # Clamp the per-weight step sizes to [rate_min, rate_max]
        self.rate[self.rate > self.rate_max] = self.rate_max
        self.rate[self.rate < self.rate_min] = self.rate_min

        # Step against the gradient sign; the magnitude comes from rate alone
        self.x -= self.rate * np.sign(grad)
        self.grad_prev = grad
        print(
            "TrainRprop after:\n self.rate = {0}\nself.x = {1}\nself.grad_prev = {2}".format(
                self.rate, self.x, self.grad_prev
            )
        )
        return None
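
The method above is an Rprop learn step with debug printing added. As a minimal standalone sketch of the same sign-based rule, assuming a toy quadratic objective and illustrative hyperparameter defaults (rate_inc, rate_dec, rate_min, rate_max are made up here, not taken from the class):

import numpy as np

def rprop_step(x, grad, grad_prev, rate,
               rate_inc=1.2, rate_dec=0.5, rate_min=1e-6, rate_max=50.0):
    """One Rprop update: grow the step where the gradient kept its sign,
    shrink it where the sign flipped, then move against the gradient sign."""
    rate = rate.copy()
    prod = grad * grad_prev
    rate[prod > 0] *= rate_inc               # same sign: accelerate
    rate[prod < 0] *= rate_dec               # sign flipped: we overshot, slow down
    rate = np.clip(rate, rate_min, rate_max)
    x = x - rate * np.sign(grad)             # step size ignores the gradient magnitude
    return x, rate, grad.copy()

# Toy usage: minimize f(x) = sum(x**2), whose gradient is 2 * x.
x = np.array([3.0, -2.0])
rate = np.full_like(x, 0.07)
grad_prev = np.zeros_like(x)
for _ in range(50):
    x, rate, grad_prev = rprop_step(x, 2 * x, grad_prev, rate)
print(x)  # both components oscillate toward 0
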
Example No. 2
    def __call__(self, net, input, target):
        layer = net.layers[0]
        if self.adapt:
            while True:
                # epochf is expected to raise (e.g. on goal or epoch limit) to end training
                self.epochf(None, net, input, target)

                for inp, tar in zip(input, target):
                    out = net.step(inp)
                    err = tar - out
                    win = np.argmax(layer.out)
                    if np.max(err) == 0.0:
                        # Correctly classified: pull the winning prototype toward the input
                        layer.np["w"][win] += self.lr * (inp - layer.np["w"][win])
                    else:
                        # Misclassified: push the winning prototype away from the input
                        layer.np["w"][win] -= self.lr * (inp - layer.np["w"][win])
        else:
            while True:
                output = []
                winners = []
                for inp, tar in zip(input, target):
                    out = net.step(inp)
                    output.append(out)
                    winners.append(np.argmax(layer.out))

                e = self.error(net, input, target, output)
                self.epochf(e, net, input, target)

                error = target - output
                # +1 where the sample was classified correctly (max error == 0), -1 otherwise
                sign = np.sign((np.max(error, axis=1) == 0) - 0.5)
                # Pull correct winners toward their inputs, push wrong winners away
                layer.np["w"][winners] += self.lr * sign[:, np.newaxis] * (input - layer.np["w"][winners])
        return None
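
Both branches implement the classic LVQ1 rule: the winning prototype is attracted to the input when the classification is correct and repelled otherwise. A self-contained sketch of that rule, with made-up prototypes, labels, and learning rate (none of these values come from the snippet above):

import numpy as np

def lvq1_epoch(w, labels, inputs, targets, lr=0.05):
    """One epoch of LVQ1: the nearest prototype wins; it moves toward the
    sample if its label matches the target class, away from it otherwise."""
    for inp, tar in zip(inputs, targets):
        win = np.argmin(np.linalg.norm(w - inp, axis=1))
        direction = 1.0 if labels[win] == tar else -1.0
        w[win] += direction * lr * (inp - w[win])
    return w

# Two prototypes, one per class, and a handful of labelled samples.
w = np.array([[0.0, 0.0], [1.0, 1.0]])
labels = np.array([0, 1])
inputs = np.array([[0.1, 0.2], [0.9, 0.8], [0.2, 0.0], [1.1, 0.9]])
targets = np.array([0, 1, 0, 1])
for _ in range(10):
    w = lvq1_epoch(w, labels, inputs, targets)
print(w)  # each prototype drifts toward the centroid of its class
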
Example No. 3
    def learn(self, net, grad):

        print("TrainRpropM learn here")
        prod = grad * self.grad_prev
        # Gradient sign did not change: grow the step size
        ind = prod > 0
        self.rate[ind] *= self.rate_inc
        # Gradient sign changed: the previous step overshot
        ind = prod < 0
        # Back step: undo the previous update for these weights
        self.x[ind] -= self.rate[ind] * np.sign(grad[ind])
        # Flip the stored gradient so the final update retries the previous
        # direction with the reduced step size
        grad[ind] *= -1

        self.rate[ind] *= self.rate_dec

        self.rate[self.rate > self.rate_max] = self.rate_max
        self.rate[self.rate < self.rate_min] = self.rate_min

        self.x -= self.rate * np.sign(grad)
        self.grad_prev = grad
        return None
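
Compared with Example No. 1, this variant adds weight backtracking: for weights whose gradient sign flipped, the back step undoes the previous update (the current gradient sign is the opposite of the previous one), the stored gradient entry is negated, and the final update then retries the previous direction with the reduced step size.
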
Example No. 4
    def deriv(self, e):
        """
        Derivative of SAE error function

        :Parameters:
            e: ndarray
                current errors: target - output
        :Returns:
            d: ndarray
                Derivative: dE/d_out

        """
        d = np.sign(e) / e.size
        return d
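
The 1/e.size factor suggests the error being differentiated is the mean of absolute errors; assuming that definition, the value/derivative pair can be sketched standalone as:

import numpy as np

def sae(e):
    """Error value matching the derivative above: mean of |target - output|."""
    return np.sum(np.abs(e)) / e.size

def sae_deriv(e):
    """d sae / d e: the subgradient of |e| is sign(e), scaled by 1/e.size."""
    return np.sign(e) / e.size

e = np.array([[0.5, -0.25], [0.0, 1.0]])
print(sae(e))        # 0.4375
print(sae_deriv(e))  # [[ 0.25 -0.25] [ 0.    0.25]]
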
Example No. 5
import numpy as np


def initnw(layer):
    """
    Nguyen-Widrow initialization function

    """
    ci = layer.ci
    cn = layer.cn
    w_fix = 0.7 * cn ** (1. / ci)
    w_rand = np.random.rand(cn, ci) * 2 - 1
    # Normalize
    if ci == 1:
        w_rand = w_rand / np.abs(w_rand)
    else:
        w_rand = w_rand * np.sqrt(1. / np.square(w_rand).sum(axis=1).reshape(cn, 1))

    w = w_fix * w_rand
    b = np.array([0]) if cn == 1 else w_fix * np.linspace(-1, 1, cn) * np.sign(w[:, 0])

    # Scale to the active input range of the transfer function
    amin, amax = layer.transf.inp_active
    amin = -1 if amin == -np.inf else amin
    amax = 1 if amax == np.inf else amax

    x = 0.5 * (amax - amin)
    y = 0.5 * (amax + amin)
    w = x * w
    b = x * b + y

    # Scale to the layer's input min/max range
    minmax = layer.inp_minmax.copy()
    minmax[np.isneginf(minmax)] = -1
    minmax[np.isinf(minmax)] = 1

    x = 2. / (minmax[:, 1] - minmax[:, 0])
    y = 1. - minmax[:, 1] * x
    w = w * x

    b += np.dot(w, y)
    layer.np['w'][:] = w
    layer.np['b'][:] = b

    return
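
initnw operates on a neurolab layer object, but the core Nguyen-Widrow rule it implements is independent of that: each neuron gets a weight vector of norm w_fix = 0.7 * cn ** (1 / ci), and the biases are spread evenly over [-w_fix, w_fix] and then sign-matched to the first weight column. A standalone sketch of just that core (hypothetical cn and ci, no activation or input rescaling):

import numpy as np

def nguyen_widrow_core(cn, ci, rng=None):
    """Weights with row norm w_fix = 0.7 * cn ** (1 / ci); biases spread
    uniformly over [-w_fix, w_fix], signed to match the first weight column."""
    rng = rng or np.random.default_rng(0)
    w_fix = 0.7 * cn ** (1.0 / ci)
    w = rng.uniform(-1, 1, (cn, ci))
    w *= w_fix / np.linalg.norm(w, axis=1, keepdims=True)
    b = w_fix * np.linspace(-1, 1, cn) * np.sign(w[:, 0])
    return w, b

w, b = nguyen_widrow_core(cn=5, ci=2)
print(np.linalg.norm(w, axis=1))  # every row norm equals 0.7 * 5 ** 0.5 ≈ 1.565
print(b)                          # biases lie in [-w_fix, w_fix]
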