Example #1
0
    def __call__(self, net, input, target):
        """Train the network with the LVQ winner-take-all rule.

        :Parameters:
            net: network to train; the competitive layer is net.layers[0]
            input: array of input vectors, one row per sample
            target: array of target vectors aligned row-for-row with input

        :Returns:
            None

        Both modes loop until ``self.epochf`` raises a stop condition —
        the ``while True`` loops have no other exit.
        """
        layer = net.layers[0]
        if self.adapt:
            # Incremental (on-line) mode: update the winner after every sample.
            while True:
                self.epochf(None, net, input, target)

                for inp, tar in zip(input, target):
                    out = net.step(inp)
                    err = tar - out
                    win = np.argmax(layer.out)
                    if np.max(err) == 0.0:
                        # Correct classification: attract winner toward input.
                        layer.np["w"][win] += self.lr * (inp - layer.np["w"][win])
                    else:
                        # Misclassification: repel winner from input.
                        layer.np["w"][win] -= self.lr * (inp - layer.np["w"][win])
        else:
            # Batch mode: collect every sample's winner, then update once per epoch.
            while True:
                output = []
                winners = []
                for inp, tar in zip(input, target):
                    out = net.step(inp)
                    output.append(out)
                    winners.append(np.argmax(layer.out))

                e = self.error(net, input, target, output)
                self.epochf(e, net, input, target)

                error = target - output
                # +1 for correctly classified rows (max error == 0), -1 otherwise,
                # mirroring the attract/repel branches of the adaptive mode.
                sign = np.sign((np.max(error, axis=1) == 0) - 0.5)
                # BUG FIX: `sign` was computed but never applied, so misclassified
                # samples were attracted instead of repelled.  Reshape to a column
                # so it broadcasts across the feature dimension.
                layer.np["w"][winners] += (self.lr * sign[:, np.newaxis]
                                           * (input - layer.np["w"][winners]))
        return None
Example #2
0
    def error(self, net, input):
        """Quantization error of the competitive layer.

        For each input vector the winner is the neuron with the largest
        activation; the per-sample error is the difference between the
        winner's weight vector and the input.

        :Parameters:
            net: network whose first layer is the competitive layer
            input: array of input vectors, one row per sample

        :Returns:
            ``net.errorf`` applied to the (n_samples, n_features) error array
        """
        layer = net.layers[0]
        output = net.sim(input)
        # Index of the winning neuron for every sample.
        winners = np.argmax(output, axis=1)
        # Difference between each winner's weight vector and its input.
        # (Removed dead code: `winner_output = np.zeros_like(input)` was
        # computed and never used.)
        e = layer.np["w"][winners] - input

        return net.errorf(e)
Example #3
0
    def learn(self, net, input):
        """One epoch of winner-take-all (WTA) learning.

        For every input vector the winning neuron (largest activation)
        is moved toward the input, scaled by the learning rate and the
        winner's entry in the layer's last-distance vector.

        :Parameters:
            net: network whose first layer is the competitive layer
            input: array of input vectors, one row per sample

        :Returns:
            None
        """
        layer = net.layers[0]

        for inp in input:
            out = net.step(inp)
            winner = np.argmax(out)
            # Per-neuron distances from the last step; scales the update.
            # NOTE(review): assumes net.step refreshes layer.last_dist — confirm.
            d = layer.last_dist
            # (Removed leftover debug print.)
            layer.np["w"][winner] += self.lr * d[winner] * (inp - layer.np["w"][winner])

        return None
Example #4
0
    def learn(self, net, input):
        """One epoch of conscience winner-take-all (CWTA) learning.

        Like plain WTA, each winner is moved toward its input, but every
        win also increments that neuron's "conscience" counter during the
        epoch; the whole conscience array is reset to 1.0 at the end so
        the next epoch starts unbiased.

        :Parameters:
            net: network whose first layer is the competitive layer
            input: array of input vectors, one row per sample

        :Returns:
            None
        """
        layer = net.layers[0]

        for inp in input:
            out = net.step(inp)
            winner = np.argmax(out)
            # Per-neuron distances from the last step; scales the update.
            # NOTE(review): assumes net.step refreshes layer.last_dist — confirm.
            d = layer.last_dist
            # Track how often each neuron wins within this epoch.
            layer.np["conscience"][winner] += 1
            # (Removed leftover debug print.)
            layer.np["w"][winner] += self.lr * d[winner] * (inp - layer.np["w"][winner])

        # Reset the conscience bias so the next epoch starts neutral.
        layer.np["conscience"].fill(1.0)
        return None