Example #1
    def __init__(
        self, net, input, target, lr=0.07, adapt=False, rate_dec=0.5, rate_inc=1.2, rate_min=1e-9, rate_max=50
    ):

        super(TrainRprop, self).__init__(net, input, target, lr, adapt)
        self.rate_inc = rate_inc
        self.rate_dec = rate_dec
        self.rate_max = rate_max
        self.rate_min = rate_min
        size = tool.np_size(net)
        self.grad_prev = np.zeros(size)
        self.rate = np.zeros(size) + lr
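
A minimal sketch of the update rule these attributes support, in plain NumPy. The function and the `state` dict are hypothetical names for illustration, not the class's actual training method: Rprop grows each parameter's step size by `rate_inc` while its gradient keeps its sign, shrinks it by `rate_dec` when the sign flips, and clips it to [rate_min, rate_max].

import numpy as np

def rprop_step(x, grad, state, rate_inc=1.2, rate_dec=0.5,
               rate_min=1e-9, rate_max=50.0):
    # Positive product: the gradient kept its sign; negative: it flipped
    sign_change = grad * state['grad_prev']
    rate = state['rate']
    rate = np.where(sign_change > 0, rate * rate_inc, rate)
    rate = np.where(sign_change < 0, rate * rate_dec, rate)
    state['rate'] = np.clip(rate, rate_min, rate_max)
    # Move each parameter against its gradient sign by its own step size
    x = x - np.sign(grad) * state['rate']
    state['grad_prev'] = grad.copy()
    return x

state = {'grad_prev': np.zeros(3), 'rate': np.full(3, 0.07)}  # mirrors __init__
x = rprop_step(np.array([1.0, -2.0, 0.5]), np.array([0.1, -0.3, 0.0]), state)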
Example #2
    def __init__(self, ci, cn, co, property):
        self.ci = ci
        self.cn = cn
        self.co = co
        self.np = {}
        for p, shape in property.items():
            self.np[p] = np.empty(shape)
        self.inp = np.zeros(ci)
        self.out = np.zeros(co)
        # Property must be changed when the Layer is initialized
        self.out_minmax = np.empty([self.co, 2])
        # Property will be changed when the Net is initialized
        self.inp_minmax = np.empty([self.ci, 2])
        self.initf = None
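
A hedged stand-alone sketch of the `property` argument's convention: subclasses, such as the Perceptron layers in Examples #6 and #8, pass a dict mapping parameter names to array shapes, which this constructor turns into empty arrays in `self.np`. The names below are made up for illustration.

import numpy as np

ci, cn = 2, 3                          # two inputs, three neurons
props = {'w': (cn, ci), 'b': cn}       # the shapes a Perceptron layer passes
params = {p: np.empty(shape) for p, shape in props.items()}
print(params['w'].shape, params['b'].shape)   # (3, 2) (3,)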
Example #3
    def __init__(self, inp_minmax, co, layers, connect, trainf, errorf):
        self.inp_minmax = np.asfarray(inp_minmax)
        self.out_minmax = np.zeros([co, 2])
        self.ci = self.inp_minmax.shape[0]
        self.co = co
        self.layers = layers
        self.trainf = trainf
        self.errorf = errorf
        self.inp = np.zeros(self.ci)
        self.out = np.zeros(self.co)
        # Check connect format
        assert self.inp_minmax.ndim == 2
        assert self.inp_minmax.shape[1] == 2
        if len(connect) != len(layers) + 1:
            raise ValueError("Connect error")
        # Check connect links
        tmp = [0] * len(connect)
        for con in connect:
            for s in con:
                if s != -1:
                    tmp[s] += 1
        for l, c in enumerate(tmp):
            if c == 0 and l != len(layers):
                raise ValueError("Connect error: Lost the signal " + "from the layer " + str(l - 1))
        self.connect = connect

        # Set inp_minmax for all layers
        for nl, nums_signal in enumerate(self.connect):
            if nl == len(self.layers):
                minmax = self.out_minmax
            else:
                minmax = self.layers[nl].inp_minmax
            ni = 0
            for ns in nums_signal:
                t = self.layers[ns].out_minmax if ns != -1 else self.inp_minmax
                if ni + len(t) > len(minmax):
                    raise ValueError("Connect error on layer " + str(nl))
                minmax[ni : ni + len(t)] = t
                ni += len(t)
            if ni != len(minmax):
                raise ValueError("Connect error: empty inputs on layer " + str(nl))
        self.init()
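
A hedged illustration of the connect format this constructor checks: one entry per layer plus one for the network output, where each entry lists that destination's signal sources and -1 refers to the network input. The chain below is what a plain two-layer feed-forward net would use; the layer names are stand-ins for Layer objects.

layers = ['hidden', 'output']   # stand-ins for two Layer objects
connect = [
    [-1],   # layer 0 reads the network input
    [0],    # layer 1 reads layer 0's output
    [1],    # the network output is taken from layer 1
]
assert len(connect) == len(layers) + 1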
Example #4
def np_get(net):
    """
    Get all network parameters in one array

    """
    size = np_size(net)
    result = np.zeros(size)
    start = 0
    for l in net.layers:
        for prop in l.np.values():
            result[start: start+prop.size] = prop.flat[:]
            start += prop.size
    return result
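
A hedged usage sketch, assuming these snippets come from the neurolab package, where np_get and np_size live in neurolab.tool:

import neurolab as nl

net = nl.net.newff([[0, 1], [0, 1]], [3, 1])   # 2 inputs, 3 hidden, 1 output
flat = nl.tool.np_get(net)
assert flat.size == nl.tool.np_size(net)       # one entry per weight and bias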
Example #5
def ff_grad(net, input, target):
    """
    Calc and accumulate gradient with backpropogete method,
    for feed-forward neuran networks on each step

    :Parametrs:
        net: Net
            Feed-forward network
        input: array, shape = N,net.ci
            Input array
        target: array, shape = N,net.co
            Train target
        deriv: callable
            Derivative of error function
    :Returns:
        grad: list of dict
            Dradient of net for each layer,
            format:[{'w':..., 'b':...},{'w':..., 'b':...},...]
        grad_flat: array
            All neurons propertys in 1 array (reference of grad)
        output: array
            output of network

    """
    grad_flat = np.zeros(np_size(net))
    grad = []
    st = 0
    for i, l in enumerate(net.layers):
        grad.append({})
        for k, v in l.np.items():
            # Each entry is a view into grad_flat, reshaped to the property shape
            grad[i][k] = grad_flat[st: st + v.size].reshape(v.shape)
            st += v.size
    output = np.empty((len(target),) + net.out.shape)
    for i, (inp, tar) in enumerate(zip(input, target)):
        out = net.step(inp)
        ff_grad_step(net, out, tar, grad)
        output[i] = out

    # Copy values back into grad_flat in case any entry was rebound to a copy
    st = 0
    for i, l in enumerate(net.layers):
        for k, v in l.np.items():
            grad_flat[st: st + v.size] = grad[i][k].reshape(v.size)
            st += v.size
    return grad, grad_flat, output
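
A hedged usage sketch for ff_grad, again assuming the neurolab package (where this function lives in neurolab.tool) and a toy regression task:

import numpy as np
import neurolab as nl

net = nl.net.newff([[-1, 1]], [3, 1])      # 1 input, 3 hidden neurons, 1 output
inp = np.linspace(-1, 1, 20).reshape(20, 1)
tar = inp ** 2                             # toy target

grad, grad_flat, out = nl.tool.ff_grad(net, inp, tar)
print(grad_flat.shape)                     # one entry per weight and bias
print(grad[0]['w'].shape, grad[0]['b'].shape)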
Example #6
    def __init__(self, ci, cn, transf, max_iter, delta):
        Layer.__init__(self, ci, cn, cn, {'w': (cn, ci), 'b': cn})
        self.max_iter = max_iter
        self.delta = delta
        self.transf = transf
        self.outs = []
        if not hasattr(transf, 'out_minmax'):
            # Probe the transfer function's output range over a wide input spread
            test = np.asfarray([-1e100, -100, -10, -1, 0, 1, 10, 100, 1e100])
            val = self.transf(test)
            self.out_minmax = np.asfarray([[val.min(), val.max()]] * self.co)
        else:
            self.out_minmax = np.asfarray([transf.out_minmax] * self.co)
        self.initf = None
        self.s = np.zeros(self.cn)
Example #7
def ff_grad_step(net, out, tar, grad=None):
    """
    Calc gradient with backpropogete method,
    for feed-forward neuran networks on each step

    :Parametrs:
        net: Net
            Feed-forward network
        inp: array, size = net.ci
            Input array
        tar: array, size = net.co
            Train target
        deriv: callable
            Derivative of error function
        grad:list of dict default(None)
            Grad on previous step
    :Returns:
        grad: list of dict
            Gradient of net for each layer,
            format:[{'w':..., 'b':...},{'w':..., 'b':...},...]

    """
    delt = [None] * len(net.layers)
    if grad is None:
        grad = []
        for i, l in enumerate(net.layers):
            grad.append({})
            for k, v in l.np.items():
                grad[i][k] = np.zeros(v.shape)
    e = out - tar
    # Delta for the output layer
    ln = len(net.layers) - 1
    layer = net.layers[ln]
    delt[ln] = net.errorf.deriv(e) * layer.transf.deriv(layer.s, out)
    delt[ln] = delt[ln].reshape((delt[ln].size, 1))  # column vector
    grad[ln]['w'] += delt[ln] * layer.inp
    grad[ln]['b'] += delt[ln].reshape(delt[ln].size)

    # Backpropagate through the hidden layers, from last to first
    for ln in range(len(net.layers) - 2, -1, -1):
        layer = net.layers[ln]
        nxt = ln + 1

        dS = np.sum(net.layers[nxt].np['w'] * delt[nxt], axis=0)
        delt[ln] = dS * layer.transf.deriv(layer.s, layer.out)
        delt[ln] = delt[ln].reshape((delt[ln].size, 1))  # column vector

        grad[ln]['w'] += delt[ln] * layer.inp
        grad[ln]['b'] += delt[ln].reshape(delt[ln].size)
    return grad
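
The same delta recursion written out in plain NumPy for a hypothetical two-layer net (tanh hidden units, linear output, SSE error); every name below is made up for illustration. The output delta is the error derivative times the transfer derivative, and each earlier delta is the next layer's transposed weights applied to the next delta:

import numpy as np

rng = np.random.default_rng(0)
W1, b1 = rng.normal(size=(3, 2)), np.zeros(3)   # hidden: 3 tanh units, 2 inputs
W2, b2 = rng.normal(size=(1, 3)), np.zeros(1)   # output: 1 linear unit

x, tar = np.array([0.5, -0.2]), np.array([0.1])

# Forward pass, keeping pre-activations as ff_grad_step keeps layer.s
s1 = W1 @ x + b1
h = np.tanh(s1)
out = W2 @ h + b2                               # linear output

# Backward pass, same e = out - tar convention as above
e = out - tar
delt2 = e * 1.0                                 # SSE deriv * linear transf deriv
delt1 = (W2.T @ delt2) * (1.0 - h ** 2)         # push delta back through W2

gW2, gb2 = np.outer(delt2, h), delt2
gW1, gb1 = np.outer(delt1, x), delt1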
Example #8
    def __init__(self, ci, cn, transf):

        Layer.__init__(self, ci, cn, cn, {'w': (cn, ci), 'b': cn})

        self.transf = transf
        if not hasattr(transf, 'out_minmax'):
            # Probe the transfer function's output range over a wide input spread
            test = np.asfarray([-1e100, -100, -10, -1, 0, 1, 10, 100, 1e100])
            val = self.transf(test)
            self.out_minmax = np.asfarray([[val.min(), val.max()]] * self.co)
        else:
            self.out_minmax = np.asfarray([transf.out_minmax] * self.co)
        # default init function
        self.initf = init.initwb_reg
        #self.initf = init.initwb_nw
        self.s = np.zeros(self.cn)
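
A short sketch of the range-probing fallback above, using a hypothetical transfer function that lacks an out_minmax attribute: the function is evaluated on a wide spread of inputs and every neuron gets the same [min, max] row:

import numpy as np

transf = np.tanh   # hypothetical transfer function without out_minmax
test = np.asfarray([-1e100, -100, -10, -1, 0, 1, 10, 100, 1e100])
val = transf(test)

co = 4
out_minmax = np.asfarray([[val.min(), val.max()]] * co)
print(out_minmax.shape)   # (4, 2): one [min, max] row per neuron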
Example #9
    def sim(self, input):
        """
        Simulate a neural network

        :Parameters:
            input: array-like
                Array of input vectors, shape (N, ci)
        :Returns:
            outputs: array-like
                Array of output vectors, shape (N, co)
        """
        input = np.asfarray(input)
        assert input.ndim == 2
        assert input.shape[1] == self.ci

        output = np.zeros([len(input), self.co])

        for inp_num, inp in enumerate(input):
            output[inp_num, :] = self.step(inp)

        return output
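
A hedged usage sketch, assuming the neurolab package:

import neurolab as nl

net = nl.net.newff([[-1, 1]], [2, 1])     # 1 input, 2 hidden neurons, 1 output
out = net.sim([[-0.5], [0.0], [0.5]])     # shape (3, 1): one row per input vector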