def forward(self, x):
    '''
    Performs a forward pass through the network.
    Params:
        x - np.array, the input vector
    Returns:
        np.array, the activation of the output layer
    '''
    for i in range(len(self.weights)):
        # Append the bias term and cache the layer input for backprop.
        x = self.add_bias(x)
        self.a[i] = x
        weights = self.weights[i]
        act_fun = self.act_funs[i]
        # Pre-activation: the bias is folded into the weight matrix.
        z = np.dot(weights, x)
        self.z[i] = z
        if act_fun == 'sigmoid':
            a = fun.sigmoid(z)
        elif act_fun == 'ReLU':
            a = fun.ReLU(z)
        elif act_fun == 'tanh':
            a = fun.tanh(z)
        elif act_fun == 'softmax':
            a = fun.softmax(z)
        elif act_fun == 'ELU':
            a = fun.ELU(z)
        else:
            raise ValueError('Unknown activation function: %s' % act_fun)
        x = a
    return x
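
The snippet dispatches to a fun module that is not shown; below are plausible NumPy stand-ins for those activations, consistent with how they are called above (a sketch, not the project's actual helpers):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def ReLU(z):
    return np.maximum(0, z)

def tanh(z):
    return np.tanh(z)

def softmax(z):
    e = np.exp(z - np.max(z))  # shift by the max for numerical stability
    return e / e.sum()

def ELU(z, alpha=1.0):
    # Clamp the exp argument so the unused branch cannot overflow.
    return np.where(z > 0, z, alpha * (np.exp(np.minimum(z, 0)) - 1))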
Example #2
def forward_propagation(W, b, x, n_hl, ac):
    """Forward pass through a fully-connected network with n_hl hidden layers.

    W and b are 1-indexed lists of weight matrices and bias vectors, x is the
    input vector, and ac selects the hidden activation ("sig", "tanh" or
    "relu"). Returns the per-layer activations h and pre-activations a.
    """
    # Index 0 holds a placeholder so that layer i lives at h[i] and a[i].
    h, a = [[]], [[]]

    for i in range(1, n_hl + 1):
        # Pre-activation: the first layer consumes the input, later layers
        # the previous layer's activation.
        if i == 1:
            _a = np.dot(W[i], x) + b[i]
        else:
            _a = np.dot(W[i], h[i - 1]) + b[i]

        if ac == "sig":
            _h = functions.logistic(_a)
        elif ac == "tanh":
            _h = functions.tanh(_a)
        elif ac == "relu":
            _h = functions.ReLU(_a)
        else:
            raise ValueError("Unknown activation: %s" % ac)

        a.append(_a)
        h.append(_h)

    # Output layer: subtracting max(_a) keeps the softmax numerically stable.
    _a = np.dot(W[n_hl + 1], h[n_hl]) + b[n_hl + 1]
    _y = functions.softmax(_a - max(_a))

    a.append(_a)
    h.append(_y)

    return h, a
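
A minimal way to drive forward_propagation, assuming the snippet's functions module is importable and exposes logistic, tanh, ReLU and softmax; the layer sizes and seed here are illustrative, not from the source:

import numpy as np

rng = np.random.default_rng(0)
sizes = [4, 8, 8, 3]  # input, two hidden layers, output

# 1-indexed parameter lists: index 0 is a placeholder, matching the snippet.
W = [None] + [rng.standard_normal((sizes[i + 1], sizes[i])) * 0.1
              for i in range(len(sizes) - 1)]
b = [None] + [np.zeros(sizes[i + 1]) for i in range(len(sizes) - 1)]

x = rng.standard_normal(sizes[0])
h, a = forward_propagation(W, b, x, n_hl=2, ac="tanh")
print(h[-1])  # output probabilities; they sum to 1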
Example #3
File: lstm.py Project: saska/lstm
    def backward(self, state, da_next, dc_next):
        """Cell backward.
        Args:
            state: Cell state at t.
            da_next: Activation gradient of cell t (gradient w.r.t activation input to t+1).
            dc_next: Cell state gradient of cell t (gradient w.r.t cell input to t+1).

        Returns:
            da_in: Activation gradient of cell t-1 (gradient w.r.t activation input to t).
            dc_in: Cell state gradient of cell t-1 (gradient w.r.t cell input to t).
            grads: Dictionary of gate gradients for updating params.
        """
        # Gradient w.r.t. the cell state at t: the path through the output
        # gate plus the gradient carried back from cell t+1.
        dc_out = state['o'] * da_next * d_tanh(state['c_out']) + dc_next
        grads = self.init_grads()

        # Pre-activation gradients per gate: tanh' for the candidate 'c',
        # sigmoid' for the update 'u', output 'o' and forget 'f' gates.
        d = {}
        d['c'] = (1 - state['c']**2) * state['u'] * dc_out
        d['u'] = state['u'] * (1 - state['u']) * state['c'] * dc_out
        d['o'] = state['o'] * (1 - state['o']) * tanh(
            state['c_out'])[0] * da_next
        d['f'] = state['f'] * (1 - state['f']) * state['c_in'] * dc_out
        da_in = np.zeros_like(da_next)
        for gate in ['c', 'u', 'o', 'f']:
            # The first hidden_dim columns of each weight matrix multiply
            # a_prev (the top of z), so take that slice of w.T when
            # back-propagating to the previous activation.
            da_in += np.dot(self.params[gate]['w'].T[:self.hidden_dim, :],
                            d[gate])
            grads[gate]['b'] = np.sum(d[gate], axis=1, keepdims=True)
            grads[gate]['w'] = np.dot(d[gate], state['z'].T)

        # The gradient handed to cell t-1 flows through the forget gate.
        dc_in = dc_out * state['f']

        return da_in, dc_in, grads
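
The snippet relies on tanh returning a (value, cache) pair and on a d_tanh helper; a minimal sketch consistent with how they are used above (these definitions are assumptions, not the project's actual code):

import numpy as np

def tanh(x):
    # Mirrors the snippet's convention of returning (activation, cache).
    a = np.tanh(x)
    return a, x

def d_tanh(x):
    # Derivative of tanh evaluated at x: 1 - tanh(x)^2.
    return 1.0 - np.tanh(x)**2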
Example #4
File: lstm.py Project: saska/lstm
    def forward(self, x, a_prev, c_prev):
        """Cell forward.
        Args:
            x: Input data.
            a_prev: Activation at t-1.
            c_prev: Cell state at t-1.

        Returns:
            state: Dictionary of states of the different activations at t.
            cache: Dictionary of gate activation function inputs.
        """
        # At the first time step there is no previous cell: fall back to a
        # zero activation and zero cell state.
        a_prev = a_prev if a_prev is not None else np.zeros(
            (self.hidden_dim, x.shape[0]))
        c_prev = c_prev if c_prev is not None else np.zeros(
            (self.hidden_dim, x.shape[0]))

        state = {}
        state['c_in'] = c_prev
        # Stack the previous activation on top of the (transposed) input so
        # every gate needs only a single weight matrix.
        state['z'] = np.vstack((a_prev, x.T))

        cache = {}
        # self.funcs maps each gate key to its activation function, which
        # returns the activation together with its pre-activation cache.
        for k, func in self.funcs.items():
            state[k], cache[k] = func['a'](
                np.dot(self.params[k]['w'], state['z']) + self.params[k]['b'])

        # New cell state: forget part of the old state and add the
        # update-gated candidate; the output gate then modulates tanh of it.
        state['c_out'] = state['f'] * c_prev + state['u'] * state['c']
        state['a_out'] = state['o'] * tanh(state['c_out'])[0]

        return state, cache
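
In the standard LSTM notation, the two closing lines compute (with \odot denoting elementwise product):

    c_t = f_t \odot c_{t-1} + u_t \odot \tilde{c}_t, \qquad a_t = o_t \odot \tanh(c_t)

where f, u and o are the sigmoid forget, update and output gates and \tilde{c}_t is the snippet's state['c'], the tanh candidate.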
Example #5
def jsn(u, m):
    """
    Computes the Jacobi elliptic sn function in terms
    of Jacobi theta functions.
    `u` is any complex number, `m` must be in the unit disk.

    The sn-function is doubly periodic in the complex
    plane with periods `4 K(m)` and `2 i K(1-m)`
    (see :func:`ellipk`)::

        >>> from mpmath import *
        >>> mp.dps = 25
        >>> print(jsn(2, 0.25))
        0.9628981775982774425751399
        >>> print(jsn(2+4*ellipk(0.25), 0.25))
        0.9628981775982774425751399
        >>> print(chop(jsn(2+2*j*ellipk(1-0.25), 0.25)))
        0.9628981775982774425751399

    """
    if abs(m) < eps:
        # Degenerate case m = 0: sn(u, 0) = sin(u).
        return sin(u)
    elif m == one:
        # Degenerate case m = 1: sn(u, 1) = tanh(u).
        return tanh(u)
    else:
        # General case: evaluate at slightly raised working precision.
        extra = 10
    try:
        mp.prec += extra
        q = calculate_nome(sqrt(m))

        v3 = jtheta(3, zero, q)
        v2 = jtheta(2, zero, q)        # mathworld says v4
        arg1 = u / (v3*v3)
        v1 = jtheta(1, arg1, q)
        v4 = jtheta(4, arg1, q)

        sn = (v3/v2)*(v1/v4)
    finally:
        mp.prec -= extra

    return sn
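
The closing expression is the classical theta-function representation of sn, which the code evaluates with z = u / \vartheta_3(0, q)^2:

    \operatorname{sn}(u, m) = \frac{\vartheta_3(0, q)}{\vartheta_2(0, q)} \cdot \frac{\vartheta_1(z, q)}{\vartheta_4(z, q)}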
Example #6
def predict(W, b, x, n_hl, ac):
    """Forward pass that keeps only the current layer's values; takes the
    same parameters as forward_propagation and returns the output
    probabilities."""
    a, h = [], []
    for i in range(1, n_hl + 1):
        if i == 1:
            a = np.dot(W[i], x) + b[i]
        else:
            a = np.dot(W[i], h) + b[i]

        if ac == "sig":
            h = functions.logistic(a)
        elif ac == "tanh":
            h = functions.tanh(a)
        elif ac == "relu":
            h = functions.ReLU(a)
        else:
            raise ValueError("Unknown activation: %s" % ac)

    # Subtracting max(a) leaves the softmax unchanged but avoids overflow.
    a = np.dot(W[n_hl + 1], h) + b[n_hl + 1]
    y_pred = functions.softmax(a - max(a))

    return y_pred
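
The a - max(a) shift works because softmax is invariant under adding a constant to every component; a quick self-contained check (values illustrative):

import numpy as np

def softmax(z):
    e = np.exp(z)
    return e / e.sum()

z = np.array([1000.0, 1001.0, 1002.0])
# softmax(z) overflows to nan in float64; the shifted version is fine:
print(softmax(z - z.max()))  # [0.09003057 0.24472847 0.66524096]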
Example #7
    def forward(self, x):
        """
        Forward pass over one training sample (a sentence).

        Args:
            x: list of token indices, one per time step.
        """
        T = len(x)
        # Save every hidden state in a NumPy array. The extra row (index T,
        # reachable as s[-1]) holds the initial zero state that s[t - 1]
        # reads at t = 0.
        s = np.zeros((T + 1, self.hidden_size))
        s[-1] = np.zeros(self.hidden_size)
        o = np.zeros((T, self.vocab_size))
        for t in np.arange(T):
            # x arrives as an array of token indices; turn each index into
            # a one-hot vector before applying the input weights.
            x_vector = np.zeros(self.vocab_size)
            x_vector[x[t]] = 1

            s[t] = tanh(np.dot(self.U, x_vector) + self.W.dot(s[t - 1]))
            o[t] = softmax(self.V.dot(s[t]))
        return o, s
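
Since x_vector is one-hot, the first matrix product merely selects a column of self.U; an equivalent and cheaper form of the recurrence (an observation, not a change taken from the source):

            s[t] = tanh(self.U[:, x[t]] + self.W.dot(s[t - 1]))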
Example #8
    def forward(self, input):
        """Apply the hyperbolic tangent activation function on the input and
        save the input."""
        self.input = input
        return functions.tanh(input)
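
The saved input is presumably consumed by a matching backward pass; a sketch of what that could look like, assuming the layer follows the usual forward/backward protocol (not shown in the snippet):

    def backward(self, grad_output):
        # d/dx tanh(x) = 1 - tanh(x)^2, evaluated at the saved input.
        return grad_output * (1.0 - functions.tanh(self.input) ** 2)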
Example #9
    def tanh(self):
        return F.tanh(self)