def get_next_state(self, cell, neighbours):
        # get input vector from neighbours
        input_vector = self.get_input_vector(cell, neighbours)

        # get hidden layer
        a1 = np.insert(input_vector, 0, 1)
        hidden_layer = np.tanh(np.dot(self.theta1, a1))

        # get output layer
        a2 = np.insert(hidden_layer, 0, 1)
        out_vector = np.tanh(np.dot(self.theta2, a2))

        # set new internal state and chemicals of cell
        new_internal_state = out_vector[:self.internal_state_vector_length]
        new_chemicals = out_vector[-self.chemicals_vector_length:]

        # add previous chemical concentration
        new_chemicals += cell.state.chemicals

        # get new color of cell
        color_vector = np.append(new_internal_state, new_chemicals)
        a3 = np.insert(color_vector, 0, 1)
        new_color = (np.tanh(np.dot(self.theta3, a3)) + 1) / 2

        # finally create new state
        new_state = cell.state.create_state()
        new_state.chemicals = new_chemicals
        new_state.internal = new_internal_state
        new_state.grayscale = int(new_color * 255)
        return new_state
Example #2
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """
    Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features)

    gamma : float, default None

    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    K = linear_kernel(X, Y)
    K *= gamma
    K += coef0
    np.tanh(K, K)   # compute tanh in-place
    return K
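Since linear_kernel(X, Y) is just X @ Y.T, the whole kernel reduces to a single NumPy expression; a quick sanity-check sketch (illustrative, not part of the original module) that also confirms every entry is bounded by tanh:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((5, 3))
Y = rng.standard_normal((4, 3))
gamma = 1.0 / X.shape[1]

K_ref = np.tanh(gamma * (X @ Y.T) + 1)   # coef0 = 1, the default above
assert np.all(np.abs(K_ref) < 1)         # tanh keeps the Gram matrix in (-1, 1)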
Example #3
def calc_v(pos, t, x_table, c_attract, dc_attract, c_repel, dc_repel):
    '''Calculate drift velocity as a function of c(pos,t) and dc(pos,t)'''
    v = 22. # µm/s
    chi = 50000. # µm^2/s
    chi2 = 50000. # µm^2/s
    k = 0.125 # mM
    k2 = 0.125 # mM
    if pos < x_table[0]:
        pos = x_table[0]
        c, dc = int_tables(pos, t, x_table, c_attract, dc_attract)
        c2, dc2 = int_tables(pos, t, x_table, c_repel, dc_repel)
    elif pos > x_table[-1]:
        pos = x_table[-1]
        c, dc = int_tables(pos, t, x_table, c_attract, dc_attract)
        c2, dc2 = int_tables(pos, t, x_table, c_repel, dc_repel)
    else:
        c, dc = int_tables(pos, t, x_table, c_attract, dc_attract)
        c2, dc2 = int_tables(pos, t, x_table, c_repel, dc_repel)
    v1 = ((8*v)/(3*np.pi))*np.tanh(((chi*np.pi)/(8*v))*(k/np.power((k+c),2))*dc)
    v2 = ((8*v)/(3*np.pi))*np.tanh(((chi2*np.pi)/(8*v))*(k2/np.power((k2+c2),2))*dc2)
    #print v1, v2
    if np.abs(v1) > np.abs(v2):
        return v1
    else:
        return v2
Example #4
	def set_activation(self, method, sig=None, d_sig=None, sig_0=None, d_sig_0=None):
		"""
		This method sets the activation functions. 

		Parameters
		----------
		method : str
			'logistic', 'htangent', or 'custom'
		sig, d_sig, sig_0, and d_sig_0 : function objects
			Optional arguments intended for use with the 
			'custom' method option. They should be functions. 
			sig_0 and d_sig_0 are the output layer activation functions.
		"""
		method = method.lower()

		if method == 'logistic':
			self.sig = lambda z: twod(1 / (1 + np.exp(-z)))
			self.d_sig = lambda z: twod(np.multiply(self.sig(z), (1 - self.sig(z))))
			self.sig_0 = self.sig
			self.d_sig_0 = self.d_sig
		elif method == 'htangent':
			self.sig = lambda z: twod(np.tanh(z))
			self.d_sig = lambda z: twod(1 - np.power(np.tanh(z), 2))
			self.sig_0 = self.sig
			self.d_sig_0 = self.d_sig
		elif method == 'custom':
			self.sig = sig
			self.d_sig = d_sig
			self.sig_0 = sig_0
			self.d_sig_0 = d_sig_0
		else:
			raise ValueError('NNetClassify.set_activation: ' + str(method) + ' is not a valid option for method')

		self.activation = method
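The hand-coded derivatives above are easy to verify numerically; a minimal finite-difference check for the 'htangent' pair (a standalone sketch, independent of the class):

import numpy as np

sig = lambda z: np.tanh(z)
d_sig = lambda z: 1 - np.power(np.tanh(z), 2)

z = np.linspace(-3, 3, 7)
eps = 1e-6
fd = (sig(z + eps) - sig(z - eps)) / (2 * eps)   # central difference
assert np.allclose(fd, d_sig(z), atol=1e-8)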
Example #5
    def forward(self, inputs):
        c_prev, x = inputs
        a, i, f, o = _extract_gates(x)
        batch = len(x)

        if isinstance(x, numpy.ndarray):
            self.a = numpy.tanh(a)
            self.i = _sigmoid(i)
            self.f = _sigmoid(f)
            self.o = _sigmoid(o)

            c_next = numpy.empty_like(c_prev)
            c_next[:batch] = self.a * self.i + self.f * c_prev[:batch]
            h = self.o * numpy.tanh(c_next[:batch])
        else:
            c_next = cuda.cupy.empty_like(c_prev)
            h = cuda.cupy.empty_like(c_next[:batch])
            cuda.elementwise(
                'T c_prev, T a, T i_, T f, T o', 'T c, T h',
                '''
                    COMMON_ROUTINE;
                    c = aa * ai + af * c_prev;
                    h = ao * tanh(c);
                ''',
                'lstm_fwd', preamble=_preamble)(
                    c_prev[:batch], a, i, f, o, c_next[:batch], h)

        c_next[batch:] = c_prev[batch:]
        self.c = c_next[:batch]
        return c_next, h
Example #6
    def forward(self, inputs):
        c_prev1, c_prev2, x1, x2 = inputs
        a1, i1, f1, o1 = _extract_gates(x1)
        a2, i2, f2, o2 = _extract_gates(x2)

        if isinstance(x1, numpy.ndarray):
            self.a1 = numpy.tanh(a1)
            self.i1 = _sigmoid(i1)
            self.f1 = _sigmoid(f1)

            self.a2 = numpy.tanh(a2)
            self.i2 = _sigmoid(i2)
            self.f2 = _sigmoid(f2)

            self.o = _sigmoid(o1 + o2)
            self.c = self.a1 * self.i1 + self.a2 * self.i2 + \
                self.f1 * c_prev1 + self.f2 * c_prev2

            h = self.o * numpy.tanh(self.c)
        else:
            self.c, h = cuda.elementwise(
                '''T c_prev1, T a1, T i1, T f1, T o1,
                   T c_prev2, T a2, T i2, T f2, T o2''',
                'T c, T h',
                '''
                    COMMON_ROUTINE;
                    c = aa1 * ai1 + af1 * c_prev1 + aa2 * ai2 + af2 * c_prev2;
                    h = ao * tanh(c);
                ''',
                'slstm_fwd', preamble=_preamble)(
                    c_prev1, a1, i1, f1, o1, c_prev2, a2, i2, f2, o2)

        return self.c, h
Example #7
def sample_modified(h, seed_str, n, alpha):
  """ 
  A modified sampling method from sample().

  h is updated until the end of the string before generating the text.

  seed_str: a list of indices of input string

  """
  m = len(seed_str)
  ixes = []

  # Simply output the string and Updating h
  for i in xrange(m):

    ixes.append(seed_str[i])

    x = np.zeros((vocab_size, 1))
    seed_ix = seed_str[i]
    x[seed_ix] = 1
    h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)


  # Generating the text
  for t in xrange(n):

    h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
    y = np.dot(Why, h) + by
    p = np.exp(alpha*y) / np.sum(np.exp(alpha*y))
    ix = np.random.choice(range(vocab_size), p=p.ravel())
    x = np.zeros((vocab_size, 1))
    x[ix] = 1
    ixes.append(ix)

  return ixes
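sample_modified relies on globals (Wxh, Whh, Why, bh, by, vocab_size) defined elsewhere in the char-RNN script; a self-contained toy with random weights (purely illustrative sizes and scales) shows the same tanh recurrence and the temperature-like role of alpha:

import numpy as np

vocab_size, hidden_size = 8, 16
rng = np.random.default_rng(1)
Wxh = 0.01 * rng.standard_normal((hidden_size, vocab_size))
Whh = 0.01 * rng.standard_normal((hidden_size, hidden_size))
Why = 0.01 * rng.standard_normal((vocab_size, hidden_size))
bh = np.zeros((hidden_size, 1))
by = np.zeros((vocab_size, 1))

h = np.zeros((hidden_size, 1))
x = np.zeros((vocab_size, 1)); x[0] = 1
alpha = 2.0    # alpha > 1 sharpens the softmax, alpha < 1 flattens it
ixes = []
for t in range(20):
    h = np.tanh(Wxh @ x + Whh @ h + bh)
    y = Why @ h + by
    p = np.exp(alpha * y) / np.sum(np.exp(alpha * y))
    ix = rng.choice(vocab_size, p=p.ravel())
    x = np.zeros((vocab_size, 1)); x[ix] = 1
    ixes.append(ix)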
Example #8
 def ConvLSTM(self, param, layerid, layerinfo, E, R, C, RPP, padw):
     u'''Used in the computation; performs the ConvLSTM calculation.'''
     si = str(layerid)
     ch, h, w = layerinfo
     zi = tensor.nnet.conv.conv2d(self.spad2d(E, padw, padw), param['cl_WE_'+si][0]).eval()
     zi += tensor.nnet.conv.conv2d(self.spad2d(R, padw, padw), param['cl_WR_'+si][0]).eval()
     zi += C*(param['cl_WC_'+si][0])
     zi += param['cl_b_'+si][0]
 
     zf = tensor.nnet.conv.conv2d(self.spad2d(E, padw, padw), param['cl_WE_'+si][1]).eval()
     zf += tensor.nnet.conv.conv2d(self.spad2d(R, padw, padw), param['cl_WR_'+si][1]).eval()
     zf += C*(param['cl_WC_'+si][1])
     zf += param['cl_b_'+si][1]
 
     zc = tensor.nnet.conv.conv2d(self.spad2d(E, padw, padw), param['cl_WE_'+si][2]).eval()
     zc += tensor.nnet.conv.conv2d(self.spad2d(R, padw, padw), param['cl_WR_'+si][2]).eval()
     zc += param['cl_b_'+si][2]
 
     zo = tensor.nnet.conv.conv2d(self.spad2d(E, padw, padw), param['cl_WE_'+si][3]).eval()
     zo += tensor.nnet.conv.conv2d(self.spad2d(R, padw, padw), param['cl_WR_'+si][3]).eval()
     zo += C*(param['cl_WC_'+si][3])
     zo += param['cl_b_'+si][3]
 
     if RPP is not None:
         zi += tensor.nnet.conv.conv2d(self.spad2d(RPP, padw, padw), param['cl_WRPP_'+si][0]).eval()
         zf += tensor.nnet.conv.conv2d(self.spad2d(RPP, padw, padw), param['cl_WRPP_'+si][1]).eval()
         zc += tensor.nnet.conv.conv2d(self.spad2d(RPP, padw, padw), param['cl_WRPP_'+si][2]).eval()
         zo += tensor.nnet.conv.conv2d(self.spad2d(RPP, padw, padw), param['cl_WRPP_'+si][3]).eval()
 
     i = self.sigmoid(zi)
     f = self.sigmoid(zf)
     Cnext = f*C + np.tanh(zc)
     o = self.sigmoid(zo)
     Rnext = o * np.tanh(Cnext)
     return (Rnext, Cnext)
Example #9
    def check_forward(self, h_data, xs_data, ws_data, bs_data):
        h = chainer.Variable(h_data)
        xs = [chainer.Variable(x) for x in xs_data]
        ws = [[chainer.Variable(w) for w in ws]
              for ws in ws_data]
        bs = [[chainer.Variable(b) for b in bs]
              for bs in bs_data]
        hy, ys = functions.n_step_bigru(
            self.n_layers, self.dropout, h, ws, bs, xs)

        xs_next = self.xs
        e_hy = self.hx.copy()
        for layer in range(self.n_layers):
            # forward
            di = 0
            xf = []
            layer_idx = layer * 2 + di
            w = self.ws[layer_idx]
            b = self.bs[layer_idx]
            for ind in range(self.length):
                x = xs_next[ind]
                batch = x.shape[0]
                h_prev = e_hy[layer_idx, :batch]
                # GRU
                z = sigmoid(x.dot(w[1].T) + h_prev.dot(w[4].T) + b[1] + b[4])
                r = sigmoid(x.dot(w[0].T) + h_prev.dot(w[3].T) + b[0] + b[3])
                h_bar = numpy.tanh(x.dot(w[2].T) +
                                   r *
                                   ((h_prev).dot(w[5].T) + b[5]) + b[2])
                e_h = (1 - z) * h_bar + z * h_prev
                e_hy[layer_idx, :batch] = e_h
                xf.append(e_h)

            # backward
            di = 1
            xb = []
            layer_idx = layer * 2 + di
            w = self.ws[layer_idx]
            b = self.bs[layer_idx]
            for ind in reversed(range(self.length)):
                x = xs_next[ind]
                batch = x.shape[0]
                h_prev = e_hy[layer_idx, :batch]
                # GRU
                z = sigmoid(x.dot(w[1].T) + h_prev.dot(w[4].T) + b[1] + b[4])
                r = sigmoid(x.dot(w[0].T) + h_prev.dot(w[3].T) + b[0] + b[3])
                h_bar = numpy.tanh(x.dot(w[2].T) +
                                   r *
                                   ((h_prev).dot(w[5].T) + b[5]) + b[2])
                e_h = (1 - z) * h_bar + z * h_prev
                e_hy[layer_idx, :batch] = e_h
                xb.append(e_h)
            xb.reverse()
            xs_next = [numpy.concatenate([hfi, hbi], axis=1) for (hfi, hbi) in
                       zip(xf, xb)]

        for k, (ysi, xsi) in enumerate(zip(ys, xs_next)):
            testing.assert_allclose(ysi.data, xsi, rtol=1e-4, atol=1e-4)

        testing.assert_allclose(hy.data, e_hy, rtol=1e-4, atol=1e-4)
Example #10
    def LSTMtick(x, h_prev, c_prev):
      t = 0

      # setup the input vector
      Hin = np.zeros((1,WLSTM.shape[0])) # xt, ht-1, bias
      Hin[t,0] = 1
      Hin[t,1:1+d] = x
      Hin[t,1+d:] = h_prev

      # LSTM tick forward
      IFOG = np.zeros((1, d * 4))
      IFOGf = np.zeros((1, d * 4))
      C = np.zeros((1, d))
      Hout = np.zeros((1, d))
      IFOG[t] = Hin[t].dot(WLSTM)
      IFOGf[t,:3*d] = 1.0/(1.0+np.exp(-IFOG[t,:3*d]))
      IFOGf[t,3*d:] = np.tanh(IFOG[t, 3*d:])

      # C[t] = IFOGf[t,:d] * IFOGf[t, 3*d:] + IFOGf[t,d:2*d] * c_prev
      C[t] = (IFOGf[t,d:2*d] * IFOGf[t, 3*d:]) - IFOGf[t, 3*d:]+ IFOGf[t,d:2*d] * c_prev

      if tanhC_version:
        Hout[t] = IFOGf[t,2*d:3*d] * np.tanh(C[t])
      else:
        Hout[t] = IFOGf[t,2*d:3*d] * C[t]
      Y = Hout.dot(Wd) + bd
      return (Y, Hout, C) # return output, new hidden, new cell
Example #11
 def forward_prop_node(self, node, depth=-1, test=False):
     cost = 0.0
     if node.isLeaf:
         node.c = self.W_in.dot(self.L[:, node.word]) + self.b_in
         node.o = sigmoid(self.W_out.dot(self.L[:, node.word]) + self.b_out)
         node.ct = np.tanh(node.c)
         if not test:
             node.mask = np.random.binomial(1, self.node_keep, self.mem_dim)
             node.hActs1 = node.o * node.ct * node.mask
         else:
             node.hActs1 = node.o * node.ct * self.node_keep
     else:
         cost_left = self.forward_prop_node(node.left, depth - 1)
         cost_right = self.forward_prop_node(node.right, depth - 2)
         cost += (cost_left + cost_right)
         children = np.hstack((node.left.hActs1, node.right.hActs1))
         node.i = sigmoid(self.Ui.dot(children) + self.bi)
         node.f_l = sigmoid(self.Uf_l.dot(children) + self.bf)
         node.f_r = sigmoid(self.Uf_r.dot(children) + self.bf)
         node.o = sigmoid(self.Uo.dot(children) + self.bo)
         node.u = np.tanh(self.Uu.dot(children) + self.bu)
         node.c = node.i * node.u + node.f_l * node.left.c + node.f_r * node.right.c
         node.ct = np.tanh(node.c)
         if not test:
             node.mask = np.random.binomial(1, self.node_keep, self.mem_dim)
             node.hActs1 = node.o * node.ct * node.mask
         else:
             node.hActs1 = node.o * node.ct * self.node_keep
     return cost
Example #12
    def test_one_step(self):
        h0 = tensor.matrix('h0')
        c0 = tensor.matrix('c0')
        x = tensor.matrix('x')
        h1, c1 = self.lstm.apply(x, h0, c0, iterate=False)
        next_h = theano.function(inputs=[x, h0, c0], outputs=[h1])

        h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        c0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]],
                                   dtype=theano.config.floatX)
        x_val = 0.1 * numpy.array([range(12), range(12, 24)],
                                  dtype=theano.config.floatX)
        W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX)
        W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX)
        W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX)

        # omitting biases because they are zero
        activation = numpy.dot(h0_val, W_state_val) + x_val

        def sigmoid(x):
            return 1. / (1. + numpy.exp(-x))

        i_t = sigmoid(activation[:, :3] + c0_val * W_cell_to_in)
        f_t = sigmoid(activation[:, 3:6] + c0_val * W_cell_to_forget)
        next_cells = f_t * c0_val + i_t * numpy.tanh(activation[:, 6:9])
        o_t = sigmoid(activation[:, 9:12] +
                      next_cells * W_cell_to_out)
        h1_val = o_t * numpy.tanh(next_cells)
        assert_allclose(h1_val, next_h(x_val, h0_val, c0_val)[0],
                        rtol=1e-6)
Example #13
    def check_forward(self, c_prev1_data, c_prev2_data, x1_data, x2_data):
        c_prev1 = chainer.Variable(c_prev1_data)
        c_prev2 = chainer.Variable(c_prev2_data)
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        c, h = functions.slstm(c_prev1, c_prev2, x1, x2)
        self.assertEqual(c.data.dtype, numpy.float32)
        self.assertEqual(h.data.dtype, numpy.float32)

        # Compute expected out
        a1_in = self.x1[:, [0, 4]]
        i1_in = self.x1[:, [1, 5]]
        f1_in = self.x1[:, [2, 6]]
        o1_in = self.x1[:, [3, 7]]
        a2_in = self.x2[:, [0, 4]]
        i2_in = self.x2[:, [1, 5]]
        f2_in = self.x2[:, [2, 6]]
        o2_in = self.x2[:, [3, 7]]

        c_expect = _sigmoid(i1_in) * numpy.tanh(a1_in) + \
            _sigmoid(i2_in) * numpy.tanh(a2_in) + \
            _sigmoid(f1_in) * self.c_prev1 + \
            _sigmoid(f2_in) * self.c_prev2
        h_expect = _sigmoid(o1_in + o2_in) * numpy.tanh(c_expect)

        gradient_check.assert_allclose(c_expect, c.data)
        gradient_check.assert_allclose(h_expect, h.data)
Example #14
    def train(self, trainData):
        recordNum = len(trainData)
        trainData = np.hstack(([[1]]*recordNum, trainData))
        self.w1 = np.random.uniform(-self.r, self.r, (self.d + 1, self.M))
        self.w2 = np.random.uniform(-self.r, self.r, (self.M + 1,1))
        for t in xrange(0, self.T):
      #      if t % 10000 == 0:
      #          print t
            n = random.randint(0,recordNum-1)
            x0 = trainData[n,0:3]
            y = trainData[n, 3]
            s1 = np.dot(self.w1.T, x0)
            s1.shape = (self.M,1)
            x =  np.tanh(s1)
            x1 = np.vstack(([1], x))
            x1.shape = (self.M+1,1)
#            print 'w2.shape=',self.w2.shape,self.w2
#            print 'x1.shape=',x1.shape,x1
            s2 = np.dot(self.w2.T, x1)
            yPredict = np.tanh(s2)
            e = y - yPredict
            delta2 = -2 * (y - yPredict) * derivative_tanh(s2)
            x = x1 * delta2
            self.w2 = self.w2 - self.eta * x1 * delta2
#            if t==0:
#                print "delta2.shape=",delta2.shape
#                print "w2.shape=",self.w2.shape
#                print derivative_tanh(s1).shape
#            print 'w2.shape=',self.w2.shape
#            print 's1.shape=',s1.shape
            delta1 = delta2 * self.w2[1:] * derivative_tanh(s1)
            x0.shape = (3,1) 
#            print 'delta1.shape=',delta1.shape
            self.w1 = self.w1 - self.eta * np.dot(x0, delta1.T)
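The training loop assumes a derivative_tanh helper defined elsewhere in the project; consistent with how it is applied to s1 and s2 above, the standard definition would be:

def derivative_tanh(s):
    # d/ds tanh(s) = 1 - tanh(s)**2
    return 1.0 - np.tanh(s) ** 2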
Example #15
 def forward_compute(self):
     if self.last_layer.type == CONV_TYPE:
         m = np.size(self.last_layer.images, 0)
         self.images = np.tanh(self.last_layer.images)
         self.value = self.images.reshape(m, self.channel*self.height*self.width)
     else:
         self.value = np.tanh(self.last_layer.value)
Example #16
 def input_node(self, xt):
     if self.ht_min_1 != []:
         # If a previous step's activation was given, include it in the calculation
         # of the current step's activation.
         return np.tanh(np.dot(self.Wi, xt) + np.dot(self.W_ht_min_1, self.ht_min_1) + self.bi)
     else:
         return np.tanh(np.dot(self.Wi, xt) + self.bi)
Example #17
def generate_nonuniform_grid_arctanh(inputFilename,slope,psi0):
    #Calculates psi(psiN), where psiN is uniform and psi is nonuniform
    # psi = h(psiN)
    # h = psiAHat*arctanh(P(psiN)), where P is the cubic a3*psiN**3 + a2*psiN**2 + a1*psiN + a0
    # polynomial determined to make h(psiMin) = psiMin, h(psiMax) = psiMax
    # and have the slope slope at psi0
    inputs = perfectInput(inputFilename)
    Npsi = inputs.Npsi
    psiAHat = inputs.psiAHat #psiAHat for a would-be uniform map
    psiN = inputs.psi #array of uniform grid points
    psiNMin = inputs.psiMin
    psiNMax = inputs.psiMax
    t1 = numpy.tanh(psiNMin)
    t2 = numpy.tanh(psiNMax)
    k = slope
    A = numpy.array([[psiNMin**3,psiNMin**2,psiNMin,1],[psiNMax**3,psiNMax**2,psiNMax,1],[psi0**3,psi0**2,psi0,1],[3*psi0**2,2*psi0,1,0]])
    b = numpy.array([t1,t2,0,k])
    coef=numpy.linalg.solve(A,b)

    h = (lambda psiN: psiAHat*numpy.arctanh(coef[0]*psiN**3 + coef[1]*psiN**2 + coef[2]*psiN+coef[3]))
    dhdpsiN = (lambda psiN: psiAHat*(3*coef[0]*psiN**2 + 2*coef[1]*psiN + coef[2])/(1+(coef[0]*psiN**3 + coef[1]*psiN**2 + coef[2]*psiN+coef[3])**2))

    psi=[h(pN) for pN in psiN] 
    dpsidpsiN= [dhdpsiN(pN) for pN in psiN]

    create_psiAHat_of_Npsi("psiAHat.h5",Npsi,dpsidpsiN,psi)
    
    return (psi,dpsidpsiN)
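The 4x4 solve encodes four interpolation conditions on the cubic P; a standalone check with made-up values (perfectInput and create_psiAHat_of_Npsi belong to the surrounding project and are not needed here):

import numpy

psiNMin, psiNMax, psi0, slope = 0.1, 0.9, 0.5, 2.0   # illustrative values
A = numpy.array([[psiNMin**3, psiNMin**2, psiNMin, 1],
                 [psiNMax**3, psiNMax**2, psiNMax, 1],
                 [psi0**3,    psi0**2,    psi0,    1],
                 [3*psi0**2,  2*psi0,     1,       0]])
b = numpy.array([numpy.tanh(psiNMin), numpy.tanh(psiNMax), 0, slope])
coef = numpy.linalg.solve(A, b)

P = lambda s: coef[0]*s**3 + coef[1]*s**2 + coef[2]*s + coef[3]
assert numpy.isclose(P(psiNMin), numpy.tanh(psiNMin))   # so arctanh(P(psiNMin)) = psiNMin
assert numpy.isclose(P(psiNMax), numpy.tanh(psiNMax))
assert numpy.isclose(P(psi0), 0.0)                      # P vanishes at psi0
assert numpy.isclose(3*coef[0]*psi0**2 + 2*coef[1]*psi0 + coef[2], slope)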
Example #18
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """
    Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features)

    gamma : float, default None

    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    K = safe_sparse_dot(X, Y.T, dense_output=True)
    K *= gamma
    K += coef0
    np.tanh(K, K)   # compute tanh in-place
    return K
Example #19
def winf(x_hist):
    pre_u = x_hist[0]
    post_u = x_hist[-1]
    # parameters
    n = len(pre_u)
    vec_pre = 0.5 * (np.ones(n) + np.tanh(a_pre * pre_u + b_pre))
    return (wmax / 2.0) * np.outer((np.ones(n) + np.tanh(a_post * post_u + b_post)), vec_pre)
Example #20
def calculate_KL(i):
    x=x_test[i]
    y = t_test[i]
    [W1,W2,W3,W4,W5,b1,b2,b3,b4,b5,pi] = params
    num_model = len(pi)
    dimZ = W2.shape[0]/num_model
    h_encoder = np.tanh(np.dot(W1,x) + b1[:,0])
    KL = []
    b=[x for x in range(0,num_model)]
    j = np.random.uniform(0,1)
    pi_soft = np.exp(pi)/np.sum(np.exp(pi))
    i = 0
    for i in range(0,num_model):
        if pi_soft[i] > j :
            break
        else:
            j -= pi_soft[i]
    mu = np.dot(W2[int(i)*dimZ:(1+int(i))*dimZ],h_encoder) + b2[int(i)*dimZ:(1+int(i))*dimZ][0]
    log_sigma = (0.5*(np.dot(W3[int(i)*dimZ:(1+int(i))*dimZ],h_encoder)))+ b3[i*dimZ:(1+i)*dimZ][0]
    k = dimZ
    eps = np.random.normal(0,1,[dimZ,1])
    z = mu + np.exp(log_sigma)*eps[:,0]
    qxz = np.power(2*np.pi,-dimZ/2)*np.exp(np.sum(log_sigma))*np.exp(-0.5*np.dot((z-mu)*(z-mu),np.exp(log_sigma).T))
    pz=np.power(2*np.pi,-dimZ/2)*np.exp(-0.5*np.sum(np.square(z)))
    pz = np.exp(-0.5*np.sum(np.square(z)))
    kl = np.log((qxz+0.0000000000000001)/(pz+0.0000000000000001))
    KL.append(kl)
    h_decoder = np.tanh(np.dot(W4,z) + b4[:,0])
    o = 1/(1+np.exp(-np.dot(W5,h_decoder) + b5[:,0]))
    img = im.fromarray(o.reshape((28,28)))
    #img.save('image.png')
    logpxz = np.sum(x*np.log(o)+(1.0-x)*np.log(1.0-o))
    return np.sum(KL)
Example #21
def getImpedance(m1d,sigma,freq):
    """Analytic solution for MT 1D layered earth. Returns the impedance at the surface.

    :param SimPEG.mesh, object m1d: Mesh object with the 1D spatial information.
    :param numpy.ndarray, vector sigma: Physical property corresponding with the mesh.
    :param numpy.ndarray, vector freq: Frequencies to calculate data at.


    """

    # Initiate the impedances
    Z1d = np.empty(len(freq) , dtype='complex')
    h = m1d.hx   #vectorNx[:-1]
    # Start the process
    for nrFr, fr in enumerate(freq):
        om = 2*np.pi*fr
        Zall = np.empty(len(h)+1,dtype='complex')
        # Calculate the impedance for the bottom layer
        Zall[0] = (mu_0*om)/np.sqrt(mu_0*eps_0*(om)**2 - 1j*mu_0*sigma[0]*om)

        for nr,hi in enumerate(h):
            # Calculate the wave number
            # print(nr,sigma[nr])
            k = np.sqrt(mu_0*eps_0*om**2 - 1j*mu_0*sigma[nr]*om)
            Z = (mu_0*om)/k

            Zall[nr+1] = Z *((Zall[nr] + Z*np.tanh(1j*k*hi))/(Z + Zall[nr]*np.tanh(1j*k*hi)))

        #pdb.set_trace()
        Z1d[nrFr] = Zall[-1]

    return Z1d
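For a homogeneous half-space the recursion never modifies Zall[0], and in the quasi-static regime the apparent resistivity |Z|^2/(mu_0*omega) should recover 1/sigma; a standalone sketch of that consistency check (illustrative values, constants taken from scipy):

import numpy as np
from scipy.constants import mu_0, epsilon_0 as eps_0

sigma0, freq = 0.01, 10.0          # a 100 ohm-m half-space at 10 Hz
om = 2 * np.pi * freq
Z = (mu_0 * om) / np.sqrt(mu_0 * eps_0 * om**2 - 1j * mu_0 * sigma0 * om)
rho_app = np.abs(Z) ** 2 / (mu_0 * om)
assert np.isclose(rho_app, 1.0 / sigma0, rtol=1e-6)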
Example #22
File: lstm.py Project: jarano93/Net
    def ff(self, x):
        data = oh.hcol(x, self.x_len)
        self.x = data

        i_arg = np.dot(self.w_x_i, data) + np.dot(self.w_h_i, self.hidden)
        i_arg += np.dot(self.w_c_i, self.cell) + self.w_i
        self.gate_i = sig(i_arg)

        f_arg = np.dot(self.w_x_f, data) + np.dot(self.w_h_f, self.hidden)
        f_arg += np.dot(self.w_c_f, self.cell) + self.w_f
        self.gate_f = sig(f_arg)

        c_arg = np.dot(self.w_x_c, data) + np.dot(self.w_h_c, self.hidden)
        self.c_tan = np.tanh(c_arg + self.w_c)

        self.cell = self.gate_f * self.cell + self.gate_i * self.c_tan

        o_arg = np.dot(self.w_x_o, data) + np.dot(self.w_h_o, self.hidden)
        o_arg += np.dot(self.w_c_o, self.cell) + self.w_o
        self.gate_o = sig(o_arg)

        self.hidden = self.gate_o * np.tanh(self.cell)

        self.p = softmax(self.hidden)
        return np.argmax(self.p)
Example #23
def transtanh_(b=None):
    """
    unused Fortran translation of a Newton-Raphson solver for
    solving the transcendental equation

    b=tanh(delta)/delta
    """
    if (b > 1.0):
        print ' Error from transtanh: cannot be gridded '
        return delta
    rlim=1.e-6
    nmax=100000
    delta=1.0
    delta_old=delta
    for n in range(nmax):
        f=delta / (np.tanh(0.5 * delta) + 1.0 * 10 ** (- 60)) - 2.0 / b
        df=(np.tanh(0.5 * delta) - delta * (1 - np.tanh(0.5 * delta) ** 2)) / (np.tanh(0.5 * delta) ** 2 + 1.0 * 10 ** (- 60))
        delta=delta - 0.5 * f / df
        res = np.abs((delta - delta_old)/ delta_old)
        if res < rlim: return delta
        if ((n == nmax - 1) and (res > rlim)):
            print ' Convergence problem in transtanh dist. function !!! '
            print ' residual ',((delta - delta_old) / delta_old)
            print 'n= ', n, '  nmax=',nmax, '   delta=', delta, '   delta_old=',delta_old,'  rlim=',rlim
        delta_old= delta
    return delta
Example #24
    def test_output_gate(self):
        self.set_weights(self.lstm.right, 1)
        X = np.ones((1, 1))
        S = np.zeros((1, 1))
        H = np.zeros((1, 1))
        step = self.make_lstm_step(self.lstm)

        input_gate = logistic(X)
        state = np.tanh(1) * input_gate
        output_gate = logistic(1)
        out = output_gate * np.tanh(state)
        lstm_out, lstm_state = step(X, H, S)

        np.testing.assert_almost_equal(lstm_out, out)
        np.testing.assert_almost_equal(lstm_state, state)

        Wo = self.lstm.right.get_parameter_value('W_ox')
        self.lstm.right.set_parameter_value('W_ox', Wo * 0)

        input_gate = logistic(X)
        state = np.tanh(1) * input_gate
        output_gate = logistic(0)
        out = output_gate * np.tanh(state)
        lstm_out, lstm_state = step(X, H, S)
        np.testing.assert_almost_equal(lstm_out, out)
        np.testing.assert_almost_equal(lstm_state, state)
Example #25
    def classify(self, x):
        # change to TANH!!!!!!
        W1_bias = np.vstack((self.w1_hid, self.b1_hid))
        x1_bias = np.append(x, [1])
        y = np.tanh(np.transpose(np.dot(x1_bias, W1_bias)))

        def squash_y(el):
            if el > 0.999:
                return 0.999
            if el < 0.001:
                return 0.001
            return el

        squash_y = np.vectorize(squash_y)
        x2 = squash_y(y)

        W2_bias = np.vstack((self.w2_hid, self.b2_hid))
        x2_bias = np.append(x2, [1])
        y = np.tanh(np.transpose(np.dot(x2_bias, W2_bias)))
        x3 = squash_y(y)

        W_out_bias = np.vstack((self.w_out, self.b_out))
        x3_bias = np.append(x3, [1])
        y = np.tanh(np.transpose(np.dot(x3_bias, W_out_bias)))

        y = squash_y(y)
        return (x, x2, x3, y)
Example #26
    def step_forward(self, x, prev_h, prev_c):
        """
        x: input feature (N, D)
        prev_h: hidden state from the previous timestep (N, H)
        prev_c: previous cell state (N, H)

        self.params[self.wx_name]: input-to-hidden weights (D, 4H)
        self.params[self.wh_name]: hidden-to-hidden weights (H, 4H)
        self.params[self.b_name]: biases (4H,)

        next_h: next hidden state (N, H)
        next_c: next cell state (N, H)

        meta: variables needed for the backward pass
        """
        next_h, next_c, meta = None, None, None
        #############################################################################
        # TODO: Implement the forward pass for a single timestep of an LSTM.        #
        # You may want to use the numerically stable sigmoid implementation above.  #
        #############################################################################
        activation = x.dot(self.params[self.wx_name]) + prev_h.dot(self.params[self.wh_name]) + self.params[self.b_name]
        ai, af, ao, ag = np.hsplit(activation, 4)
        #a[:, :H], a[:, H:2*H], a[:, 2*H:3*H], a[:,3*H:]
        i = sigmoid(ai)
        f = sigmoid(af)
        o = sigmoid(ao)
        g = np.tanh(ag)
        next_c = np.multiply(f, prev_c) + np.multiply(i, g)
        next_h = np.multiply(o, np.tanh(next_c))
        meta = [x, i, f, o, g, prev_c, next_c, prev_h, next_h]
        ##############################################################################
        #                              END OF YOUR CODE                              #
        ##############################################################################
        return next_h, next_c, meta
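A small standalone run of the same gate equations (the shapes and the plain sigmoid are assumptions; the class's numerically stable helper is replaced by the textbook formula):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

N, D, H = 2, 4, 3
rng = np.random.default_rng(0)
x = rng.standard_normal((N, D))
prev_h = rng.standard_normal((N, H))
prev_c = rng.standard_normal((N, H))
Wx = rng.standard_normal((D, 4 * H))
Wh = rng.standard_normal((H, 4 * H))
b = np.zeros(4 * H)

ai, af, ao, ag = np.hsplit(x @ Wx + prev_h @ Wh + b, 4)
next_c = sigmoid(af) * prev_c + sigmoid(ai) * np.tanh(ag)   # f*c + i*g
next_h = sigmoid(ao) * np.tanh(next_c)                      # o*tanh(c)
assert next_h.shape == next_c.shape == (N, H)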
Example #27
	def fprop(self,input):
 		if self.layer_no == 0:
 			self.fprop_in = input
 		else:
 			self.fprop_in = numpy.tanh(input)
 			self.fprop_in = numpy.append(self.fprop_in, numpy.ones(shape=(self.fprop_in.shape[0],1)), 1)
 		return numpy.tanh(numpy.dot(self.fprop_in, self.W))
Example #28
 def makeTanh(self, res, a, c, x, val_min, val_max):
     span = val_max - val_min
     x_dist, y_dist = np.mgrid[-x:x:1j*res, 0:res]
     tex = (1 + a * cos(pi*x_dist/2)) * (tanh(x_dist+c) - tanh(x_dist-c))
     tex /= (np.max(tex) / span) #normalize and scale
     tex += val_min #shift to start with val_min instead of 0
     return tex
Example #29
def sigmoid_kernel(X, Y, gamma=0, coef0=1):
    """
    Compute the sigmoid kernel between X and Y.

    K(X, Y) = tanh(gamma <X, Y> + coef0)

    Parameters
    ----------
    X: array of shape (n_samples_1, n_features)

    Y: array of shape (n_samples_2, n_features)

    gamma: float, default 0

    coef0: int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    if gamma == 0:
        gamma = 1.0 / X.shape[1]

    K = linear_kernel(X, Y)
    K *= gamma
    K += coef0
    np.tanh(K, K) # compute tanh in-place
    return K
Example #30
  def cue_conceptor(self,
                    pattern,
                    init_washout=100,
                    cue_length=30,
                    lambda_adapt_cue=0.01):
    """
    Cue conceptor in following three stages:
    1. Initial washout
    2. Cueing

    @param pattern: input pattern
    @param init_washout: initial washout length
    @param cue_length: cueing length
    """

    x=np.zeros((self.num_neuron,1))
    for n in xrange(init_washout):
      u=pattern[:,n][None].T
      x=np.tanh(self.W.dot(x)+self.W_in.dot(u)+self.bias)

    C=np.zeros((self.num_neuron, self.num_neuron))

    for n in xrange(cue_length):
      u=pattern[:,n+init_washout][None].T
      x=np.tanh(self.W.dot(x)+self.W_in.dot(u)+self.bias)
      C+=self.adapt_conceptor(C, x, lambda_adapt_cue)

    return C, x
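The reservoir update here is the standard echo-state form x <- tanh(W x + W_in u + bias); a self-contained washout loop with random weights (scaling chosen only for illustration) shows the pattern without the conceptor machinery:

import numpy as np

num_neuron, in_dim = 50, 1
rng = np.random.default_rng(0)
W = (0.8 / np.sqrt(num_neuron)) * rng.standard_normal((num_neuron, num_neuron))
W_in = rng.standard_normal((num_neuron, in_dim))
bias = 0.2 * rng.standard_normal((num_neuron, 1))

x = np.zeros((num_neuron, 1))
for n in range(100):                      # initial washout
    u = np.array([[np.sin(n / 4.0)]])    # toy input pattern
    x = np.tanh(W.dot(x) + W_in.dot(u) + bias)
assert np.max(np.abs(x)) < 1             # tanh keeps every unit in (-1, 1)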
Example #31
softsign = utils.copy_docstring(
    tf.math.softsign,
    lambda features, name=None: np.divide(features, (np.abs(features) + 1)))

sqrt = utils.copy_docstring(tf.math.sqrt, lambda x, name=None: np.sqrt(x))

square = utils.copy_docstring(tf.math.square,
                              lambda x, name=None: np.square(x))

squared_difference = utils.copy_docstring(
    tf.math.squared_difference, lambda x, y, name=None: np.square(x - y))

subtract = utils.copy_docstring(tf.math.subtract,
                                lambda x, y, name=None: np.subtract(x, y))

tan = utils.copy_docstring(tf.math.tan, lambda x, name=None: np.tan(x))

tanh = utils.copy_docstring(tf.math.tanh, lambda x, name=None: np.tanh(x))

top_k = utils.copy_docstring(tf.math.top_k, _top_k)

truediv = utils.copy_docstring(tf.math.truediv,
                               lambda x, y, name=None: np.true_divide(x, y))

# unsorted_segment_max = utils.copy_docstring(
#     tf.math.unsorted_segment_max,
#     lambda data, segment_ids, num_segments, name=None: (
#         np.unsorted_segment_max))

# unsorted_segment_mean = utils.copy_docstring(
#     tf.math.unsorted_segment_mean,
#     lambda data, segment_ids, num_segments, name=None: (
#         np.unsorted_segment_mean))
Example #32
def tanh(x):
    return np.tanh(x)
Example #33
    def backProp(self,node,error=None):

        dc_dsc = np.diag((1-node.hActs1**2).flatten())
        dh_dc = dotW(dc_dsc, np.diag(self.o.flatten()))
        # Inherited error
        if node.parent == None:
            error_at_h = error.astype('float32').squeeze()
            error_at_c = dot(dh_dc, error_at_h)
        if node.parent != None:
            [in_ho, in_hi, in_hu] = error[0:3]
            in_hl = error[3:3+self.paramDim]
            in_hr = error[3+self.paramDim:3+2*self.paramDim]
            in_cc = error[3+2*self.paramDim]
            if node in node.parent.left:
                idx = min(node.idx, self.paramDim-1)
                error_at_h = dot(self.Uo[idx].T, in_ho) + dot(self.Ui[idx].T, in_hi) + dot(self.Uu[idx].T, in_hu)
                for j in range(self.paramDim):
                    error_at_h += dot(self.Ul[j][idx].T, in_hl[j])
                    error_at_h += dot(self.Ur[j][idx].T, in_hr[j])
                error_at_c = dot(np.diag(self.l[idx].flatten()), in_cc) + dot(dh_dc, error_at_h)
            if node in node.parent.right:
                idx = min(node.idx, self.paramDim-1)
                error_at_h = dot(self.Vo[idx].T, in_ho) + dot(self.Vi[idx].T, in_hi) + dot(self.Vu[idx].T, in_hu)
                for j in range(self.paramDim):
                    error_at_h += dot(self.Vl[j][idx].T, in_hl[j])
                    error_at_h += dot(self.Vr[j][idx].T, in_hr[j])
                error_at_c = dot(np.diag(self.r[idx].flatten()), in_cc) + dot(dh_dc, error_at_h)
        # Error passed to children
        # o
        do_dso = np.diag(np.multiply(self.o, 1-self.o).flatten())
        dh_dso = dotW(do_dso, np.diag(np.tanh(node.hActs1).flatten()))
        # i
        di_dsi = np.diag(np.multiply(self.i, 1-self.i).flatten())
        dc_dsi = dotW(di_dsi, np.diag(self.u.flatten()))
        # u
        du_dsu = np.diag((1-self.u**2).flatten())
        dc_dsu = dotW(du_dsu, np.diag(self.i.flatten()))
        if not node.isLeaf:
            # l
            dl_dsl = []
            dc_dsl = []
            for j in range(self.paramDim):
                dc_dsl.append(np.zeros((self.middleDim, self.middleDim), dtype='float32'))
            for j in range(self.paramDim):
                dl_dsl.append(np.diag(np.multiply(self.l[j], 1-self.l[j]).flatten()))
            for j in node.left:
                idx = min(j.idx, self.paramDim-1)
                dc_dsl[idx] += dotW(dl_dsl[idx], np.diag(j.hActs1.flatten()))
            # r
            dr_dsr = []
            dc_dsr = []
            for j in range(self.paramDim):
                dc_dsr.append(np.zeros((self.middleDim, self.middleDim), dtype='float32'))
            for j in range(self.paramDim):
                dr_dsr.append(np.diag(np.multiply(self.r[j], 1-self.r[j]).flatten()))
            for j in node.right:
                idx = min(j.idx, self.paramDim-1)
                dc_dsr[idx] += dotW(dr_dsr[idx], np.diag(j.hActs1.flatten()))

            # Error out
            dJ_dso = dot(dh_dso, error_at_h)
            dJ_dsi = dot(dc_dsi, error_at_c)
            dJ_dsu = dot(dc_dsu, error_at_c)
            dJ_dsl = []
            dJ_dsr = []
            for j in range(self.paramDim):
                dJ_dsl.append(dot(dc_dsl[j], error_at_c))
                dJ_dsr.append(dot(dc_dsr[j], error_at_c))
            out_cc = error_at_c
            error_out = [dJ_dso, dJ_dsi, dJ_dsu]
            for j in range(self.paramDim):
                error_out.append(dJ_dsl[j])
            for j in range(self.paramDim):
                error_out.append(dJ_dsr[j])
            error_out.append(out_cc)
        # Parameter Gradients
        if not node.isLeaf:
            x = np.reshape(self.L[node.word,:], (self.wvecDim, 1))
            # Bias
            self.dbo += dJ_dso.flatten()
            self.dbi += dJ_dsi.flatten()
            self.dbu += dJ_dsu.flatten()
            for j in range(self.paramDim):
                self.dbf += dJ_dsl[j].flatten()
                self.dbf += dJ_dsr[j].flatten()
            # Us
            for j in node.left:
                idx = min(j.idx, self.paramDim-1)
                self.dUo[idx] += dotW(dJ_dso[:,None], j.hActs2[None,:])
                self.dUi[idx] += dotW(dJ_dsi[:,None], j.hActs2[None,:])
                self.dUu[idx] += dotW(dJ_dsu[:,None], j.hActs2[None,:])
                for k in range(self.paramDim):
                    self.dUl[k][idx] += dotW(dJ_dsl[k][:,None], j.hActs2[None,:])
                    self.dUr[k][idx] += dotW(dJ_dsr[k][:,None], j.hActs2[None,:])
            # Vs
            for j in node.right:
                idx = min(j.idx, self.paramDim-1)
                self.dVo[idx] += dotW(dJ_dso[:,None], j.hActs2[None,:])
                self.dVi[idx] += dotW(dJ_dsi[:,None], j.hActs2[None,:])
                self.dVu[idx] += dotW(dJ_dsu[:,None], j.hActs2[None,:])
                for k in range(self.paramDim):
                    self.dVl[k][idx] += dotW(dJ_dsl[k][:,None], j.hActs2[None,:])
                    self.dVr[k][idx] += dotW(dJ_dsr[k][:,None], j.hActs2[None,:])
            # Ws
            self.dWo += dotW(dJ_dso[:,None], x.T)
            self.dWu += dotW(dJ_dsu[:,None], x.T)
            self.dWi += dotW(dJ_dsi[:,None], x.T)
            for j in range(self.paramDim):
                self.dWf += dotW(dJ_dsl[j][:,None], x.T)
                self.dWf += dotW(dJ_dsr[j][:,None], x.T)
            # L
            temp = dot(self.Wo.T, dJ_dso).flatten() + dot(self.Wi.T, dJ_dsi).flatten() + dot(self.Wu.T, dJ_dsu).flatten()
            for j in range(self.paramDim):
                temp += dot(self.Wf.T, dJ_dsl[j]).flatten()
                temp += dot(self.Wf.T, dJ_dsr[j]).flatten()
            self.dL[node.word] = temp

            # Recursion
            for j in node.left:
                self.backProp(j, error_out)
            for j in node.right:
                self.backProp(j, error_out)
        else:
            x = np.reshape(self.L[node.word,:], (self.wvecDim, 1))
            dJ_dso = dot(dh_dso, error_at_h)
            dJ_dsi = dot(dc_dsi, error_at_c)
            dJ_dsu = dot(dc_dsu, error_at_c)
            # Bias
            self.dbo += dJ_dso.flatten()
            self.dbi += dJ_dsi.flatten()
            self.dbu += dJ_dsu.flatten()
            # Ws
            self.dWo += dotW(dJ_dso[:,None], x.T)
            self.dWi += dotW(dJ_dsi[:,None], x.T)
            self.dWu += dotW(dJ_dsu[:,None], x.T)
            # L
            self.dL[node.word] = dot(self.Wo.T, dJ_dso).flatten() + dot(self.Wi.T, dJ_dsi).flatten() + dot(self.Wu.T, dJ_dsu).flatten()
Example #34
    def forwardProp(self,node):
        cost  =  total = 0.0
        # this is exactly the same setup as forwardProp in rnn.py
        x = np.reshape(self.L[node.word,:], (self.wvecDim, 1)).squeeze()
        if node.isLeaf:
            self.i = sigmoid(dot(self.Wi, x)+self.bi)
            self.o = sigmoid(dot(self.Wo, x)+self.bo)
            self.u = np.tanh(dot(self.Wu, x)+self.bu)
            node.hActs1 = np.multiply(self.i, self.u)
        else:
            for j in node.left:
                total += self.forwardProp(j)
            for j in node.right:
                total += self.forwardProp(j)
            si = add(dot(self.Wi, x),self.bi)
            for j in node.left:
                idx = min(j.idx, self.paramDim-1)
                si += dot(self.Ui[idx], j.hActs2)
            for j in node.right:
                idx = min(j.idx, self.paramDim-1)
                si += dot(self.Vi[idx], j.hActs2)
            self.i = sigmoid(si)

            su = add(dot(self.Wu, x),self.bu)
            for j in node.left:
                idx = min(j.idx, self.paramDim-1)
                su += dot(self.Uu[idx], j.hActs2)
            for j in node.right:
                idx = min(j.idx, self.paramDim-1)
                su += dot(self.Vu[idx], j.hActs2)
            self.u = np.tanh(su)

            so = add(dot(self.Wo, x),self.bo)
            for j in node.left:
                idx = min(j.idx, self.paramDim-1)
                so += dot(self.Uo[idx], j.hActs2)
            for j in node.right:
                idx = min(j.idx, self.paramDim-1)
                so += dot(self.Vo[idx], j.hActs2)
            self.o = sigmoid(so)

            temp = add(dot(self.Wf, x),self.bf)
            sl = np.zeros((self.middleDim), dtype='float32')
            sr = np.zeros((self.middleDim), dtype='float32')

            for j in range(self.paramDim):
                sl *= 0
                sl += temp
                for k in node.left:
                    idx2 = min(k.idx, self.paramDim-1)
                    sl += dot(self.Ul[j][idx2], k.hActs2)
                for k in node.right:
                    idx2 = min(k.idx, self.paramDim-1)
                    sl += dot(self.Vl[j][idx2], k.hActs2)
                self.l[j] = sigmoid(sl)

            for j in range(self.paramDim):
                sr *= 0
                sr += temp
                for k in node.left:
                    idx2 = min(k.idx, self.paramDim-1)
                    sr += dot(self.Ur[j][idx2], k.hActs2)
                for k in node.right:
                    idx2 = min(k.idx, self.paramDim-1)
                    sr += dot(self.Vr[j][idx2], k.hActs2)
                self.r[j] = sigmoid(sr)

            node.hActs1 = np.multiply(self.i, self.u)
            for j in node.left:
                idx = min(j.idx, self.paramDim-1)
                node.hActs1 += np.multiply(self.l[idx], j.hActs1)
            for j in node.right:
                idx = min(j.idx, self.paramDim-1)
                node.hActs1 += np.multiply(self.r[idx], j.hActs1)
        node.hActs2 = np.multiply(self.o, np.tanh(node.hActs1))
        return total + 1
Example #35
 def forward(self, x):
     self.last_input = x
     return np.tanh(x)
Example #36
 def tanh(self):
     if self.autograd:
         return Tensor(np.tanh(self.data),autograd=True,creators=[self],creation_op='tanh')
     return Tensor(np.tanh(self.data))
Example #37
 def _logistic(self, x):
     '''Calling 1.0 / (1.0 + np.exp(-x)) directly easily triggers the warning
        "RuntimeWarning: overflow encountered in exp"; rewriting it in the
        equivalent form below makes the algorithm more stable.
     '''
     return 0.5 * (1 + np.tanh(0.5 * x))
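The identity behind this trick is sigmoid(x) = 0.5*(1 + tanh(x/2)); a quick check that the two forms agree where the naive one is safe, and that the stable form stays finite far into the tail:

import numpy as np

x = np.array([-30.0, -1.0, 0.0, 1.0, 30.0])
naive = 1.0 / (1.0 + np.exp(-x))
stable = 0.5 * (1 + np.tanh(0.5 * x))
assert np.allclose(naive, stable)
assert np.isfinite(0.5 * (1 + np.tanh(0.5 * -1000.0)))   # no overflow at x = -1000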
Example #38
    def tanh(self, Z):

        A = np.tanh(Z)
        cache = {}
        cache["Z"] = Z
        return A, cache
Example #39
def sigmoid(x):
    return (numpy.tanh(x / 2) + 1) / 2
Example #40
    def tanh_der(self, dA, cache):

        Z = cache["Z"]
        dZ = dA * (1 - np.tanh(Z)**2)
        return dZ
Example #41
def gfunc(x):
    gx = np.tanh(x)
    dg = 1 - gx**2
    return gx, dg
Example #42
def E_QHO_avg_theo(beta):
    """Uso: devuelve valor de energía interna para el QHO unidimensional"""
    return 0.5 / np.tanh(0.5 * beta)
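In units with hbar*omega = k_B = 1 this is (1/2)*coth(beta/2), and both limits are easy to confirm numerically: beta -> infinity gives the ground-state energy 1/2, while beta -> 0 recovers the classical equipartition value 1/beta.

import numpy as np

E = lambda beta: 0.5 / np.tanh(0.5 * beta)
assert np.isclose(E(100.0), 0.5)             # low temperature: ground state
assert np.isclose(E(1e-4), 1e4, rtol=1e-4)   # high temperature: ~ 1/beta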
Example #43
def fun_ed_aimless(s, d0, r):
    # CC1/2 fitting equation used in Aimless, suggested by Ed Pozharski
    return 0.5 * (1 - numpy.tanh((s - d0) / r))
Example #44
def tanh(x, derive=False):
    if derive:
        return np.power(1 / np.cosh(x), 2)
    return np.tanh(x)
Example #45
 def tanh_deriv(self, s):
     return 1.0 - np.tanh(s)**2
Example #46
"""
tanh
~~~~
Plots a graph of the tanh function."""

import numpy as np
import matplotlib.pyplot as plt

z = np.arange(-5, 5, .1)
t = np.tanh(z)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, t)
ax.set_ylim([-1.0, 1.0])
ax.set_xlim([-5,5])
ax.grid(True, which='major')
#grid x-axis line color
a = ax.get_xgridlines()
b = a[3]
b.set_color('black')
b.set_linewidth(1.5)
#grid y-axis line color
c = ax.get_ygridlines()
d = c[4]
d.set_color('black')
d.set_linewidth(1.5)
#labels
ax.set_xlabel('z')
ax.set_title('tanh function')
Example #47
 def __call__(self, x):
     return np.log(abs(self.p*np.tanh(self.q*x*np.cos(self.r*x))))
Example #48
 def forward_cpu(self, x):
     y = utils.force_array(numpy.tanh(x[0]))
     self.retain_outputs((0,))
     self._use_cudnn = False
     return y,
Example #49
 def __activation_derivative(self, X):
     return 1.0 - np.tanh(X)**2  #sigmoid-derivative: X * (1-X)
Example #50
 def forwardPropagate(self, X):
     self.Z1 = np.add(np.matmul(self.W1, X), self.b1)
     self.A1 = np.tanh(self.Z1)
     self.Z2 = np.add(np.matmul(self.W2, self.A1), self.b2)
     self.A2 = self.sigmoid(self.Z2)
     return self.A2
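This is the classic two-layer net with a tanh hidden layer and a sigmoid output; wiring it up with random weights (the shapes are assumptions, and self.sigmoid is replaced by a plain function) shows the column-vector convention it uses:

import numpy as np

n_in, n_hid, n_out = 3, 5, 1
rng = np.random.default_rng(0)
W1, b1 = 0.1 * rng.standard_normal((n_hid, n_in)), np.zeros((n_hid, 1))
W2, b2 = 0.1 * rng.standard_normal((n_out, n_hid)), np.zeros((n_out, 1))

X = rng.standard_normal((n_in, 1))              # one column per sample
A1 = np.tanh(np.add(np.matmul(W1, X), b1))
A2 = 1.0 / (1.0 + np.exp(-np.add(np.matmul(W2, A1), b2)))
assert 0 < A2.item() < 1                        # sigmoid output in (0, 1)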
Example #51
def test_ampere_yee2D(path):

    layout = gridlayout.GridLayout()  # yee layout

    tv = TestVariables()

    Bx = np.zeros([
        layout.allocSize(tv.interpOrder, tv.BxCentering[0], tv.nbrCells[0]),
        layout.allocSize(tv.interpOrder, tv.BxCentering[1], tv.nbrCells[1])
    ],
                  dtype=np.float64)
    By = np.zeros([
        layout.allocSize(tv.interpOrder, tv.ByCentering[0], tv.nbrCells[0]),
        layout.allocSize(tv.interpOrder, tv.ByCentering[1], tv.nbrCells[1])
    ],
                  dtype=np.float64)
    Bz = np.zeros([
        layout.allocSize(tv.interpOrder, tv.BzCentering[0], tv.nbrCells[0]),
        layout.allocSize(tv.interpOrder, tv.BzCentering[1], tv.nbrCells[1])
    ],
                  dtype=np.float64)

    Jx = np.zeros([
        layout.allocSize(tv.interpOrder, tv.JxCentering[0], tv.nbrCells[0]),
        layout.allocSize(tv.interpOrder, tv.JxCentering[1], tv.nbrCells[1])
    ],
                  dtype=np.float64)
    Jy = np.zeros([
        layout.allocSize(tv.interpOrder, tv.JyCentering[0], tv.nbrCells[0]),
        layout.allocSize(tv.interpOrder, tv.JyCentering[1], tv.nbrCells[1])
    ],
                  dtype=np.float64)
    Jz = np.zeros([
        layout.allocSize(tv.interpOrder, tv.JzCentering[0], tv.nbrCells[0]),
        layout.allocSize(tv.interpOrder, tv.JzCentering[1], tv.nbrCells[1])
    ],
                  dtype=np.float64)
    w1 = np.zeros_like(Jz)
    w2 = np.zeros_like(Jz)

    psi_p_X = layout.physicalStartIndex(tv.interpOrder, 'primal')
    pei_p_X = layout.physicalEndIndex(tv.interpOrder, 'primal', tv.nbrCells[0])
    psi_p_Y = layout.physicalStartIndex(tv.interpOrder, 'primal')
    pei_p_Y = layout.physicalEndIndex(tv.interpOrder, 'primal', tv.nbrCells[1])

    psi_d_X = layout.physicalStartIndex(tv.interpOrder, 'dual')
    pei_d_X = layout.physicalEndIndex(tv.interpOrder, 'dual', tv.nbrCells[0])
    psi_d_Y = layout.physicalStartIndex(tv.interpOrder, 'dual')
    pei_d_Y = layout.physicalEndIndex(tv.interpOrder, 'dual', tv.nbrCells[1])

    nbrGhost_p = layout.nbrGhosts(tv.interpOrder, 'primal')
    nbrGhost_d = layout.nbrGhosts(tv.interpOrder, 'dual')

    x_dual = tv.meshSize[0] * np.arange(
        layout.allocSize(tv.interpOrder, 'dual', tv.nbrCells[0])
    ) - tv.meshSize[0] * nbrGhost_d + tv.meshSize[0] * 0.5
    y_dual = tv.meshSize[1] * np.arange(
        layout.allocSize(tv.interpOrder, 'dual', tv.nbrCells[1])
    ) - tv.meshSize[1] * nbrGhost_d + tv.meshSize[1] * 0.5
    x_primal = tv.meshSize[0] * np.arange(
        layout.allocSize(tv.interpOrder, 'primal',
                         tv.nbrCells[0])) - tv.meshSize[0] * nbrGhost_p
    y_primal = tv.meshSize[1] * np.arange(
        layout.allocSize(tv.interpOrder, 'primal',
                         tv.nbrCells[1])) - tv.meshSize[1] * nbrGhost_p

    Bx = np.tensordot(np.cos(2 * np.pi / tv.domainSize[0] * x_primal),
                      np.sin(2 * np.pi / tv.domainSize[1] * y_dual),
                      axes=0)
    By = np.tensordot(np.cos(2 * np.pi / tv.domainSize[0] * x_dual),
                      np.tanh(2 * np.pi / tv.domainSize[1] * y_primal),
                      axes=0)
    Bz = np.tensordot(np.sin(2 * np.pi / tv.domainSize[0] * x_dual),
                      np.tanh(2 * np.pi / tv.domainSize[1] * y_dual),
                      axes=0)

    # Jx =  dyBz
    # Jy = -dxBz
    # Jz =  dxBy - dyBx
    Jx[:,
       psi_p_Y:pei_p_Y + 1] = (Bz[:, psi_d_Y:pei_d_Y + 2] -
                               Bz[:, psi_d_Y - 1:pei_d_Y + 1]) / tv.meshSize[1]
    Jy[psi_p_X:pei_p_X +
       1, :] = -(Bz[psi_d_X:pei_d_X + 2, :] -
                 Bz[psi_d_X - 1:pei_d_X + 1, :]) / tv.meshSize[0]
    w1[psi_p_X:pei_p_X +
       1, :] = (By[psi_d_X:pei_d_X + 2, :] -
                By[psi_d_X - 1:pei_d_X + 1, :]) / tv.meshSize[0]
    w2[:, psi_p_Y:pei_p_Y +
       1] = -(Bx[:, psi_d_Y:pei_d_Y + 2] -
              Bx[:, psi_d_Y - 1:pei_d_Y + 1]) / tv.meshSize[1]
    Jz = w1 + w2

    filename_jx = "jx_yee_2D_order1.txt"
    filename_jy = "jy_yee_2D_order1.txt"
    filename_jz = "jz_yee_2D_order1.txt"

    np.savetxt(os.path.join(path, filename_jx), Jx.flatten('C'), delimiter=" ")
    np.savetxt(os.path.join(path, filename_jy), Jy.flatten('C'), delimiter=" ")
    np.savetxt(os.path.join(path, filename_jz), Jz.flatten('C'), delimiter=" ")
Example #52
 def eval(self, values, x):
     values[0] = (1.0 - np.tanh(x / (2.0 * np.sqrt(2.0 * lmbda)))) / 2.0
Example #53
pi = np.pi
omega = 1
kappa = 1
Nt = 4
T = np.linspace(0, np.pi/omega, Nt)
eta = np.linspace(0, 2*np.pi/kappa, 300)
xi = np.linspace(-1, 1, 299)
epsilon = 0.3

gamma = np.sqrt(1j*omega/Sc)

u_x  = np.zeros((len(T), len(xi), len(eta)), dtype="complex")
u_y  = np.zeros((len(T), len(xi), len(eta)), dtype="complex")

kappa_prime = np.sqrt(1j*omega/Sc + kappa*kappa)
P1 = (gamma*F0*np.tanh(gamma)/(kappa*np.cosh(kappa)))/(1-kappa_prime*np.tanh(kappa)/(kappa*np.tanh(kappa_prime)))

for t in range(len(T)):
	for x in range(len(xi)):
		u_x[t,x,:]    = epsilon*np.sin(kappa*eta)*(  (P1*kappa*np.cosh(kappa)/(gamma*gamma))*(np.cosh(kappa*xi[x])/np.cosh(kappa) - np.cosh(kappa_prime*xi[x])/np.cosh(kappa_prime))   + (F0*np.tanh(gamma)/(gamma))*(np.cosh(kappa_prime*xi[x])/np.cosh(kappa_prime) - xi[x]*np.sinh(gamma*xi[x])/np.sinh(gamma))  )
		u_x[t,x,:]   += F0*(1-np.cosh(gamma*xi[x])/np.cosh(gamma))/(gamma*gamma)
		u_x[t,x,:]   *= np.exp(1j*omega*T[t])

		u_y[t, x, :] = (np.exp(1j*omega*T[t])*np.cos(kappa*eta)*kappa*P1*np.sinh(kappa)/(gamma*gamma))*( np.sinh(kappa_prime*xi[x])/np.sinh(kappa_prime) - np.sinh(kappa*xi[x])/np.sinh(kappa))

#test boundary conditions
"""
for i in range(len(T)):
	plt.plot(xi, np.real(u_y[i, :, 150]))
	plt.plot(xi, np.real(u_y2[i, :, 150]))
	plt.show()
"""
Example #54
 def __activation(self, X):
     return np.tanh(X)  # sigmoid alternative: 1/(1 + np.exp(-X))
Example #55
def tanh_deriv(x):
    return 1.0 - np.tanh(x) * np.tanh(x)
Example #56
def func_inter_dot_coupling(xdata, offset: float, slope: float, height: float,
                            position: float, width: float):
    return offset + slope * xdata + .5 * height * (1 + np.tanh(
        (xdata - position) / width))
Example #57
 def tanh(self, x, derivative=False):
     if derivative == False:
         return np.tanh(x)
     else:
         tanh_x = self.tanh(x)
         return (1 - np.square(tanh_x))
Example #58
def activation(x):
    return np.tanh(x)
Example #59
def tanh(x, Derivative=False):
    if not Derivative:
        return np.tanh(x)
    else:
        return 1.0 - np.tanh(x)**2
Example #60
 def _tanh(x):
     return np.tanh(x)