Example #1
    def testName(self):

        # read data
        data = reader.read("../data/log_1_fixed.txt", jstartline=15000, maxlines=5000)

        # preprocess data
        preprocess.preproc(data)

        # initialize model
        layers = [13, 8, 1]
        activf = [activation.linear(), activation.tanh(), activation.sigmoid()]  # activation.tanh(1.75, 3./2.),
        net = ffnet.FFNet(layers, activf)
        net.initw(0.1)

        # create training options
        opts = trainb.options()

        # write function
        f = open("../output/trainb_test.txt", "w+")
        writefcn = lambda s: f.write(s)

        # training
        trainb.train(data, opts, net, writefcn)

        # close file
        f.close()
Example #2
    def testName(self):

        # read data
        data = reader.read("../data/log_1_fixed.txt",
                           jstartline=15000,
                           maxlines=5000)

        # preprocess data
        preprocess.preproc(data)

        # initialize model
        layers = [13, 8, 1]
        activf = [
            activation.linear(),
            activation.tanh(),
            activation.sigmoid()
        ]  # activation.tanh(1.75, 3./2.),
        net = ffnet.FFNet(layers, activf)
        net.initw(0.1)

        # create training options
        opts = trainsg.options()

        # write function
        f = open("../output/trainsg_test.txt", "w+")
        writefcn = lambda s: f.write(s)

        # training
        trainsg.train(data, opts, net, writefcn)

        # close file
        f.close()
Example #3
    def activation_forward(self, A_prev, W, b, activation):

        if activation == 'sigmoid':
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = sigmoid(Z)

        elif activation == "tanh":
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = tanh(Z)

        elif activation == "relu":
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = relu(Z)

        elif activation == "leaky_relu":
            Z, linear_cache = self.linear_forward(A_prev, W, b)
            A, activation_cache = leaky_relu(Z)

        else:
            raise ValueError("Unsupported activation function: %s" % activation)

        assert (A.shape == (W.shape[0], A_prev.shape[1]))
        cache = (linear_cache, activation_cache)

        return A, cache
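
Example #3 relies on a linear_forward helper and activation functions that each return a value plus a cache. Below is a minimal sketch of that assumed interface (the cache layout and the standalone form, rather than a method on self, are assumptions, not taken from the source):

import numpy as np

def linear_forward(A_prev, W, b):
    # Affine step assumed by Example #3 (a method, self.linear_forward, in the
    # original); returns Z and the inputs needed later for backpropagation.
    Z = np.dot(W, A_prev) + b
    return Z, (A_prev, W, b)

def sigmoid(Z):
    # Element-wise sigmoid; returns the activation and Z as its cache.
    A = 1.0 / (1.0 + np.exp(-Z))
    return A, Z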
Example #4
    def activation_function(self, Z):
        if self.activation_function_name == 'relu':
            return relu(Z)
        elif self.activation_function_name == 'sigmoid':
            return sigmoid(Z)
        else:
            return tanh(Z)
Example #5
    def test_tanh():
        """Test tanh activation function"""

        x = np.array([[0, 1, 3], [-1, 0, -5], [1, 0, 3], [10, -9, -7]])

        y = np.array([[0, 0.76159, 0.99505], [-0.76159, 0, -0.99991],
                      [0.76159, 0, 0.99505], [1.00000, -1.00000, -1.00000]])

        assert np.allclose(tanh(x), y, atol=0.00001)
Example #6
    def test_tanh_deriv():
        """Test tanh activation function derivative"""

        x = np.array([[0, 1, 3], [-1, 0, -5], [1, 0, 3], [10, -9, -7]])

        y = np.array([[1, 0.41998, 0.00988], [0.41998, 1, 0.00018],
                      [0.41998, 1, 0.00988], [0, 0, -0]])

        assert np.allclose(tanh(x, deriv=True), y, atol=0.0001)
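
Examples #5 and #6 assume a NumPy-based tanh(x, deriv=False) helper. One possible implementation consistent with the expected values above (only the keyword name deriv comes from the tests; the rest is an assumption):

import numpy as np

def tanh(x, deriv=False):
    # Element-wise tanh; with deriv=True, returns the derivative 1 - tanh(x)^2.
    t = np.tanh(x)
    if deriv:
        return 1.0 - t ** 2
    return t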
Example #7
def linear_activation_forward(A_prev, W, b, activation):
    Z = np.dot(W, A_prev) + b
    if activation == "sigmoid":
        A = sigmoid(Z)
    elif activation == "relu":
        A = relu(Z)
    elif activation == "tanh":
        A = tanh(Z)
    else:
        raise ValueError("Unsupported activation: %s" % activation)
    return A, Z
Example #8
    def step(z_t, zi_t, zf_t, zo_t, c_tm1, h_tm1):
        # new information
        Z_t = tanh(z_t + T.dot(h_tm1, U))

        # input gate
        Zi_t = sigmoid(zi_t + T.dot(h_tm1, Ui) + T.dot(c_tm1, Vi))

        # forget gate
        Zf_t = sigmoid(zf_t + T.dot(h_tm1, Uf) + T.dot(c_tm1, Vf))

        # new plus old/unforgotten memory
        c_t = Z_t * Zi_t + c_tm1 * Zf_t

        # output gate
        Zo_t = sigmoid(zo_t + T.dot(h_tm1, Uo) + T.dot(c_t, Vo))

        # output information
        h_t = tanh(c_t) * Zo_t

        return c_t, h_t
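
The step function in Example #8 follows the argument order theano.scan expects: sequence slices first, then the previous states. A minimal sketch of driving it with scan, assuming step and its weight matrices (U, Ui, Vi, ...) are already defined, that z, zi, zf, zo are precomputed input projections over time, and that c0, h0 are the initial cell and hidden states (all of those names are assumptions):

import theano

# Iterate the recurrence over the time dimension of the precomputed inputs;
# outputs_info supplies the initial states matching step's (c_tm1, h_tm1).
(c_seq, h_seq), updates = theano.scan(
    fn=step,
    sequences=[z, zi, zf, zo],
    outputs_info=[c0, h0])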
Example #9
    def test_apply(self):

        # create a simple network
        net = ffnet.FFNet(
            [k, q, m],
            [activation.linear(),
             activation.tanh(),
             activation.sigmoid()])

        # some input
        x = [1] * k
        net.apply(x)
Example #10
    def test_tanh(self):

        tanh = activation.tanh(-2.7, -10.4)

        x = 22.2
        dx = 1.e-7

        f = tanh.f(x)

        df_a = tanh.df(f)
        df_n = (tanh.f(x + dx) - tanh.f(x - dx)) / 2 / dx

        print "Analytical: %e, Numerical: %e" % (df_a, df_n)

        self.assertAlmostEqual(df_a, df_n, 8, "Numerical derivative is not equal to analytical")
Example #11
def feedForward(self, inputs):
    # convolution layer one
    feature_maps = co.convoluteOne(self, inputs)  # gets the filtered receptive field
    max_pools = co.poolOne(feature_maps)  # reduce the size of the image using max pooling
    fully_connected = max_pools.reshape((40,))
    # preparing for the activation, gets all the inputs not including the bias
    for i in range(self.input - 1):  # -1 skips the bias
        self.aIn[i] = fully_connected[i]  # the last element (the bias) sits unchanged at the end of self.aIn
    hidden_sum = np.dot(self.wIn.T, self.aIn)  # weighted sum into each hidden unit
    self.aHid = a.tanh(hidden_sum)  # runs the hidden layer through the activation

    # output dot products and activations
    output_sum = np.dot(self.wOut.T, self.aHid)  # weighted sum into each output unit
    self.aOut = a.sigmoid(output_sum)  # runs the output through the activation (think about adding options to change this)
    return self.aOut  # returns the activated outputs
Example #12
def single_layer_fp(X, W, b, activation="sigmoid"):
    # broadcast the bias across all samples (columns of X)
    A = np.dot(W, X) + np.outer(b, np.ones(X.shape[1]))
    if activation == "linear":
        S = act_fun.linear(A)
    elif activation == "sigmoid":
        S = act_fun.sigmoid(beta, A)
    elif activation == "tanh":
        S = act_fun.tanh(beta, A)
    elif activation == "relu":
        S = act_fun.relu(A)
    elif activation == "softplus":
        S = act_fun.softplus(A)
    elif activation == "elu":
        S = act_fun.elu(delta, A)
    elif activation == "softmax":
        S = act_fun.softmax(A)
    else:
        raise ValueError("Activation function isn't supported: %s" % activation)
    return (A, S)
Example #13
def train(k, data):
    
    # initialize new model
    layers = [13, 8, 1]
    activf = [activation.linear(), activation.tanh(), activation.sigmoid()]  
    net = ffnet.FFNet(layers, activf)
    net.initw(0.1) 
    
    # use default training options
    opts = trainsg.options()
    opts.rate = 2.e-4
    
    # write function
    f = open("../output/train-%s.txt" % k, "w+")
    writefcn = lambda s: f.write(s)

    # training
    net = trainsg.train(data, opts, net, writefcn)

    # close file
    f.close()
    
    # return trained network
    return net
Example #15
class Test(unittest.TestCase):

    seed = 1.0

    random.seed(seed)
    
    ninp = 10       # number of features
    nhid = 8        # number of hidden units
    nq = 5         # number of samples in a query
        
    # generated query data 
    query = [ [0, random.choice([0, 1])] + [random.random() for _ in xrange(ninp)] for _ in xrange(nq) ]
                 
    # neural network model
    model = ffnet.FFNet([ninp, nhid, 1], [activation.linear(), activation.tanh(1.75, 3./2.), activation.sigmoid()]) 
        
    # random weights
    model.initw(1.0, seed)   
    w = model.getw()
    
    # ranknet sigma
    sigma = 1.0
            
    def test_s(self):
        self.assertEqual(1, ranknet.S(1, 0))
        self.assertEqual(0, ranknet.S(1, 1))
        self.assertEqual(0, ranknet.S(0, 0))
        self.assertEqual(-1, ranknet.S(0, 1))
       
    def test_cost(self):
               
        C = ranknet.cost(self.query, self.model, self.sigma)
        print C

    def test_gradient(self):
        
        # analytical gradient
        dbpr = ranknet.gradient(self.query, self.model, self.sigma)
        
        # numerical gradient
        dw = 1.e-5
        jw = 7
        
        w_u = self.w[:]
        w_u[jw] = w_u[jw] + dw
        self.model.setw(w_u)
        C_u = ranknet.cost(self.query, self.model, self.sigma)
        
        w_d = self.w[:]
        w_d[jw] = w_d[jw] - dw
        self.model.setw(w_d)
        C_d = ranknet.cost(self.query, self.model, self.sigma)
        
        dnum = (C_u[0] - C_d[0]) / dw / 2.0
        
        print dbpr[jw], dnum
    
    def test_gradient_2(self):
        
        # run tests
        ntest = 100
        ninp = 10
        nq = 20
        nhid = self.nhid
        nw = (ninp + 1) * nhid + (nhid + 1) * 1     # total number of weights
        dw = 1.e-6
               
        for j in xrange(ntest):
            
            # generate query data
            query = [ [0, random.choice([0, 1])] + [random.random() for _ in xrange(ninp)] for _ in xrange(nq) ]
            
            # get analytical gradient
            grad = ranknet.gradient(query, self.model, self.sigma)
                        
            # select weight at random
            jw = random.choice(xrange(nw))
                
            # numerical derivative                
            w_u = self.w[:]
            w_u[jw] = w_u[jw] + dw
            self.model.setw(w_u)
            C_u = ranknet.cost(query, self.model, self.sigma)
        
            w_d = self.w[:]
            w_d[jw] = w_d[jw] - dw
            self.model.setw(w_d)
            C_d = ranknet.cost(query, self.model, self.sigma)
        
            dnum = (C_u[0] - C_d[0]) / dw / 2.0
                
            # compare results
            #self.assertAlmostEqual(grad[jw], dnum, 5, "Run %d: %e %e " % (j, grad[jw], dnum))

            print j, jw, grad[jw], dnum
            
    def xtest_training_1(self):
        
        # SGD training on a single query
        
        nepoch = 10000    # number of training epochs
        rate = 0.1        # learning rate
        nprint = 1000     # print frequency
                
        for je in xrange(nepoch):
            
            # compute current cost and estimations
            C = ranknet.cost(self.query, self.model, self.sigma)
            if je % nprint == 0:
                print je, C[0], C[1], C[2]
                print "w:", self.model.getw() 
            # compute gradients
            g = ranknet.gradient(self.query, self.model, self.sigma)
        
            # update weights
            w = la.vsum( self.model.getw(), la.sax(-rate, g) )
            self.model.setw(w)
        
    def xtest_training_2(self): 
        
        # SGD training on several queries
        data = []
        d = range(10)
        for j in d:
            data.append( [ [j, random.choice([0, 1])] + [random.random() for _ in xrange(self.ninp)] for _ in xrange(self.nq) ] )
        
        print data
                
        nepoch = 10000    # number of training epochs
        rate = 0.1        # learning rate
        nprint = 1000     # print frequency
        
        # compute current cost and estimations
        for je in xrange(nepoch):
            
            # select training sample at random
            jq = random.choice(d)   
            
            if je % nprint == 0:
                
                # compute cost of the first sample
                C = ranknet.cost(data[0], self.model, self.sigma)
                
                print je, C[0], C[1], C[2]
                print "w:", self.model.getw() 
            
            # compute gradients
            g = ranknet.gradient(data[jq], self.model, self.sigma)
        
            # update weights
            w = la.vsum( self.model.getw(), la.sax(-rate, g) )
            self.model.setw(w)
    
        # final report
        for query in data:
            print "Query: ", query[0][0]
            C = ranknet.cost(query, self.model, self.sigma)
            for j in xrange(len(query)):
                print query[j][1], C[1][j]
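
For reference, test_s in Example #15 pins down the pairwise preference function ranknet.S: it should return +1, 0, or -1 depending on the relative relevance labels. A sketch consistent with those assertions (the actual ranknet implementation may differ):

def S(li, lj):
    # RankNet pairwise preference: +1 if item i is preferred over item j,
    # -1 if j is preferred over i, and 0 when the labels tie.
    if li > lj:
        return 1
    if li < lj:
        return -1
    return 0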
Example #16
    def test_backprop(self):

        #===================================
        # Gradient check
        #===================================

        # create a simple network
        net = ffnet.FFNet([k, q, m], [
            activation.linear(),
            activation.sigmoid(1.2),
            activation.tanh(2.0, 3.0 / 2.0)
        ])

        # set random weights
        wscale = 3.0
        w = net.getw()
        w = [random.uniform(-wscale, wscale) for _ in w]
        net.setw(w)

        # run tests
        ntest = 1000

        for j in xrange(ntest):

            # generate random input vector
            xscale = 5.0
            x = [random.uniform(-xscale, xscale) for _ in xrange(k)]

            # propagate forward
            net.apply(x)

            # select a weight at random
            N = (k + 1) * q + (q + 1) * m  # total number of weights
            nw = random.randint(0, N - 1)  # select one weight at random

            # backprop
            for jm in xrange(m):

                # compute derivative of the output with
                # respect to the weights using back propagation
                dbpr = net.backprop(la.unit(
                    m, jm))  # initial delta is just a unit vector

                # numerical derivative
                dw = 1.e-6

                w_u = w[:]
                w_u[nw] = w_u[nw] + dw
                net.setw(w_u)
                z1 = net.apply(x)

                w_d = w[:]
                w_d[nw] = w_d[nw] - dw
                net.setw(w_d)
                z2 = net.apply(x)

                dnum = (z1[jm] - z2[jm]) / dw / 2

                # compare results
                self.assertAlmostEqual(
                    dbpr[nw], dnum, 5,
                    "Run %d, output %d: %e %e " % (j, jm, dbpr[nw], dnum))

                print j, nw, jm, dbpr[nw], dnum
Example #17
def propagate(X, param):
    X1 = np.dot(param['W1'], X) + param['b1']
    Y1 = act.tanh(X1)
    X2 = np.dot(param['W2'], Y1) + param['b2']
    y = act.sigmoid(X2)
    return y, {'X1': X1, 'X2': X2, 'Y1': Y1, 'y': y}
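
A hedged usage sketch for Example #17, assuming propagate and an act module providing element-wise tanh and sigmoid are in scope; the layer sizes, batch size, and initialization below are illustrative assumptions only:

import numpy as np

n_x, n_h, m = 13, 8, 5                            # input size, hidden units, batch size (assumed)
rng = np.random.default_rng(0)
param = {
    'W1': 0.1 * rng.standard_normal((n_h, n_x)), 'b1': np.zeros((n_h, 1)),
    'W2': 0.1 * rng.standard_normal((1, n_h)),   'b2': np.zeros((1, 1)),
}
X = rng.standard_normal((n_x, m))
y, cache = propagate(X, param)                    # y has shape (1, m); cache holds the intermediates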