Example #1
    def testName(self):

        # read data
        data = reader.read("../data/log_1_fixed.txt", jstartline=15000, maxlines=5000)

        # preprocess data
        preprocess.preproc(data)

        # initialize model
        layers = [13, 8, 1]
        activf = [activation.linear(), activation.tanh(), activation.sigmoid()]  # activation.tanh(1.75, 3./2.),
        net = ffnet.FFNet(layers, activf)
        net.initw(0.1)

        # create training options
        opts = trainb.options()

        # write function
        f = open("../output/trainb_test.txt", "w+")
        writefcn = lambda s: f.write(s)

        # training
        trainb.train(data, opts, net, writefcn)

        # close file
        f.close()
Example #2
def train(x, y, iterations=50000, learning_rate=0.001):
    global W1, W2, B1, B2, error
    m = x.shape[0]
    error = []

    for _ in range(iterations):
        # forward pass, keeping intermediate activations for backpropagation
        a0, z1, a1, z2, a2 = forward(x, predict=False)

        # output layer: squared-error signal and gradients for W2, B2
        da2 = a2 - y.T
        dz2 = da2 * linear(z2, derivative=True)
        dw2 = dz2.dot(a1.T) / m
        db2 = np.sum(dz2, axis=1, keepdims=True) / m

        # hidden layer: backpropagate through W2 and the sigmoid
        da1 = W2.T.dot(dz2)
        dz1 = np.multiply(da1, sigmoid(z1, derivative=True))
        dw1 = dz1.dot(a0.T) / m
        db1 = np.sum(dz1, axis=1, keepdims=True) / m

        # gradient-descent update
        W1 -= learning_rate * dw1
        B1 -= learning_rate * db1
        W2 -= learning_rate * dw2
        B2 -= learning_rate * db2

        # record the mean squared error for this iteration
        error.append(np.average(da2**2))

    return error
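The train() function above relies on module-level weights W1, B1, W2, B2, on the forward() helper shown in Example #4, and on sigmoid/linear activations that accept a derivative flag. A minimal sketch of those assumed pieces follows; the layer sizes, initialization, and toy data are illustrative, not taken from the original project.

import numpy as np

def sigmoid(z, derivative=False):
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s) if derivative else s

def linear(z, derivative=False):
    return np.ones_like(z) if derivative else z

# weights for a 13-8-1 network (sizes chosen to match the other examples)
rng = np.random.default_rng(0)
W1 = 0.1 * rng.standard_normal((8, 13))
B1 = np.zeros((8, 1))
W2 = 0.1 * rng.standard_normal((1, 8))
B2 = np.zeros((1, 1))

# toy data: 100 samples with 13 features and one target each
x = rng.standard_normal((100, 13))
y = rng.standard_normal((100, 1))

error = train(x, y, iterations=1000)  # also needs forward() from Example #4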
Example #3
    def testName(self):

        # read data
        data = reader.read("../data/log_1_fixed.txt",
                           jstartline=15000,
                           maxlines=5000)

        # preprocess data
        preprocess.preproc(data)

        # initialize model
        layers = [13, 8, 1]
        activf = [
            activation.linear(),
            activation.tanh(),
            activation.sigmoid()
        ]  # activation.tanh(1.75, 3./2.),
        net = ffnet.FFNet(layers, activf)
        net.initw(0.1)

        # create training options
        opts = trainsg.options()

        # write function
        f = open("../output/trainsg_test.txt", "w+")
        writefcn = lambda s: f.write(s)

        # training
        trainsg.train(data, opts, net, writefcn)

        # close file
        f.close()
Example #4
def forward(x, predict=True):
    # forward pass through the two-layer network held in the globals W1, B1, W2, B2
    a0 = x.T
    z1 = W1.dot(a0) + B1
    a1 = sigmoid(z1)
    z2 = W2.dot(a1) + B2
    a2 = linear(z2)
    if not predict:
        # training mode: return every intermediate value for backpropagation
        return a0, z1, a1, z2, a2
    return a2
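With the default predict=True only the final activation is returned, so a hypothetical prediction call (reusing the globals sketched after Example #2) looks like:

x_new = np.random.randn(5, 13)  # 5 samples with 13 features (illustrative shape)
y_hat = forward(x_new)          # shape (1, 5): one linear output per sample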
Example #5
    def test_apply(self):

        # create a simple network; k, q, m are the input, hidden and output
        # layer sizes defined elsewhere in the test module
        net = ffnet.FFNet(
            [k, q, m],
            [activation.linear(),
             activation.tanh(),
             activation.sigmoid()])

        # some input
        x = [1] * k
        net.apply(x)
Example #6
def single_layer_fp(X, W, b, activation="sigmoid"):
    # affine transform; np.outer broadcasts the bias across all samples (columns of X)
    A = np.dot(W, X) + np.outer(b, np.ones(X.shape[1]))
    if activation == "linear":
        S = act_fun.linear(A)
    elif activation == "sigmoid":
        S = act_fun.sigmoid(beta, A)
    elif activation == "tanh":
        S = act_fun.tanh(beta, A)
    elif activation == "relu":
        S = act_fun.relu(A)
    elif activation == "softplus":
        S = act_fun.softplus(A)
    elif activation == "elu":
        S = act_fun.elu(delta, A)
    elif activation == "softmax":
        S = act_fun.softmax(A)
    else:
        raise ValueError("Activation function isn't supported: %s" % activation)
    return (A, S)
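single_layer_fp() expects an act_fun module plus module-level beta and delta parameters, none of which appear on this page. A hypothetical call, with placeholder values for everything that is assumed:

import numpy as np

beta, delta = 1.0, 1.0           # assumed activation parameters used by act_fun
X = np.random.randn(4, 10)       # 4 inputs for each of 10 samples (columns)
W = 0.1 * np.random.randn(6, 4)  # layer with 6 units
b = np.zeros(6)

A, S = single_layer_fp(X, W, b, activation="relu")
print(A.shape, S.shape)          # both (6, 10)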
Example #7
def train(k, data):

    # initialize new model
    layers = [13, 8, 1]
    activf = [activation.linear(), activation.tanh(), activation.sigmoid()]
    net = ffnet.FFNet(layers, activf)
    net.initw(0.1)

    # use default training options
    opts = trainsg.options()
    opts.rate = 2.e-4

    # write function
    f = open("../output/train-%s.txt" % k, "w+")
    writefcn = lambda s: f.write(s)

    # training
    net = trainsg.train(data, opts, net, writefcn)

    # close file
    f.close()

    # return trained network
    return net
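The open()/close() pairs in this and the earlier test examples can be replaced by a context manager, which closes the output file even if training raises; a sketch, not the project's original code:

with open("../output/train-%s.txt" % k, "w+") as f:
    net = trainsg.train(data, opts, net, f.write)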
Example #8
class Test(unittest.TestCase):

    seed = 1.0

    random.seed(seed)
    
    ninp = 10    # number of features
    nhid = 8     # number of hidden units
    nq = 5       # number of samples in a query

    # generated query data
    query = [[0, random.choice([0, 1])] + [random.random() for _ in xrange(ninp)]
             for _ in xrange(nq)]

    # neural network model
    model = ffnet.FFNet([ninp, nhid, 1],
                        [activation.linear(),
                         activation.tanh(1.75, 3./2.),
                         activation.sigmoid()])

    # random weights
    model.initw(1.0, seed)
    w = model.getw()

    # ranknet sigma
    sigma = 1.0
            
    def test_s(self):
        self.assertEqual(1, ranknet.S(1, 0))
        self.assertEqual(0, ranknet.S(1, 1))
        self.assertEqual(0, ranknet.S(0, 0))
        self.assertEqual(-1, ranknet.S(0, 1))
       
    def test_cost(self):
               
        C = ranknet.cost(self.query, self.model, self.sigma)
        print C

    def test_gradient(self):
        
        # analytical gradient
        dbpr = ranknet.gradient(self.query, self.model, self.sigma)
        
        # numerical gradient
        dw = 1.e-5
        jw = 7
        
        w_u = self.w[:]
        w_u[jw] = w_u[jw] + dw
        self.model.setw(w_u)
        C_u = ranknet.cost(self.query, self.model, self.sigma)
        
        w_d = self.w[:]
        w_d[jw] = w_d[jw] - dw
        self.model.setw(w_d)
        C_d = ranknet.cost(self.query, self.model, self.sigma)
        
        dnum = (C_u[0] - C_d[0]) / dw / 2.0
        
        print dbpr[jw], dnum
    
    def test_gradient_2(self):
        
        # run tests
        ntest = 100
        ninp = 10
        nq = 20
        nhid = self.nhid
        nw = (ninp + 1) * nhid + (nhid + 1) * 1     # total number of weights
        dw = 1.e-6
               
        for j in xrange(ntest):
            
            # generate query data
            query = [[0, random.choice([0, 1])] + [random.random() for _ in xrange(ninp)]
                     for _ in xrange(nq)]
            
            # get analytical gradient
            grad = ranknet.gradient(query, self.model, self.sigma)
                        
            # select weight at random
            jw = random.choice(xrange(nw))
                
            # numerical derivative                
            w_u = self.w[:]
            w_u[jw] = w_u[jw] + dw
            self.model.setw(w_u)
            C_u = ranknet.cost(query, self.model, self.sigma)
        
            w_d = self.w[:]
            w_d[jw] = w_d[jw] - dw
            self.model.setw(w_d)
            C_d = ranknet.cost(query, self.model, self.sigma)
        
            dnum = (C_u[0] - C_d[0]) / dw / 2.0
                
            # compare results
            #self.assertAlmostEqual(grad[jw], dnum, 5, "Run %d: %e %e " % (j, grad[jw], dnum))

            print j, jw, grad[jw], dnum
            
    def xtest_training_1(self):
        
        # trainsg on a single query
        
        nepoch = 10000    # number of training epochs
        rate = 0.1        # learning rate
        nprint = 1000     # print frequency
                
        for je in xrange(nepoch):
            
            # compute current cost and estimations
            C = ranknet.cost(self.query, self.model, self.sigma)
            if je % nprint == 0:
                print je, C[0], C[1], C[2]
                print "w:", self.model.getw() 
            # compute gradients
            g = ranknet.gradient(self.query, self.model, self.sigma)
        
            # update weights
            w = la.vsum( self.model.getw(), la.sax(-rate, g) )
            self.model.setw(w)
        
    def xtest_training_2(self): 
        
        # trainsg on several queries
        data = []
        d = range(10)
        for j in d:
            data.append([[j, random.choice([0, 1])] + [random.random() for _ in xrange(self.ninp)]
                         for _ in xrange(self.nq)])
        
        print data
                
        nepoch = 10000    # number of training epochs
        rate = 0.1        # learning rate
        nprint = 1000     # print frequency
        
        # compute current cost and estimations
        for je in xrange(nepoch):
            
            # select training sample at random
            jq = random.choice(d)   
            
            if je % nprint == 0:
                
                # compute cost of a first sample
                C = ranknet.cost(data[0], self.model, self.sigma)
                
                print je, C[0], C[1], C[2]
                print "w:", self.model.getw() 
            
            # compute gradients
            g = ranknet.gradient(data[jq], self.model, self.sigma)
        
            # update weights
            w = la.vsum( self.model.getw(), la.sax(-rate, g) )
            self.model.setw(w)
    
        # final report
        for query in data:
            print "Query: ", query[0][0]
            C = ranknet.cost(query, self.model, self.sigma)
            for j in xrange(len(query)):
                print query[j][1], C[1][j]
Example #9
    def test_backprop(self):

        #===================================
        # Gradient check
        #===================================

        # create a simple network
        net = ffnet.FFNet([k, q, m], [
            activation.linear(),
            activation.sigmoid(1.2),
            activation.tanh(2.0, 3.0 / 2.0)
        ])

        # set random weights
        wscale = 3.0
        w = net.getw()
        w = [random.uniform(-wscale, wscale) for _ in w]
        net.setw(w)

        # run tests
        ntest = 1000

        for j in xrange(ntest):

            # generate random input vector
            xscale = 5.0
            x = [random.uniform(-xscale, xscale) for _ in xrange(k)]

            # propagate forward
            net.apply(x)

            # select a weight at random
            N = (k + 1) * q + (q + 1) * m  # total number of weights
            nw = random.randint(0, N - 1)  # select one weight at random

            # backprop
            for jm in xrange(m):

                # compute derivative of output jm with respect
                # to the weights using back propagation
                dbpr = net.backprop(la.unit(m, jm))  # initial delta is just a unit vector

                # numerical derivative
                dw = 1.e-6

                w_u = w[:]
                w_u[nw] = w_u[nw] + dw
                net.setw(w_u)
                z1 = net.apply(x)

                w_d = w[:]
                w_d[nw] = w_d[nw] - dw
                net.setw(w_d)
                z2 = net.apply(x)

                dnum = (z1[jm] - z2[jm]) / dw / 2

                # compare results
                self.assertAlmostEqual(
                    dbpr[nw], dnum, 5,
                    "Run %d, output %d: %e %e " % (j, jm, dbpr[nw], dnum))

                print j, nw, jm, dbpr[nw], dnum