Example #1
    def test_initw(self):

        # create a simple network
        net = ffnet.FFNet([k, q, m], [])
        net.initw(2.0)

        print(net.W)
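These snippets are excerpts from a unittest module built around ffnet.FFNet, and they rely on module-level names (the imports plus the layer sizes k, q, m and the layer count n) that the excerpts do not show. A minimal preamble that makes them self-contained might look like the following; the concrete sizes are assumptions for illustration, not values from the original source:

    import random
    import unittest

    import activation
    import ffnet
    import la
    # the training examples additionally use reader, preprocess, trainsg and ranknet

    # assumed layer sizes for the simple [k, q, m] test network
    k = 3  # number of inputs
    q = 4  # number of hidden units
    m = 2  # number of outputs
    n = 3  # number of layers, i.e. len([k, q, m])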
Example #2
    def testName(self):

        # read data
        data = reader.read("../data/log_1_fixed.txt",
                           jstartline=15000,
                           maxlines=5000)

        # preprocess data
        preprocess.preproc(data)

        # initialize model
        layers = [13, 8, 1]
        activf = [
            activation.linear(),
            activation.tanh(),
            activation.sigmoid()
        ]  # activation.tanh(1.75, 3./2.),
        net = ffnet.FFNet(layers, activf)
        net.initw(0.1)

        # create training options
        opts = trainsg.options()

        # write function
        f = open("../output/trainsg_test.txt", "w+")
        writefcn = lambda s: f.write(s)

        # training
        trainsg.train(data, opts, net, writefcn)

        # close file
        f.close()
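trainsg.train reports its progress through the writefcn callable; judging from how it is used here and in Example #8, any function that accepts a string should work, for instance echoing to stdout instead of a log file (an assumption, since the excerpts do not show trainsg internals):

    import sys

    writefcn = sys.stdout.write  # echo training progress instead of writing a file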
Example #3
    def test_getw(self):

        # create a simple network
        net = ffnet.FFNet([k, q, m], [])

        # Test exported flat list
        self.assertEqual((k + 1) * q + (q + 1) * m, len(net.getw()),
                         "#2 Wrong weight matrix dimensions")
Example #4
    def test_setw(self):

        # create a simple network
        net = ffnet.FFNet([k, q, m], [])

        # set weights of the model: first layer with ones, second layer with twos
        net.setw([1] * ((k + 1) * q) + [2] * ((q + 1) * m))

        # Test updated weights
        self.assertEqual(1.0, net.W[0][0][0], "#3.1 Should be updated")
Example #5
    def test_init(self):

        # create a simple network
        net = ffnet.FFNet([k, q, m], [])

        # Test internal structured weight matrices
        self.assertEqual(n - 1, len(net.W), "#1.1 Dimensions")
        self.assertEqual(k + 1, len(net.W[0]), "#1.2 Dimensions")
        self.assertEqual(q, len(net.W[0][0]), "#1.3 Dimensions")
        self.assertEqual(q + 1, len(net.W[1]), "#1.4 Dimensions")
        self.assertEqual(m, len(net.W[1][0]), "#1.5 Dimensions")
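Taken together, the assertions pin down the internal layout: net.W holds one matrix per weighted layer (n - 1 of them), each with one row per input unit plus a bias row, and one column per unit of the next layer. A quick way to see this, with the sizes assumed earlier:

    shapes = [(len(Wj), len(Wj[0])) for Wj in net.W]
    print(shapes)  # expected: [(k + 1, q), (q + 1, m)]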
Example #6
    def test_apply(self):

        # create a simple network
        net = ffnet.FFNet(
            [k, q, m],
            [activation.linear(),
             activation.tanh(),
             activation.sigmoid()])

        # some input
        x = [1] * k
        net.apply(x)
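The test only checks that the forward pass runs; judging from the backprop test in Example #10, apply also returns the output activations, one per output unit, so a caller would presumably use it like this:

    y = net.apply([1] * k)  # forward pass
    assert len(y) == m      # one activation per output unit (inferred, not asserted by the source)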
Example #7
    def __init__(self, layers):
        """
        :param layers: a list of fully-connected layers
        """
        self.net = ffnet.FFNet(layers)

        if self.net.layers[0].shape[0] != self.net.layers[-1].shape[1]:
            raise ValueError(
                'In the given autoencoder, the numbers of inputs and outputs differ!'
            )

        self.hist = {
            'train_loss': [],
            'train_grad': [],
            'test_loss': [],
            'test_grad': [],
            'epoch': [],
            'optimizer': "",
            'elaps_time': 0
        }
        self.summary()
Example #8
def train(k, data):

    # initialize new model
    layers = [13, 8, 1]
    activf = [activation.linear(), activation.tanh(), activation.sigmoid()]
    net = ffnet.FFNet(layers, activf)
    net.initw(0.1)

    # training options: defaults, except for an explicit learning rate
    opts = trainsg.options()
    opts.rate = 2.e-4

    # write function
    f = open("../output/train-%s.txt" % k, "w+")
    writefcn = lambda s: f.write(s)

    # training
    net = trainsg.train(data, opts, net, writefcn)

    # close file
    f.close()

    # return trained network
    return net
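A plausible driver for this helper, modeled on the reading and preprocessing steps from Example #2 (the path and line ranges are copied from there purely for illustration):

    data = reader.read("../data/log_1_fixed.txt", jstartline=15000, maxlines=5000)
    preprocess.preproc(data)
    net = train(0, data)  # progress is logged to ../output/train-0.txt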
Example #9
class Test(unittest.TestCase):

    seed = 1.0

    random.seed(seed)

    ninp = 10  # number of features
    nhid = 8   # number of hidden units
    nq = 5     # number of samples in a query

    # generated query data: [query id, relevance label, features...]
    query = [[0, random.choice([0, 1])] + [random.random() for _ in range(ninp)]
             for _ in range(nq)]

    # neural network model
    model = ffnet.FFNet([ninp, nhid, 1],
                        [activation.linear(),
                         activation.tanh(1.75, 3. / 2.),
                         activation.sigmoid()])

    # random weights
    model.initw(1.0, seed)
    w = model.getw()

    # ranknet sigma
    sigma = 1.0
            
    def test_s(self):
        self.assertEqual(1, ranknet.S(1, 0))
        self.assertEqual(0, ranknet.S(1, 1))
        self.assertEqual(0, ranknet.S(0, 0))
        self.assertEqual(-1, ranknet.S(0, 1))
       
    def test_cost(self):
               
        C = ranknet.cost(self.query, self.model, self.sigma)
        print(C)

    def test_gradient(self):
        
        # analytical gradient
        dbpr = ranknet.gradient(self.query, self.model, self.sigma)
        
        # numerical gradient
        dw = 1.e-5
        jw = 7
        
        w_u = self.w[:]
        w_u[jw] = w_u[jw] + dw
        self.model.setw(w_u)
        C_u = ranknet.cost(self.query, self.model, self.sigma)
        
        w_d = self.w[:]
        w_d[jw] = w_d[jw] - dw
        self.model.setw(w_d)
        C_d = ranknet.cost(self.query, self.model, self.sigma)
        
        dnum = (C_u[0] - C_d[0]) / dw / 2.0
        
        print(dbpr[jw], dnum)
    
    def test_gradient_2(self):
        
        # run tests
        ntest = 100
        ninp = 10
        nq = 20
        nhid = self.nhid
        nw = (ninp + 1) * nhid + (nhid + 1) * 1     # total number of weights
        dw = 1.e-6
               
        for j in range(ntest):
            
            # generate query data
            query = [[0, random.choice([0, 1])] + [random.random() for _ in range(ninp)]
                     for _ in range(nq)]
            
            # get analytical gradient
            grad = ranknet.gradient(query, self.model, self.sigma)
                        
            # select weight at random
            jw = random.randrange(nw)
                
            # numerical derivative                
            w_u = self.w[:]
            w_u[jw] = w_u[jw] + dw
            self.model.setw(w_u)
            C_u = ranknet.cost(query, self.model, self.sigma)
        
            w_d = self.w[:]
            w_d[jw] = w_d[jw] - dw
            self.model.setw(w_d)
            C_d = ranknet.cost(query, self.model, self.sigma)
        
            dnum = (C_u[0] - C_d[0]) / dw / 2.0
                
            # compare results
            #self.assertAlmostEqual(grad[jw], dnum, 5, "Run %d: %e %e " % (j, grad[jw], dnum))

            print(j, jw, grad[jw], dnum)
            
    def xtest_training_1(self):

        # SGD training on a single query (disabled: unittest only runs
        # methods whose names start with "test")
        
        nepoch = 10000    # number of training epochs
        rate = 0.1        # learning rate
        nprint = 1000     # print frequency
                
        for je in range(nepoch):
            
            # compute current cost and estimations
            C = ranknet.cost(self.query, self.model, self.sigma)
            if je % nprint == 0:
                print(je, C[0], C[1], C[2])
                print("w:", self.model.getw())

            # compute gradients
            g = ranknet.gradient(self.query, self.model, self.sigma)
        
            # update weights
            w = la.vsum(self.model.getw(), la.sax(-rate, g))
            self.model.setw(w)
        
    def xtest_training_2(self):

        # SGD training on several queries (disabled: unittest only runs
        # methods whose names start with "test")
        data = []
        d = range(10)
        for j in d:
            data.append([[j, random.choice([0, 1])] + [random.random() for _ in range(self.ninp)]
                         for _ in range(self.nq)])

        print(data)
                
        nepoch = 10000    # number of training epochs
        rate = 0.1        # learning rate
        nprint = 1000     # print frequency
        
        # main training loop
        for je in range(nepoch):
            
            # select training sample at random
            jq = random.choice(d)   
            
            if je % nprint == 0:
                
                # compute cost of a first sample
                C = ranknet.cost(data[0], self.model, self.sigma)
                
                print(je, C[0], C[1], C[2])
                print("w:", self.model.getw())
            
            # compute gradients
            g = ranknet.gradient(data[jq], self.model, self.sigma)
        
            # update weights
            w = la.vsum(self.model.getw(), la.sax(-rate, g))
            self.model.setw(w)
    
        # final report: relevance label vs. model estimate for each query
        for query in data:
            print("Query:", query[0][0])
            C = ranknet.cost(query, self.model, self.sigma)
            for j in range(len(query)):
                print(query[j][1], C[1][j])
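Both gradient tests repeat the same central-difference pattern: dC/dw ~ (C(w + dw) - C(w - dw)) / (2 * dw), with the perturbation applied to one weight at a time. A small helper capturing it (an illustrative refactoring, not part of the original source):

    def numgrad(costfcn, model, w, jw, dw=1.e-6):
        # central-difference estimate of the derivative of the cost
        # with respect to weight jw
        w_u = w[:]
        w_u[jw] += dw
        model.setw(w_u)
        c_u = costfcn(model)

        w_d = w[:]
        w_d[jw] -= dw
        model.setw(w_d)
        c_d = costfcn(model)

        model.setw(w)  # restore the original weights
        return (c_u - c_d) / (2.0 * dw)

With it, the numerical check in test_gradient reduces to a single call:

    dnum = numgrad(lambda mdl: ranknet.cost(self.query, mdl, self.sigma)[0],
                   self.model, self.w, jw, dw=1.e-5)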
Example #10
    def test_backprop(self):

        #===================================
        # Gradient check
        #===================================

        # create a simple network
        net = ffnet.FFNet([k, q, m], [
            activation.linear(),
            activation.sigmoid(1.2),
            activation.tanh(2.0, 3.0 / 2.0)
        ])

        # set random weights
        wscale = 3.0
        w = net.getw()
        w = [random.uniform(-wscale, wscale) for _ in w]
        net.setw(w)

        # run tests
        ntest = 1000

        for j in range(ntest):

            # generate random input vector
            xscale = 5.0
            x = [random.uniform(-xscale, xscale) for _ in range(k)]

            # propagate forward
            net.apply(x)

            # select a weight at random
            N = (k + 1) * q + (q + 1) * m  # total number of weights
            nw = random.randint(0, N - 1)  # select one weight at random

            # backprop
            for jm in range(m):

                # restore the forward state of the unperturbed weights
                # (the numerical check below leaves perturbed weights behind)
                net.setw(w)
                net.apply(x)

                # compute the derivative of output jm with respect to the
                # weights by backpropagation; the initial delta is a unit
                # vector selecting that output
                dbpr = net.backprop(la.unit(m, jm))

                # numerical derivative
                dw = 1.e-6

                w_u = w[:]
                w_u[nw] = w_u[nw] + dw
                net.setw(w_u)
                z1 = net.apply(x)

                w_d = w[:]
                w_d[nw] = w_d[nw] - dw
                net.setw(w_d)
                z2 = net.apply(x)

                dnum = (z1[jm] - z2[jm]) / dw / 2

                # compare results
                self.assertAlmostEqual(
                    dbpr[nw], dnum, 5,
                    "Run %d, output %d: %e %e " % (j, jm, dbpr[nw], dnum))

                print(j, nw, jm, dbpr[nw], dnum)
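Since all of the tests above are unittest.TestCase methods, the conventional entry point (standard-library boilerplate, not shown in the excerpts) is:

    if __name__ == '__main__':
        unittest.main()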