Example #1
 def train1(self, data, classes, verbose=0):
     n = len(data)
     testing = array(selection(xrange(n), n / 10), 'i')
     training = setdiff1d(array(xrange(n), 'i'), testing)
     testset = data[testing, :]
     testclasses = classes[testing]
     ntrain = min(self.initial_epochs * n, self.initial_ntrain)
     pool = []
     for nh in self.initial_nhidden:
         mlp = MLP(**self.kw)
         mlp.eta = log_uniform(*self.initial_eta)
         mlp.train(data,
                   classes,
                   etas=[(mlp.eta, ntrain)],
                   nhidden=nh,
                   verbose=0,
                   samples=training)
         mlp.err = error(mlp, testset, testclasses)
         if verbose:
             print "AutoMLP initial", "%.3f" % mlp.eta, nh, \
                 mlp.err, "%.4f" % (mlp.err * 1.0 / len(testset))
         pool.append(mlp)
     for i in range(self.max_rounds):
         # if the pool is too large, pick only the best models
         errs = [x.err + 0.1 * x.nhidden() for x in pool]
         if len(errs) > self.max_pool:
             choice = argsort(errs)
             pool = list(take(pool, choice[:self.max_pool]))
         # pick a random model from the pool
         mlp = selection(pool, 1)[0]
         mlp = mlp.copy()
         # compute random learning rates and number of hidden units
         new_eta = exp(log(mlp.eta) + randn() * self.log_eta_var)
         new_nh = max(
             2, int(exp(log(mlp.nhidden()) + randn() * self.log_nh_var)))
         # train with the new parameters
         mlp.eta = new_eta
         mlp.changeHidden(data, classes, new_nh)
         mlp.train(data,
                   classes,
                   etas=[(mlp.eta, ntrain)],
                   verbose=(self.verbose > 1),
                   samples=training)
         # determine error on test set
         mlp.err = error(mlp, testset, testclasses)
         if verbose:
             print "AutoMLP pool",mlp.err,"%.4f"%(mlp.err*1.0/len(testset)),\
                 "(%.3f,%d)"%(mlp.eta,mlp.nhidden()),\
                 [x.err for x in pool]
         pool += [mlp]
         # to allow partial training, update this with the best model so far
         best = argmin([x.err + 0.1 * x.nhidden() for x in pool])
         mlp = pool[best]
         self.assign(mlp)
         yield Record(round=i,
                      rounds=self.max_rounds,
                      testerr=mlp.err * 1.0 / len(testset))
Example #2
 def train1(self,data,classes,verbose=0):
     n = len(data)
     testing = array(selection(range(n),n//10),'i')
     training = setdiff1d(array(range(n),'i'),testing)
     testset = data[testing,:]
     testclasses = classes[testing]
     ntrain = min(self.initial_epochs*n,self.initial_ntrain)
     pool = []
     for nh in self.initial_nhidden:
         mlp = MLP(**self.kw)
         mlp.eta = log_uniform(*self.initial_eta)
         mlp.train(data,classes,etas=[(mlp.eta,ntrain)],
                   nhidden=nh,
                   verbose=0,
                   samples=training)
         mlp.err = error(mlp,testset,testclasses)
         if verbose:
             print("AutoMLP initial","%.3f"%mlp.eta,nh,
                   mlp.err,"%.4f"%(mlp.err*1.0/len(testset)))
         pool.append(mlp)
     for i in range(self.max_rounds):
         # if the pool is too large, pick only the best models
         errs = [x.err+0.1*x.nhidden() for x in pool]
         if len(errs)>self.max_pool:
             choice = argsort(errs)
             pool = list(take(pool,choice[:self.max_pool]))
         # pick a random model from the pool
         mlp = selection(pool,1)[0]
         mlp = mlp.copy()
         # compute random learning rates and number of hidden units
         new_eta = exp(log(mlp.eta)+randn()*self.log_eta_var)
         new_nh = max(2,int(exp(log(mlp.nhidden())+randn()*self.log_nh_var)))
         # train with the new parameters
         mlp.eta = new_eta
         mlp.changeHidden(data,classes,new_nh)
         mlp.train(data,classes,etas=[(mlp.eta,ntrain)],
                   verbose=(self.verbose>1),samples=training)
         # determine error on test set
         mlp.err = error(mlp,testset,testclasses)
         if verbose:
             print("AutoMLP pool",mlp.err,"%.4f"%(mlp.err*1.0/len(testset)),\
                 "(%.3f,%d)"%(mlp.eta,mlp.nhidden()),\
                 [x.err for x in pool])
         pool += [mlp]
         # to allow partial training, update this with the best model so far
         best = argmin([x.err+0.1*x.nhidden() for x in pool])
         mlp = pool[best]
         self.assign(mlp)
         yield Record(round=i,rounds=self.max_rounds,testerr=mlp.err*1.0/len(testset))
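The examples above call several helpers that are not shown: selection, log_uniform, error, and Record. The sketches below are plausible reconstructions inferred only from the call sites (hypothetical, not the library's actual definitions):

 import random
 from numpy import exp, log
 from numpy.random import uniform

 def selection(items, k):
     # draw k distinct elements at random (test split, hidden-unit
     # pruning, and picking candidate models from the pool)
     return random.sample(list(items), int(k))

 def log_uniform(lo, hi):
     # sample log-uniformly from [lo, hi]; a natural prior for learning rates
     return float(exp(uniform(log(lo), log(hi))))

 def error(mlp, data, classes):
     # count misclassified samples; mlp.classify() is a placeholder for
     # whatever prediction method the MLP actually exposes
     return sum(1 for x, c in zip(data, classes) if mlp.classify(x) != c)

 class Record(object):
     # bag of named values, e.g. Record(round=0, rounds=8, testerr=0.05)
     def __init__(self, **kw):
         self.__dict__.update(kw)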
Example #3
 def decreaseHidden(self, data, cls, new_nhidden):
     """Decrease the number of hidden units. Data and cls might be used to
     pick which hidden units to delete (but currently are unused)."""
     ninput, nhidden, noutput = self.shape()
     keep = array([True] * nhidden)
     for i in selection(xrange(nhidden), nhidden - new_nhidden):
         keep[i] = False
     self.w1 = array(self.w1[keep, :], dtype="f", order="C")
     self.b1 = array(self.b1[keep], dtype="f", order="C")
     self.w2 = array(self.w2[:, keep], dtype="f", order="C")
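Pruning works by boolean masking: dropping hidden unit i deletes row i of w1 and b1 and the matching column i of w2, leaving all surviving weights untouched. A toy illustration with small numpy arrays:

 from numpy import arange, array

 w1 = arange(6.0).reshape(3, 2)     # (nhidden=3, ninput=2)
 w2 = arange(12.0).reshape(4, 3)    # (noutput=4, nhidden=3)
 keep = array([True, False, True])  # drop hidden unit 1
 print(w1[keep, :].shape)           # -> (2, 2)
 print(w2[:, keep].shape)           # -> (4, 2)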
Example #4
 def init(self, data, cls, nhidden=None, eps=1e-2):
     data = data.reshape(len(data), prod(data.shape[1:]))
     scale = max(abs(amax(data)), abs(amin(data)))
     ninput = data.shape[1]
     if nhidden is None: nhidden = len(set(cls))
     noutput = amax(cls) + 1
     # print ninput,nhidden,noutput
     self.w1 = array(
         data[selection(xrange(len(data)), nhidden)] * eps / scale, 'f')
     self.b1 = array(uniform(-eps, eps, (nhidden, )), 'f')
     self.w2 = array(uniform(-eps, eps, (noutput, nhidden)), 'f')
     self.b2 = array(uniform(-eps, eps, (noutput, )), 'f')
Example #5
 def init(self,data,cls,nhidden=None,eps=1e-2):
     """Initialize the network but perform no training yet.  The network units
     are initialized using the data, and the classes are used to determine the number
     of output units and (if no number of hidden units is given) the number of
     hidden units."""
     data = data.reshape(len(data),prod(data.shape[1:]))
     scale = max(abs(amax(data)),abs(amin(data)))
     # ninput = data.shape[1]
     if nhidden is None: nhidden = len(set(cls))
     noutput = amax(cls)+1
     self.w1 = array(data[selection(range(len(data)),nhidden)] * eps/scale,'f')
     self.b1 = array(uniform(-eps,eps,(nhidden,)),'f')
     self.w2 = array(uniform(-eps,eps,(noutput,nhidden)),'f')
     self.b2 = array(uniform(-eps,eps,(noutput,)),'f')
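Note that w1 is seeded from randomly selected training samples (scaled by eps/scale) rather than pure noise, so the initial hidden units already point along directions that occur in the data. A hypothetical smoke test, assuming MLP() can be constructed without arguments:

 from numpy.random import rand, randint

 data = rand(100, 8).astype('f')    # 100 samples, 8 features
 cls = randint(0, 3, 100)           # labels 0..2
 net = MLP()                        # assumption: no-arg constructor
 net.init(data, cls)                # nhidden defaults to len(set(cls)) == 3
 print(net.w1.shape, net.w2.shape)  # -> (3, 8) (3, 3)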
Example #6
 def change_nhidden(self, data, cls, new_nhidden, subset=None):
     ninput, nhidden, noutput = self.shape()
     if nhidden == new_nhidden:
         pass
     elif nhidden > new_nhidden:
         keep = array([True] * nhidden)
         for i in selection(xrange(nhidden), nhidden - new_nhidden):
             keep[i] = False
         self.w1 = array(self.w1[keep, :], dtype='f', order="C")
         self.b1 = array(self.b1[keep], dtype='f', order="C")
         self.w2 = array(self.w2[:, keep], dtype='f', order="C")
     else:
         vs = []
         bs = []
         delta = new_nhidden - nhidden
         for i in range(delta):
             a, b = selection(xrange(nhidden), 2)
             l = 0.8 * rand(1)[0] + 0.1
             v = l * self.w1[a] + (1 - l) * self.w1[b]
             vs.append(v)
             b = l * self.b1[a] + (1 - l) * self.b1[b]
             bs.append(b)
         self.w1 = array(1.0 * vstack([self.w1, array(vs)]),
                         dtype='f',
                         order="C")
         self.b1 = array(1.0 * hstack([self.b1, array(bs)]),
                         dtype='f',
                         order="C")
         scale = 0.01 * mean(abs(self.w2))
         self.w2 = array(
             1.0 * hstack([self.w2, scale * randn(len(self.w2), delta)]),
             dtype='f',
             order="C")
     assert c_order(self.w1)
     assert c_order(self.b1)
     assert c_order(self.w2)
     assert c_order(self.b2)
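change_nhidden unifies the shrink path of decreaseHidden and the grow path of increaseHidden behind one call, and the trailing asserts guarantee that all weight matrices stay C-contiguous (presumably for a native backend). A hypothetical usage sketch, under the same assumptions as the smoke test above:

 from numpy.random import rand, randint

 data = rand(100, 8).astype('f')
 cls = randint(0, 3, 100)
 net = MLP()                         # assumption: no-arg constructor
 net.init(data, cls)
 net.change_nhidden(data, cls, 32)   # grow: new units blend existing ones
 net.change_nhidden(data, cls, 8)    # shrink: random units are pruned
 assert net.nhidden() == 8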
Example #7
 def init(self, data, cls, nhidden=None, eps=1e-2):
     """Initialize the network but perform no training yet.  The network units
     are initialized using the data, and the classes are used to determine the number
     of output units and (if no number of hidden units is given) the number of
     hidden units."""
     data = data.reshape(len(data), prod(data.shape[1:]))
     scale = max(abs(amax(data)), abs(amin(data)))
     ninput = data.shape[1]
     if nhidden is None: nhidden = len(set(cls))
     noutput = amax(cls) + 1
     self.w1 = array(
         data[selection(xrange(len(data)), nhidden)] * eps / scale, 'f')
     self.b1 = array(uniform(-eps, eps, (nhidden, )), 'f')
     self.w2 = array(uniform(-eps, eps, (noutput, nhidden)), 'f')
     self.b2 = array(uniform(-eps, eps, (noutput, )), 'f')
Example #8
 def increaseHidden(self, data, cls, new_nhidden):
     """Increase the number of hidden units.  Data and cls are used to pick
     initial values for new hidden units."""
     nhidden = self.nhidden()
     vs = []
     bs = []
     delta = new_nhidden - nhidden
     for i in range(delta):
         a, b = selection(xrange(nhidden), 2)
         l = 0.8 * rand(1)[0] + 0.1
         v = l * self.w1[a] + (1 - l) * self.w1[b]
         vs.append(v)
         b = l * self.b1[a] + (1 - l) * self.b1[b]
         bs.append(b)
     self.w1 = array(1.0 * vstack([self.w1, array(vs)]), dtype="f", order="C")
     self.b1 = array(1.0 * hstack([self.b1, array(bs)]), dtype="f", order="C")
     scale = 0.01 * mean(abs(self.w2))
     vecs = [self.w2, scale * randn(len(self.w2), delta)]
     self.w2 = array(1.0 * hstack(vecs), dtype="f", order="C")
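Each new hidden unit is a random convex combination l*u_a + (1-l)*u_b of two existing units, with l drawn from [0.1, 0.9), so it starts inside the span of the current hidden layer; its outgoing weights in w2 are scaled to about 1% of the mean weight magnitude, so growing the layer barely perturbs the network's output at first. A numeric illustration of the blend:

 from numpy import array
 from numpy.random import rand

 u_a = array([1.0, 0.0, 2.0])        # existing hidden unit a (row of w1)
 u_b = array([0.0, 4.0, 2.0])        # existing hidden unit b
 l = 0.8 * rand() + 0.1              # mixing weight in [0.1, 0.9)
 new_unit = l * u_a + (1 - l) * u_b  # lies on the segment between u_a and u_b
 print(l, new_unit)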