Example #1
    def train(self):
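        # Each iteration forks a worker pool, trains one chunk of data per
        # worker, and merges the returned delta weights (size-weighted)
        # back into the shared Perceptron.weights below.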

        start_mem = memory()

        starttime = time.time()
        ##        model name	: Intel(R) Xeon(R) CPU           W3570  @ 3.20GHz

        print >> logs, "%d CPUs at %s %s" % (
            cpu_count(), os.popen("cat /proc/cpuinfo|grep GHz").readlines()
            [-1].strip().split(":")[-1], os.popen("cat /proc/cpuinfo|grep MHz")
            .readlines()[-1].strip().split(":")[-1])

        print >> logs, "starting perceptron at", time.ctime()

        best_prec = 0
        for it in xrange(1, self.iter + 1):

            print >> logs, "iteration %d starts..............%s" % (
                it, time.ctime())

            curr_mem = memory()  # outside of multi

            print >> logs, "memory usage at iter %d before pool: %s" % (
                it, human(memory(start_mem)))

            iterstarttime = time.time()

            if Perceptron.shuffle:
                self.shuffle_train()

            if not FLAGS.singletrain:
                pool = Pool(processes=self.ncpus)
            pool_time = time.time() - iterstarttime

            num_updates, early_updates = 0, 0
            ##            new_allweights, new_weights = self.decoder.model.new_weights(), self.decoder.model.new_weights()

            print >> logs, "memory usage at iter %d after pool: %s" % (
                it, human(memory(start_mem)))

            tt = time.time()
            print >> logs, "before para time...", tt
            results = map(self.train_worker, self.trainchunks) if FLAGS.singletrain else \
                          pool.map(self.train_worker, self.trainchunks, chunksize=1)

            if FLAGS.mydouble:
                print >> logs, "mydouble usage and freed: %d %d" % counts(), \
                      "|w|=", len(Perceptron.weights), "|avgw|=", len(Perceptron.allweights) if FLAGS.avg else 0, \
                      "|dw|=", len(results[0][-1][0])

            print >> logs, "after para time...", time.time()
            compute_time = time.time() - tt

            copy_time = 0
            para_times = []
            for dtime, size, (_num_updates, _early_updates), (_weights, _allweights) in results:

                num_updates += _num_updates
                early_updates += _early_updates

                factor = size / self.trainsize  # this chunk's share of the data; not exactly uniform (if not equal-size split)! N.B. assumes true (float) division

                tt = time.time()
                if not FLAGS.singletrain:
                    Perceptron.weights.iaddc(_weights, factor)
#                 print _weights
#                 print new_weights
#                 print

                if self.avg:
                    # same update whether or not FLAGS.naiveavg is set
                    Perceptron.allweights.iaddc(_allweights, factor)

                del _weights, _allweights

                copy_time += time.time() - tt

                para_times.append(dtime)

            del results

            if not FLAGS.singletrain:
                pool.close()
                pool.join()
##            else:
##                del self.delta_weights, self.delta_allweights # not in process

            print >> logs, "gc can't reach", gc.collect()

            print >> logs, "pool_time= %.1f s, compute_walltime= %.1f s, compute_cputime= %.1f (%s), copy_time= %.1f s" \
                  % (pool_time, compute_time, sum(para_times), " ".join("%.1f" % x for x in para_times), copy_time)

            print >> logs, "memory usage at iter %d after fork: %s" % (
                it, human(memory(start_mem)))

            if not FLAGS.singletrain:  # N.B.: in non-multiproc mode, self.c is updated
                Perceptron.c += self.trainsize / self.ncpus
                print >> logs, "self.c=", Perceptron.c

#             if self.avg:
#                 Perceptron.allweights = new_allweights
#            Perceptron.weights, Decoder.model.weights = new_weights, new_weights

##            num_updates, early_updates = self.one_pass_on_train() # old single-cpu
            iterendtime = time.time()

            print >> logs, "memory usage at iter %d: extra %s, total %s" % (
                it, human(memory(curr_mem)), human(memory(start_mem)))
            if FLAGS.debuglevel >= 1:
                print >> logs, "weights=", Perceptron.weights

            curr_mem = memory()

            print >> logs, "iteration %d training finished at %s. now evaluating on dev..." % (
                it, time.ctime())

            avgweights = self.avg_weights() if self.avg else Perceptron.weights
            if FLAGS.avg and FLAGS.debuglevel >= 1:
                print >> logs, "avgweights=", avgweights

            avgendtime = time.time()
            print >> logs, "avg weights (trim) took %.1f seconds." % (
                avgendtime - iterendtime)
            if FLAGS.debuglevel >= 2:
                print >> logs, "avg w=", avgweights


##            avgweights = self.decoder.model.new_weights()

            self.decoder.model.weights = avgweights  # OK if noavg; see above
            Parser.State.model.weights = avgweights  # multiprocessing: State.model is static

            prec = self.eval_on_dev()
            print >> logs, "eval on dev took %.1f seconds." % (time.time() -
                                                               avgendtime)


            print >> logs, "at iteration {0}, updates= {1} (early {4}), dev= {2}, |w|= {3}, time= {5:.3f}h acctime= {6:.3f}h"\
                  .format(it, num_updates, prec, len(avgweights), early_updates, \
                          (time.time() - iterstarttime)/3600, (time.time() - starttime)/3600.)
            logs.flush()

            if prec > best_prec:
                best_prec = prec
                best_it = it
                best_wlen = len(avgweights)
                print >> logs, "new high at iteration {0}: {1}. Dumping Weights...".format(
                    it, prec)
                self.dump(avgweights)

            self.decoder.model.weights = Perceptron.weights  # restore non-avg

            del avgweights
            print >> logs, "gc can't reach", gc.collect()

            if FLAGS.mydouble:
                print >> logs, "mydouble usage and freed: %d %d ------------------------" % counts(
                )

            logs.flush()  # for hpc

        print >> logs, "peaked at iteration {0}: {1}, |bestw|= {2}.".format(
            best_it, best_prec, best_wlen)
        print >> logs, "perceptron training of %d iterations finished on %s (took %.2f hours)"  % \
              (it, time.ctime(), (time.time() - starttime)/3600.)
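A minimal sketch of the size-weighted merge that Perceptron.weights.iaddc(_weights, factor) performs above, using a plain dict as a hypothetical stand-in for mydefaultdict (the iaddc helper here is an assumption modeled on its usage, not the actual C implementation):

def iaddc(target, delta, factor):
    # in-place add with a coefficient: target += factor * delta
    for feat, value in delta.iteritems():
        target[feat] = target.get(feat, 0.0) + value * factor

chunks = [({"f1": 2.0}, 30), ({"f1": -1.0, "f2": 4.0}, 70)]  # (delta_weights, chunk_size)
trainsize = float(sum(size for _, size in chunks))
weights = {}
for delta, size in chunks:
    iaddc(weights, delta, size / trainsize)  # size-weighted average of worker deltas
print weights  # roughly {'f1': -0.1, 'f2': 2.8}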
Example #2
# N.B.: this snippet is a fragment; a hypothetical setup that makes it runnable:
from _mycollections import mydefaultdict
from mydouble import mydouble, counts
c = mydefaultdict(mydouble, [("a", 1.)])
d = mydefaultdict(mydouble, [("b", 1.)])

c += d
#c.__iadd__(d)
print "here", c

e = d # mydefaultdict(mydouble, [("1", 1.)])
# print e
# print type(e.get("1"))  # float

print e["b"]
e["b"] += 1
print e["b"], type(e.get("b"))
e.get("c").incr()
print e["c"], type(e.get("c"))

print "before del e[...]", counts()
del e["c"]
print "after  del e[...]", counts()

for i in xrange(10000):
    continue  # loop body below is intentionally disabled; kept for reference
    e[str(i)+"a"] = 1  # mydouble(1)
    e.get(str(i)).incr()

print counts()
print len(e)
print e.dot(d)

print "making large dict..."
f = mydefaultdict(mydouble, [(x, mydouble(x)) for x in range(1000000)])
print "done"
Example #3
def main():

    if FLAGS.sim is not None:
        sequencefile = open(FLAGS.sim)

    parser = Parser(model, b=FLAGS.beam)

    print >> logs, "memory usage before parsing: ", human(memory(start_mem))

    totalscore = 0
    totalstates = 0
    totaluniq = 0
    totaledges = 0
    totaltime = 0

    totalprec = DepVal()    
    totaloracle = DepVal()

    print >> logs, "gc.collect unreachable: %d" % gc.collect()

    if FLAGS.manual_gc:
        gc.disable()
    
    i = 0
    gctime = 0
    for i, line in enumerate(shell_input(), 1):

        if FLAGS.manual_gc and i % FLAGS.gc == 0:
            print >> logs, "garbage collection...",
            tt = time.time()
            print >> logs, "gc.collect unreachable: %d" % gc.collect()
            tt = time.time() - tt
            print >> logs, "took %.1f seconds" % tt
            gctime += tt

        line = line.strip()
        if line[0]=="(":
            # input is a gold tree (so that we can evaluate)
            reftree = DepTree.parse(line)
            sentence = DepTree.sent # assigned in DepTree.parse()            
        else:
            # input is word/tag list
            reftree = None
            sentence = [tuple(x.rsplit("/", 1)) for x in line.split()]   # split by default returns list            
            DepTree.sent = sentence

        if FLAGS.debuglevel >= 1:
            print >> logs, sentence
            print >> logs, reftree

        mytime.zero()
        
        if FLAGS.sim is not None: # simulation, not parsing
            actions = map(int, sequencefile.readline().split())
            goal, feats = parser.simulate(actions, sentence) #if model is None score=0
            print >> logs, feats
            score, tree = goal.score, goal.top()
            (nstates, nedges, nuniq) = (0, 0, 0)
        else:
            # real parsing
            if True:  # originally gated by FLAGS.earlystop; the else branch below is unreachable
                refseq = reftree.seq() if reftree is not None else None
                tree, myseq, score, _ = parser.try_parse(sentence, refseq, update=False)
                if FLAGS.early:
                    print >> logs, "ref=", refseq
                    print >> logs, "myt=", myseq

                    refseq = refseq[:len(myseq)] # truncate to the shared prefix (align gold with the early-stopped model sequence)
                    _, reffeats = parser.simulate(refseq, sentence) 
                    _, myfeats = parser.simulate(myseq, sentence)
                    print >> logs, "+feats", reffeats
                    print >> logs, "-feats", myfeats
                    
                nstates, nedges, nuniq = parser.stats()
            else:
                goal = parser.parse(sentence)
                nstates, nedges, nuniq = parser.stats()

##        score, tree = goal.score, goal.top()
#        score, tree = mytree
            
        dtime = mytime.period()

        if not FLAGS.early and not FLAGS.profile:
            if FLAGS.forest:
                parser.dumpforest(i)
            elif FLAGS.output:
                if not FLAGS.kbest:
                    print tree
                else:
                    stuff = parser.beams[-1][:FLAGS.kbest]
                    print "sent.%d\t%d" % (i, len(stuff))
                    for state in stuff:
                        print "%.2f\t%s" % (state.score, state.tree())
                    print
                    
            if FLAGS.oracle:
                oracle, oracletree = parser.forestoracle(reftree)
                totaloracle += oracle

        prec = DepTree.compare(tree, reftree) # OK if either is None

        searched = sum(x.derivation_count() for x in parser.beams[-1]) if FLAGS.forest else 0
        print >> logs, "sent {i:-4} (len {l}):\tmodelcost= {c:.2f}\tprec= {p:.2%}"\
              "\tstates= {ns} (uniq {uq})\tedges= {ne}\ttime= {t:.3f}\tsearched= {sp}" \
              .format(i=i, l=len(sentence), c=score, p=prec.prec(), \
                      ns=nstates, uq=nuniq, ne=nedges, t=dtime, sp=searched)
        if FLAGS.seq:
            actions = goal.all_actions()  # N.B.: goal is only bound in the simulation branch above
            print >> logs, " ".join(actions)
            check = simulate(actions, sentence, model) #if model is None score=0
            checkscore = check.score
            checktree = check.top()
            print >> logs, checktree
            checkprec = checktree.evaluate(reftree)
            print >> logs, "verify: tree:%s\tscore:%s\tprec:%s" % (tree == checktree, score == checkscore, prec == checkprec)
            print >> logs, "sentence %-4d (len %d): modelcost= %.2lf\tprec= %.2lf\tstates= %d (uniq %d)\tedges= %d\ttime= %.3lf" % \
                  (i, len(sentence), checkscore, checkprec.prec100(), nstates, nuniq, nedges, dtime)

        totalscore += score
        totalstates += nstates
        totaledges += nedges
        totaluniq += nuniq
        totaltime += dtime

        totalprec += prec

    if i == 0:
        print >> logs, "Error: empty input."
        sys.exit(1)

    if FLAGS.featscache:
        print >> logs, "feature constructions: tot= %d shared= %d (%.2f%%)" % (State.tot, State.shared, State.shared / State.tot * 100)

    print >> logs, "beam= {b}, avg {a} sents,\tmodelcost= {c:.2f}\tprec= {p:.2%}" \
          "\tstates= {ns:.1f} (uniq {uq:.1f})\tedges= {ne:.1f}\ttime= {t:.4f}\n{d:s}" \
          .format(b=FLAGS.b, a=i, c=totalscore/i, p=totalprec.prec(), 
                  ns=totalstates/i, uq=totaluniq/i, ne=totaledges/i, t=totaltime/i, 
                  d=totalprec.details())
    
    if FLAGS.uniqstat:
        for i in sorted(uniqstats):
            print >> logs, "%d\t%.1lf\t%d\t%d" % \
                  (i, sum(uniqstats[i]) / len(uniqstats[i]), \
                   min(uniqstats[i]), max(uniqstats[i]))

    if FLAGS.oracle:
        print >> logs, "oracle= ", totaloracle

    if FLAGS.manual_gc:
        print >> logs, "garbage collection took %.1f seconds" % gctime

    print >> logs, "memory usage after parsing: ", human(memory(start_mem))
    if FLAGS.mydouble:
        from mydouble import counts
        print >> logs, "mydouble usage and freed: %d %d" % counts()
Example #4
from _mycollections import mydefaultdict
from mydouble import mydouble, counts

d = mydefaultdict(mydouble)  # always like that

d["a"] = 1  # no need to say mydouble(1); transparent to the user
print d

print d.addc(d, 0.5)

for i in xrange(500000):
    d[str(i)] = 2

print len(d)

import gc
e = d.copy()
print "before", e["a"], counts()

for i in xrange(20):
    #    e = e.deepcopy()
    e.iaddc(d, 0.5)  # in place: e += 0.5 * d
    #    e.addc(d, 0.5)
    print e["a"], counts()
#    gc.collect()
#     del e
# ##    gc.collect()
#     e = f

print d["a"]
Example #5
    def train(self):

        start_mem = memory()       

        starttime = time.time()

        if FLAGS.finaldump:
            Perceptron.best_weights = self.decoder.model.new_weights()

##        model name	: Intel(R) Xeon(R) CPU           W3570  @ 3.20GHz

        print >> logs, "%d CPUs at %s %s" % (cpu_count(),
                                             os.popen("cat /proc/cpuinfo|grep [GM]Hz").readlines()[0].strip().split(":")[-1],
                                             os.popen("cat /proc/cpuinfo|grep [GM]Hz").readlines()[-1].strip().split(":")[-1])
        
        print >> logs, "starting perceptron at", time.ctime()        

        best_prec = 0
        acc_steps = 0
        for it in xrange(1, self.iter+1):
            Perceptron.curr = it 

            print >> logs, "iteration %d starts..............%s" % (it, time.ctime())

            curr_mem = memory() # outside of multi

            print >> logs, "memory usage at iter %d before pool: %s" % (it, human(memory(start_mem)))

            iterstarttime = time.time()

            if Perceptron.shuffle:
                self.shuffle_train()

            if not Perceptron.singletrain:
                pool = Pool(processes=self.ncpus)
            pool_time = time.time() - iterstarttime

            num_updates, early_updates, total_steps, bad_updates = 0, 0, 0, 0
##            new_allweights, new_weights = self.decoder.model.new_weights(), self.decoder.model.new_weights()

            print >> logs, "memory usage at iter %d after pool: %s" % (it, human(memory(start_mem)))

            tt = time.time()
            print >> logs, "before para time...", tt
            results = map(self.train_worker, self.trainchunks) if Perceptron.singletrain else \
                          pool.map(self.train_worker, self.trainchunks, chunksize=1)

            if FLAGS.mydouble:
                print >> logs, "mydouble usage and freed: %d %d" % counts(), \
                      "|w|=", len(Perceptron.weights), "|avgw|=", len(Perceptron.weights) if FLAGS.avg else 0, \
                      "|dw|=", len(results[0][-1])

            print >> logs, "after para time...", time.time()
            compute_time = time.time() - tt

            copy_time = 0
            para_times = []
            for dtime, size, (_num_updates, _early_updates, _steps, _bad_updates), _weights in results:

                num_updates += _num_updates
                early_updates += _early_updates
                total_steps += _steps
                bad_updates += _bad_updates
                
            factor = size / self.trainsize * Perceptron.learning_rate  # chunk's share of the data, scaled by the learning rate

                tt = time.time()
                if not Perceptron.singletrain: # singletrain: updated in place in one_pass_on_train()
                    Perceptron.weights.iaddc(_weights, factor)

                del _weights #, _allweights
                    
                copy_time += time.time() - tt

                para_times.append(dtime)

            del results
            
            if not Perceptron.singletrain:
                pool.close()
                pool.join()
                
            print >> logs, "gc can't reach", gc.collect()

            print >> logs, "pool_time= %.1f s, compute_walltime= %.1f s, compute_cputime= %.1f (%s), copy_time= %.1f s" \
                  % (pool_time, compute_time, sum(para_times), " ".join("%.1f" % x for x in para_times), copy_time)
            
            print >> logs, "memory usage at iter %d after fork: %s" % (it, human(memory(start_mem)))            

            if not Perceptron.singletrain: # N.B.: in non-multiproc mode, self.c is updated in place
                Perceptron.c += self.trainsize / self.ncpus

            print >> logs, "self.c=", Perceptron.c
            ##print >> logs, "w =", Perceptron.weights
            
            iterendtime = time.time()

            print >> logs, "memory usage at iter %d: extra %s, total %s" % (it,
                                                                            human(memory(curr_mem)),
                                                                            human(memory(start_mem)))
            if FLAGS.debuglevel >= 1:
                print >> logs, "weights=", Perceptron.weights

            curr_mem = memory()
                                                            
            print >> logs, "iteration %d training finished at %s. now evaluating on dev..." % (it, time.ctime())
            
##            avgweights = self.avg_weights() if self.avg else Perceptron.weights
            if self.avg:
##                Perceptron.weights.set_step(Perceptron.c)
                Perceptron.weights.set_avg(Perceptron.c)
                if FLAGS.debuglevel >= 1:
                    print >> logs, "avgweights=", self.weights
                
            avgendtime = time.time()
            print >> logs, "avg weights (trim) took %.1f seconds." % (avgendtime - iterendtime)
##            avgweights = self.decoder.model.new_weights()

            self.decoder.model.weights = Perceptron.weights # OK if noavg; see above
            Parser.State.model.weights = Perceptron.weights # multiprocessing: State.model is static

            prec = self.eval_on_dev()
            print >> logs, "eval on dev took %.1f seconds." % (time.time() - avgendtime)

            acc_steps += total_steps
            print >> logs, "at iter {0}, updates {1} (early {4}, er {10:.1f}%), dev {2}{7}, |w| {3}, time {5:.3f}h acctime {6:.3f}h; steps {8} cover {9:.1f}% accsteps {11}; bad {12} br {13:.1f}%"\
                  .format(it, num_updates, prec, len(Perceptron.weights), early_updates, \
                          (time.time() - iterstarttime)/3600,
                          (time.time() - starttime)/3600.,
                          "+" if prec > best_prec else "",
                          total_steps, 100.0*total_steps/Perceptron.trainsteps,
                          100.*early_updates/num_updates,
                          acc_steps,
                          bad_updates, 100.*bad_updates/num_updates) # 13 elements
            logs.flush()

            if prec > best_prec:
                best_prec = prec
                best_it = it
                best_wlen = len(Perceptron.weights)
                if not FLAGS.finaldump:
                    print >> logs, "new high at iteration {0}: {1}. Dumping Weights...".format(it, prec)
                    self.dump(Perceptron.weights, it)
                else:
                    Perceptron.best_weights = Perceptron.weights.copy()

            if self.avg:
                Perceptron.weights.reset_avg(Perceptron.c) # restore non-avg

            print >> logs, "gc can't reach", gc.collect()
            
            if FLAGS.mydouble:
                print >> logs, "mydouble usage and freed: %d %d ------------------------" % counts()

            logs.flush() # for hpc


        print >> logs, "peaked at iteration {0}: {1}, |bestw|= {2}.".format(best_it, best_prec, best_wlen)
        if FLAGS.finaldump:
            print >> logs, "Dumping best weights..."        
            self.dump(Perceptron.best_weights, best_it)
        print >> logs, "perceptron training of %d iterations finished on %s (took %.2f hours)"  % \
              (it, time.ctime(), (time.time() - starttime)/3600.)
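set_avg(c)/reset_avg(c) above swap averaged weights in for evaluation and back out to resume training. A scalar sketch of the standard lazy-averaging trick this pattern implies (an assumption about the implementation, not the actual mydouble code): keep the raw weight w plus a step-weighted update sum u, so the average w - u/c never requires per-step accumulation.

class AvgWeightSketch(object):
    def __init__(self):
        self.w = 0.0  # current (raw) weight
        self.u = 0.0  # sum of step * delta, steps counted from 0
    def update(self, delta, step):
        self.w += delta
        self.u += step * delta
    def set_avg(self, c):  # swap in the average of w over steps 1..c
        self._raw = self.w
        self.w -= self.u / float(c)
    def reset_avg(self, c):  # restore the raw weight (c unused in this sketch)
        self.w = self._raw

aw = AvgWeightSketch()
aw.update(1.0, 0)  # w = 1 from step 1 on
aw.update(1.0, 2)  # w = 2 from step 3 on
aw.set_avg(4)
print aw.w  # (1 + 1 + 2 + 2) / 4 = 1.5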
Example #6
    def train(self):

        start_mem = memory()

        starttime = time.time()

        print >> logs, "starting perceptron at", time.ctime()

        best_prec = 0
        for it in xrange(1, self.iter + 1):

            print >> logs, "iteration %d starts..............%s" % (
                it, time.ctime())

            curr_mem = memory()
            iterstarttime = time.time()
            num_updates, early_updates = self.one_pass_on_train()
            iterendtime = time.time()

            print >> logs, "memory usage at iter %d: extra %s, total %s" % (
                it, human(memory(curr_mem)), human(memory(start_mem)))
            curr_mem = memory()

            print >> logs, "iteration %d training finished at %s. now evaluating on dev..." % (
                it, time.ctime())
            avgweights = self.avg_weights() if self.avg else self.weights
            avgendtime = time.time()
            print >> logs, "avg weights (trim) took %.1f seconds." % (
                avgendtime - iterendtime)
            if FLAGS.debuglevel >= 2:
                print >> logs, "avg w=", avgweights
            self.decoder.model.weights = avgweights
            prec = self.eval_on_dev()
            print >> logs, "eval on dev took %.1f seconds." % (time.time() -
                                                               avgendtime)


            print >> logs, "at iteration {0}, updates= {1} (early {4}), dev= {2}, |w|= {3}, time= {5:.3f}h acctime= {6:.3f}h"\
                  .format(it, num_updates, prec, len(avgweights), early_updates, \
                          (time.time() - iterstarttime)/3600, (time.time() - starttime)/3600.)
            logs.flush()

            if prec > best_prec:
                best_prec = prec
                best_it = it
                best_wlen = len(avgweights)
                print >> logs, "new high at iteration {0}: {1}. Dumping Weights...".format(
                    it, prec)
                self.dump(avgweights)

            self.decoder.model.weights = self.weights  # restore non-avg

            del avgweights
            gc.collect()

            if FLAGS.mydouble:
                from mydouble import counts
                print >> logs, "mydouble usage and freed: %d %d" % counts()

        print >> logs, "peaked at iteration {0}: {1}, |bestw|= {2}.".format(
            best_it, best_prec, best_wlen)
        print >> logs, "perceptron training of %d iterations finished on %s (took %.2f hours)"  % \
              (it, time.ctime(), (time.time() - starttime)/3600.)
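This is the plain single-process variant; one_pass_on_train() itself is not shown. A schematic structured-perceptron pass (assumed shape, with a hypothetical decode_feats helper, not the actual implementation) would look like:

def one_pass_on_train_sketch(decoder, weights, trainset):
    # decode each sentence; on a mistake, add gold features and subtract guessed ones
    num_updates = 0
    for sentence, goldfeats in trainset:
        guessfeats = decoder.decode_feats(sentence, weights)  # hypothetical helper
        if guessfeats != goldfeats:
            for f, v in goldfeats.iteritems():
                weights[f] = weights.get(f, 0.0) + v
            for f, v in guessfeats.iteritems():
                weights[f] = weights.get(f, 0.0) - v
            num_updates += 1
    return num_updates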
Example #7
    def train(self):

        start_mem = memory()

        starttime = time.time()

        print >> logs, "starting perceptron at", time.ctime()

        best_prec = 0
        for it in xrange(1, self.iter + 1):

            print >> logs, "iteration %d starts..............%s" % (
                it, time.ctime())

            curr_mem = memory()
            iterstarttime = time.time()
            self.decoder.num_edges = 0
            num_updates, early_updates, num_steps = self.one_pass_on_train()
            iterendtime = time.time()

            print >> logs, "memory usage at iter %d: extra %s, total %s" % (
                it, human(memory(curr_mem)), human(memory(start_mem)))
            if FLAGS.debuglevel >= 1:
                print >> logs, "weights=", self.weights

            curr_mem = memory()

            print >> logs, "iteration %d training finished at %s. now evaluating on dev..." % (
                it, time.ctime())
            ##            avgweights = self.avg_weights() if self.avg else self.weights

            avgtime = 0
            timer = Mytime()
            if self.avg:
                ##                print >> logs, "    w=", self.weights
                ##                print >> logs, " ".join(map(str, [x.get_step() for x in self.weights.values()]))
                self.weights.set_avg(self.c)
                avgtime += timer.gap()
                if FLAGS.debuglevel >= 1:
                    print >> logs, "avgweights=", self.weights

            prec = self.eval_on_dev()

            print >> logs, "eval on dev took %.1f seconds." % timer.gap()

            print >> logs, "at iteration {0}, updates= {1} (early {4}), dev= {2}{7}, |w|= {3}, time= {5:.3f}h acctime= {6:.3f}h, root={10:.1%}"\
                  .format(it, num_updates, prec, len(self.weights), early_updates,
                          (time.time() - iterstarttime)/3600,
                          (time.time() - starttime)/3600.,
                          "+" if prec > best_prec else "",
                          num_steps, self.decoder.num_edges,
                          prec.root())
            logs.flush()

            if prec > best_prec:
                best_prec = prec
                best_it = it
                best_wlen = len(self.weights)
                best_time = time.time() - starttime
                print >> logs, "new high at iteration {0}: {1}. Dumping Weights...".format(
                    it, prec)
                if not FLAGS.dump_last:
                    self.dump(self.weights)
                else:
                    self.bestweights = self.weights.deepcopy()

            if self.avg:
                timer = Mytime()
                self.weights.reset_avg(self.c)  # restore weights
                t = timer.gap()
                print >> logs, "avg weights (set/reset) took %.1f+%.1f=%.1f seconds." % (
                    avgtime, t, avgtime + t)


##            self.decoder.model.weights = self.weights # restore non-avg

##            del avgweights
            gc.collect()

            if FLAGS.mydouble:
                from mydouble import counts
                print >> logs, "mydouble usage and freed: %d %d" % counts()

        print >> logs, "peaked at iteration {0}: {1} ({3:.1f}h), |bestw|= {2}.".format(
            best_it, best_prec, best_wlen, best_time / 3600)
        print >> logs, best_prec.details()
        print >> logs, "perceptron training of %d iterations finished on %s (took %.2f hours)"  % \
              (it, time.ctime(), (time.time() - starttime)/3600.)

        if FLAGS.dump_last:
            self.dump(self.bestweights)