def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, dryRun=False, quiet=False):
    """Updates all targets. Run jobs instances of bakefile simultaneously.

    Walks the module-level 'files' registry (bakefile -> formats and
    per-format flags), determines which (file, format) pairs are out of
    date, and regenerates them by spawning up to 'jobs' concurrent
    'bakefile' subprocesses.  Dependency state is loaded from and saved
    back to the '.bakefile_gen.state' file.

    Parameters:
      jobs          -- number of bakefile subprocesses to run in parallel
      pretend       -- build the command lines but don't spawn bakefile
      keepGoing     -- on a failed bakefile invocation, report and continue
                       instead of raising
      alwaysMakeAll -- treat every bakefile as out of date
      dryRun        -- pass --dry-run to bakefile
      quiet         -- suppress progress output and pass --quiet to bakefile

    Raises errors.Error when a bakefile invocation fails (unless
    keepGoing is set).
    """
    if verbose:
        if alwaysMakeAll:
            print 'pretending all makefiles are out of date...'
        else:
            print 'determining which makefiles are out of date...'

    needUpdate = []   # list of (filename, format) pairs to regenerate
    total = 0         # total number of (filename, format) pairs seen

    # load the state file with dependencies even when using --always-make
    # so that running bakefile_gen --always-make doesn't invalidate all
    # dependencies if it doesn't finish:
    try:
        dependencies.load('.bakefile_gen.state')
    except IOError: pass

    if alwaysMakeAll:
        # unconditionally add all bakefiles to the list of bakefiles which
        # need to be regenerated:
        for f in files:
            for fmt in files[f].formats:
                total += 1
                needUpdate.append((f,fmt))
    else:
        # load bakefile_gen state file and choose only bakefiles out of date:
        for f in files:
            for fmt in files[f].formats:
                total += 1
                if dependencies.needsUpdate(os.path.abspath(f),
                                            fmt,
                                            cmdline=files[f].flags[fmt]):
                    needUpdate.append((f,fmt))

    totalNeedUpdate = len(needUpdate)

    if verbose:
        print ' ...%i out of %i will be updated' % (totalNeedUpdate, total)

    class JobDesc:
        """One pending or running bakefile invocation for a single
        (filename, format) pair.  Closes over the enclosing function's
        arguments (quiet, keepGoing, dryRun, ...) and module globals."""

        def __init__(self, data, jobNum, xmlcache, pretend=False):
            # data is a (filename, format) tuple popped from needUpdate
            self.filename, self.format = data
            self.jobNum = jobNum
            self.xmlcache = xmlcache
            self.pretend = pretend
            # temp files that the bakefile subprocess fills in
            # (--output-deps / --output-changes); removed in finish():
            self.tempDeps = portautils.mktemp('bakefile')
            self.tempChanges = portautils.mktemp('bakefile')
            self.process = None

        def run(self):
            """Starts the subprocess."""
            if not quiet:
                print '[%i/%i] generating %s from %s' % (
                    self.jobNum, totalNeedUpdate, self.format, self.filename)
                sys.stdout.flush()
            # assemble the bakefile command line:
            cmd = _getBakefileExecutable()
            cmd.append('-f%s' % _get_base_format(self.format))
            cmd += files[self.filename].flags[self.format]
            cmd.append('--output-deps=%s' % self.tempDeps)
            cmd.append('--output-changes=%s' % self.tempChanges)
            cmd.append('--xml-cache=%s' % self.xmlcache)
            if quiet:
                cmd.append('--quiet')
            elif verbose >= 2:
                cmd.append('-v')
            if dryRun:
                cmd.append('--dry-run')
            cmd.append(self.filename)
            if verbose:
                print ' '.join(cmd)
            if not pretend:
                self.process = subprocess.Popen(cmd)

        def poll(self):
            """Returns true if the job is finished (non-blocking).
            Pretend jobs and never-started jobs count as finished."""
            if self.pretend or self.process == None:
                return True
            return self.process.poll() != None

        def wait(self):
            """Blocks until the subprocess terminates (no-op for pretend
            or never-started jobs)."""
            if self.pretend or self.process == None:
                return True
            return self.process.wait() != None

        def finish(self):
            """Collects the results of a terminated job.

            On success, merges the job's dependency output into the global
            dependency state and returns the number of modified files
            (lines in the --output-changes file).  On failure, returns 0
            if keepGoing is set, otherwise raises errors.Error.  The temp
            files are removed in all cases.
            """
            try:
                try:
                    # NB: "finally" section below is still called after these
                    #     return statements
                    if self.pretend:
                        return 0
                    if self.process == None:
                        return 0
                    if self.process.returncode == 0:
                        dependencies.load(self.tempDeps)
                        dependencies.addCmdLine(os.path.abspath(self.filename),
                                                self.format,
                                                files[self.filename].flags[self.format])
                        return _countLines(self.tempChanges)
                    else: # failed, returncode != 0
                        if keepGoing:
                            sys.stderr.write(
                                '[bakefile_gen] bakefile exited with error (%i), ignoring\n'
                                % self.process.returncode)
                            return 0 # no modified files
                        else:
                            raise errors.Error('bakefile exited with error (%i)'
                                               % self.process.returncode)
                except IOError, e:
                    raise errors.Error('failed to run bakefile: %s' % e)
            finally:
                # always clean up the temp files, even on error/early return:
                os.remove(self.tempDeps)
                os.remove(self.tempChanges)

    modifiedFiles = 0
    jobNum = 0
    # fixed-size pool of job slots; a slot is None when free:
    childProcessesCnt = 0
    childProcesses = [None for i in range(0,jobs)]

    # NB: Pre-parsed XML cache doesn't use file locking, so we have to ensure
    #     only one bakefile process will be using it at a time, by having
    #     #jobs caches. Hopefully no big deal, the cache is only really useful
    #     on large projects with shared fies and it fills pretty quickly in
    #     that case (FIXME?)
    tempXmlCacheDir = portautils.mktempdir('bakefile')
    tempXmlCacheFiles = [os.path.join(tempXmlCacheDir, 'xmlcache%i' % i)
                         for i in range(0,jobs)]

    try:
        try:
            # run until the work queue is drained and all slots are free:
            while len(needUpdate) > 0 or childProcessesCnt > 0:
                # start new processes:
                for p in range(0,jobs):
                    if len(needUpdate) > 0 and childProcesses[p] == None:
                        jobNum += 1
                        childProcessesCnt += 1
                        childProcesses[p] = JobDesc(needUpdate.pop(),
                                                    jobNum,
                                                    tempXmlCacheFiles[p],
                                                    pretend)
                        childProcesses[p].run()
                # check for finished processes:
                for p in range(0,jobs):
                    pr = childProcesses[p]
                    if pr != None and pr.poll():
                        childProcessesCnt -= 1
                        childProcesses[p] = None
                        modifiedFiles += pr.finish()
                # FIXME: Unfortunately, Python's subprocess module doesn't
                #        provide any way to get notified about child process'
                #        termination, so we have to poll it. Wait a bit here
                #        to avoid 100% CPU usage.
                time.sleep(0.01)
        # NB: using "finally" instead of "except" so that we can easily handle
        #     both Exception and KeyboardInterrupt (which is not Exception)
        #     and also to preserve exception stack trace
        finally:
            # on the success path every slot is already None, so this only
            # runs jobs when we're unwinding after an error/interrupt:
            left = [p for p in childProcesses if p != None]
            if len(left) > 0:
                print '[bakefile_gen] waiting for remaining jobs to finish after error...'
                for p in left:
                    try:
                        p.wait()
                        modifiedFiles += p.finish()
                    except Exception, e:
                        pass # ignore further errors
    finally:
        # persist whatever dependency info we gathered, even on failure:
        shutil.rmtree(tempXmlCacheDir, ignore_errors=True)
        dependencies.save('.bakefile_gen.state')

    if not quiet:
        if dryRun:
            print '%i files would be modified' % modifiedFiles
        else:
            print '%i files modified' % modifiedFiles
xmlparser.cache.close() xmlparser.cache = None if not read_ok: sys.exit(1) if options.dump: import mk_dump mk_dump.dumpMakefile() else: if not writer.write(): sys.exit(1) if config.track_deps: import dependencies dependencies.save(config.deps_file) if __name__ == '__main__': if sys.version_info[0:3] < (2,3,0): sys.stderr.write('error: Bakefile requires at least Python 2.3.0\n') sys.exit(1) do_profiling = 0 # set to 1 if debugging bottlenecks try: if do_profiling: import hotshot prof = hotshot.Profile('bakefile.prof') prof.runcall(run, sys.argv[1:]) prof.close() else:
xmlparser.cache.close() xmlparser.cache = None if not read_ok: sys.exit(1) if options.dump: import mk_dump mk_dump.dumpMakefile() else: if not writer.write(): sys.exit(1) if config.track_deps: import dependencies dependencies.save(config.deps_file) if __name__ == '__main__': if sys.version_info[0:3] < (2, 3, 0): sys.stderr.write('error: Bakefile requires at least Python 2.3.0\n') sys.exit(1) do_profiling = 0 # set to 1 if debugging bottlenecks try: if do_profiling: import hotshot prof = hotshot.Profile('bakefile.prof') prof.runcall(run, sys.argv[1:]) prof.close()
def updateTargets(jobs, pretend=False, keepGoing=False, alwaysMakeAll=False, dryRun=False, quiet=False):
    """Updates all targets. Run jobs instances of bakefile simultaneously.

    For every (file, format) pair recorded in the module-level 'files'
    registry, decides whether it is out of date (or assumes so when
    alwaysMakeAll is set) and regenerates it by running the 'bakefile'
    tool, with at most 'jobs' subprocesses alive at once.  Dependency
    state is read from and written back to '.bakefile_gen.state'.

    Parameters:
      jobs          -- number of concurrent bakefile subprocesses
      pretend       -- construct command lines but don't spawn bakefile
      keepGoing     -- report failed invocations and continue instead of
                       raising
      alwaysMakeAll -- regenerate everything regardless of dependencies
      dryRun        -- pass --dry-run to bakefile
      quiet         -- suppress progress output; pass --quiet to bakefile

    Raises errors.Error on a failed bakefile invocation unless keepGoing
    is set.
    """
    if verbose:
        if alwaysMakeAll:
            print 'pretending all makefiles are out of date...'
        else:
            print 'determining which makefiles are out of date...'

    needUpdate = []   # (filename, format) pairs queued for regeneration
    total = 0         # count of all (filename, format) pairs examined

    # load the state file with dependencies even when using --always-make
    # so that running bakefile_gen --always-make doesn't invalidate all
    # dependencies if it doesn't finish:
    try:
        dependencies.load('.bakefile_gen.state')
    except IOError: pass

    if alwaysMakeAll:
        # unconditionally add all bakefiles to the list of bakefiles which
        # need to be regenerated:
        for f in files:
            for fmt in files[f].formats:
                total += 1
                needUpdate.append((f, fmt))
    else:
        # load bakefile_gen state file and choose only bakefiles out of date:
        for f in files:
            for fmt in files[f].formats:
                total += 1
                if dependencies.needsUpdate(os.path.abspath(f),
                                            fmt,
                                            cmdline=files[f].flags[fmt]):
                    needUpdate.append((f, fmt))

    totalNeedUpdate = len(needUpdate)

    if verbose:
        print ' ...%i out of %i will be updated' % (totalNeedUpdate, total)

    class JobDesc:
        """A single queued/running bakefile invocation for one
        (filename, format) pair; closes over the enclosing function's
        arguments and module globals."""

        def __init__(self, data, jobNum, xmlcache, pretend=False):
            # data: (filename, format) tuple popped off needUpdate
            self.filename, self.format = data
            self.jobNum = jobNum
            self.xmlcache = xmlcache
            self.pretend = pretend
            # temp files bakefile writes --output-deps/--output-changes to;
            # deleted again in finish():
            self.tempDeps = portautils.mktemp('bakefile')
            self.tempChanges = portautils.mktemp('bakefile')
            self.process = None

        def run(self):
            """Starts the subprocess."""
            if not quiet:
                print '[%i/%i] generating %s from %s' % (
                    self.jobNum, totalNeedUpdate, self.format, self.filename)
                sys.stdout.flush()
            # build the bakefile command line:
            cmd = _getBakefileExecutable()
            cmd.append('-f%s' % _get_base_format(self.format))
            cmd += files[self.filename].flags[self.format]
            cmd.append('--output-deps=%s' % self.tempDeps)
            cmd.append('--output-changes=%s' % self.tempChanges)
            cmd.append('--xml-cache=%s' % self.xmlcache)
            if quiet:
                cmd.append('--quiet')
            elif verbose >= 2:
                cmd.append('-v')
            if dryRun:
                cmd.append('--dry-run')
            cmd.append(self.filename)
            if verbose:
                print ' '.join(cmd)
            if not pretend:
                self.process = subprocess.Popen(cmd)

        def poll(self):
            """Non-blocking check: true once the job has terminated.
            Pretend jobs and jobs that never started count as done."""
            if self.pretend or self.process == None:
                return True
            return self.process.poll() != None

        def wait(self):
            """Blocks until the subprocess exits (no-op for pretend or
            never-started jobs)."""
            if self.pretend or self.process == None:
                return True
            return self.process.wait() != None

        def finish(self):
            """Harvests a terminated job.

            On success, folds the job's dependency output into the global
            state and returns the number of modified files (line count of
            the --output-changes file).  On failure, returns 0 when
            keepGoing is set, otherwise raises errors.Error.  Temp files
            are removed on every path.
            """
            try:
                try:
                    # NB: "finally" section below is still called after these
                    #     return statements
                    if self.pretend:
                        return 0
                    if self.process == None:
                        return 0
                    if self.process.returncode == 0:
                        dependencies.load(self.tempDeps)
                        dependencies.addCmdLine(
                            os.path.abspath(self.filename),
                            self.format,
                            files[self.filename].flags[self.format])
                        return _countLines(self.tempChanges)
                    else: # failed, returncode != 0
                        if keepGoing:
                            sys.stderr.write(
                                '[bakefile_gen] bakefile exited with error (%i), ignoring\n'
                                % self.process.returncode)
                            return 0 # no modified files
                        else:
                            raise errors.Error(
                                'bakefile exited with error (%i)'
                                % self.process.returncode)
                except IOError, e:
                    raise errors.Error('failed to run bakefile: %s' % e)
            finally:
                # temp files are removed even on error or early return:
                os.remove(self.tempDeps)
                os.remove(self.tempChanges)

    modifiedFiles = 0
    jobNum = 0
    # fixed pool of job slots; None marks a free slot:
    childProcessesCnt = 0
    childProcesses = [None for i in range(0, jobs)]

    # NB: Pre-parsed XML cache doesn't use file locking, so we have to ensure
    #     only one bakefile process will be using it at a time, by having
    #     #jobs caches. Hopefully no big deal, the cache is only really useful
    #     on large projects with shared fies and it fills pretty quickly in
    #     that case (FIXME?)
    tempXmlCacheDir = portautils.mktempdir('bakefile')
    tempXmlCacheFiles = [
        os.path.join(tempXmlCacheDir, 'xmlcache%i' % i)
        for i in range(0, jobs)
    ]

    try:
        try:
            # loop until the queue is empty and every slot has drained:
            while len(needUpdate) > 0 or childProcessesCnt > 0:
                # start new processes:
                for p in range(0, jobs):
                    if len(needUpdate) > 0 and childProcesses[p] == None:
                        jobNum += 1
                        childProcessesCnt += 1
                        childProcesses[p] = JobDesc(needUpdate.pop(),
                                                    jobNum,
                                                    tempXmlCacheFiles[p],
                                                    pretend)
                        childProcesses[p].run()
                # check for finished processes:
                for p in range(0, jobs):
                    pr = childProcesses[p]
                    if pr != None and pr.poll():
                        childProcessesCnt -= 1
                        childProcesses[p] = None
                        modifiedFiles += pr.finish()
                # FIXME: Unfortunately, Python's subprocess module doesn't
                #        provide any way to get notified about child process'
                #        termination, so we have to poll it. Wait a bit here
                #        to avoid 100% CPU usage.
                time.sleep(0.01)
        # NB: using "finally" instead of "except" so that we can easily handle
        #     both Exception and KeyboardInterrupt (which is not Exception)
        #     and also to preserve exception stack trace
        finally:
            # on normal completion all slots are None already; this path
            # only has work to do when unwinding after an error/interrupt:
            left = [p for p in childProcesses if p != None]
            if len(left) > 0:
                print '[bakefile_gen] waiting for remaining jobs to finish after error...'
                for p in left:
                    try:
                        p.wait()
                        modifiedFiles += p.finish()
                    except Exception, e:
                        pass # ignore further errors
    finally:
        # save gathered dependency info even when we're failing:
        shutil.rmtree(tempXmlCacheDir, ignore_errors=True)
        dependencies.save('.bakefile_gen.state')

    if not quiet:
        if dryRun:
            print '%i files would be modified' % modifiedFiles
        else:
            print '%i files modified' % modifiedFiles