def __init__(self, buildPath, options, args):
    """Bind a build context to |buildPath| and load the cached configuration.

    :param buildPath: Root folder of the configured objdir.
    :param options: Parsed command-line options (reads .no_color here).
    :param args: Remaining positional command-line arguments.
    """
    self.buildPath = buildPath
    self.options = options
    self.args = args
    self.cacheFolder = os.path.join(buildPath, '.ambuild2')
    self.dbpath = os.path.join(self.cacheFolder, 'graph')

    # This doesn't completely work yet because it's not communicated to child
    # processes. We'll have to send a message down or up to fix this.
    if self.options.no_color:
        util.DisableConsoleColors()

    with open(os.path.join(self.cacheFolder, 'vars'), 'rb') as fp:
        try:
            self.vars = util.pickle.load(fp)
        except ValueError:
            sys.stderr.write('Build was configured with Python 3; use python3 instead.\n')
            sys.exit(1)
        except Exception:
            # BUGFIX: the existence test was inverted. A *missing* vars file
            # means no build was ever configured here; a vars file that exists
            # but cannot be unpickled means the objdir is corrupt.
            if not os.path.exists(os.path.join(self.cacheFolder, 'vars')):
                sys.stderr.write('There does not appear to be a build configured here.\n')
            else:
                sys.stderr.write(
                    'The build configured here looks corrupt; you will have to delete your objdir.\n')
                raise
            sys.exit(1)

    self.restore_environment()
    self.db = database.Database(self.dbpath)
    self.messagePump = MessagePump()
    self.procman = ProcessManager(self.messagePump)
    self.db.connect()
def __init__(self, pump, channel, task_graph, vars, num_processes):
    """Start the worker pool and announce this taskmaster to the parent."""
    super(TaskMasterChild, self).__init__(pump, channel)
    self.task_graph = task_graph
    self.outstanding = {}
    self.idle = set()
    self.build_failed = False
    self.build_completed = False
    self.messageMap = {
        'stop': lambda channel, message: self.receiveStop(channel, message),
    }

    # One worker process per requested slot; each gets the build vars.
    self.procman = ProcessManager(pump)
    for _ in range(num_processes):
        self.procman.spawn(WorkerParent(self), WorkerChild, args=(vars,))

    self.channel.send({
        'id': 'spawned',
        'pid': os.getpid(),
        'type': 'taskmaster',
    })
def __init__(self, pump, channel, task_graph, vars, num_processes):
    """Initialize the taskmaster child: spawn workers, report our pid upstream."""
    super(TaskMasterChild, self).__init__(pump, channel)
    self.task_graph = task_graph
    self.outstanding = {}   # task_id -> (task, worker) currently in flight
    self.idle = set()       # workers with nothing runnable to do
    self.build_failed = False
    self.build_completed = False
    self.messageMap = {'stop': lambda channel, message: self.receiveStop(channel, message)}

    self.procman = ProcessManager(pump)
    for worker_index in range(num_processes):
        self.procman.spawn(WorkerParent(self), WorkerChild, args=(vars,))

    announcement = {'id': 'spawned', 'pid': os.getpid(), 'type': 'taskmaster'}
    self.channel.send(announcement)
class Context(object):
    """State for one build invocation over an objdir.

    Loads the pickled configuration ('vars'), opens the dependency-graph
    database, and drives reconfiguration and building.
    """
    def __init__(self, buildPath, options, args):
        self.buildPath = buildPath
        self.options = options
        self.args = args
        self.cacheFolder = os.path.join(buildPath, '.ambuild2')
        self.dbpath = os.path.join(self.cacheFolder, 'graph')

        # This doesn't completely work yet because it's not communicated to child
        # processes. We'll have to send a message down or up to fix this.
        if self.options.no_color:
            util.DisableConsoleColors()

        with open(os.path.join(self.cacheFolder, 'vars'), 'rb') as fp:
            try:
                self.vars = util.pickle.load(fp)
            except ValueError:
                sys.stderr.write('Build was configured with Python 3; use python3 instead.\n')
                sys.exit(1)
            except Exception:
                # BUGFIX: the existence check was inverted. A missing 'vars'
                # file means no build was configured here; an existing but
                # unreadable one means the objdir is corrupt (and we re-raise
                # so the real error is visible).
                if not os.path.exists(os.path.join(self.cacheFolder, 'vars')):
                    sys.stderr.write('There does not appear to be a build configured here.\n')
                else:
                    sys.stderr.write(
                        'The build configured here looks corrupt; you will have to delete your objdir.\n')
                    raise
                sys.exit(1)

        self.restore_environment()
        self.db = database.Database(self.dbpath)
        self.messagePump = MessagePump()
        self.procman = ProcessManager(self.messagePump)
        self.db.connect()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Shut down child processes before closing the database.
        self.procman.shutdown()
        self.db.close()

    # Restore important environment properties that were present when this
    # build was configured.
    def restore_environment(self):
        if 'env' not in self.vars:
            return
        env = self.vars['env']
        for key in env:
            os.environ[key] = env[key]

    def reconfigure(self):
        """Reparse the build scripts if any changed since they were recorded.

        Returns True on success (including "nothing changed"), False when
        reparsing failed.
        """
        # See if we need to reconfigure.
        files = []
        reconfigure_needed = False
        self.db.query_scripts(lambda row, path, stamp: files.append((path, stamp)))
        for path, stamp in files:
            if not os.path.exists(path) or os.path.getmtime(path) > stamp:
                reconfigure_needed = True
                break

        if not reconfigure_needed:
            return True

        util.con_out(util.ConsoleHeader, 'Reparsing build scripts.', util.ConsoleNormal)

        # The database should be upgraded here, so we should always have an
        # API version set.
        api_version = self.db.query_var('api_version')
        assert api_version is not None

        if api_version == '2.0':
            from ambuild2.frontend.v2_0.amb2.gen import Generator
        elif api_version == '2.1':
            from ambuild2.frontend.v2_1.amb2 import Generator

        gen = Generator.FromVars(self.vars, self.db, self.options.refactor)
        try:
            gen.generate()
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only report genuine errors.
            traceback.print_exc()
            util.con_err(util.ConsoleRed, 'Failed to reparse build scripts.', util.ConsoleNormal)
            return False

        # We flush the node cache after this, since database.py expects to get
        # never-before-seen items at the start. We could change this and make
        # nodes individually import, which might be cleaner.
        self.db.flush_caches()
        return True

    def Build(self):
        if not self.reconfigure():
            return False
        return self.build_internal()

    def build_internal(self):
        """Handle inspection-only options, then compute damage and build."""
        if self.options.show_graph:
            self.db.printGraph()
            return True

        if self.options.show_changed:
            dmg_list = damage.ComputeDamageGraph(self.db, only_changed=True)
            for entry in dmg_list:
                if not entry.isFile():
                    continue
                print(entry.format())
            return True

        dmg_graph = damage.ComputeDamageGraph(self.db)
        if not dmg_graph:
            return False

        # If we get here, we have to compute damage.
        if self.options.show_damage:
            dmg_graph.printGraph()
            return True

        dmg_graph.filter_commands()

        if self.options.show_commands:
            dmg_graph.printGraph()
            return True

        builder = Builder(self, dmg_graph)
        if self.options.show_steps:
            builder.printSteps()
            return True

        if not builder.update():
            util.con_err(util.ConsoleHeader, 'Build failed.', util.ConsoleNormal)
            return False

        util.con_out(util.ConsoleHeader, 'Build succeeded.', util.ConsoleNormal)
        return True
class TaskMasterChild(ChildProcessListener):
    """Child-process side of the taskmaster.

    Receives a dependency-ordered task graph, farms runnable tasks out to a
    pool of worker processes, and forwards worker results and completion
    status to the parent process over |channel|.
    """
    def __init__(self, pump, channel, task_graph, vars, num_processes):
        super(TaskMasterChild, self).__init__(pump, channel)
        self.task_graph = task_graph
        self.outstanding = {}  # task_id -> (task, worker child) in flight
        self.idle = set()      # workers waiting for a runnable task
        self.build_failed = False
        self.build_completed = False
        self.messageMap = {
            'stop': lambda channel, message: self.receiveStop(channel, message)
        }

        self.procman = ProcessManager(pump)
        for _ in range(num_processes):
            self.procman.spawn(WorkerParent(self), WorkerChild, args=(vars,))

        self.channel.send({'id': 'spawned', 'pid': os.getpid(), 'type': 'taskmaster'})

    def receiveStop(self, channel, message):
        if not message['ok']:
            self.terminateBuild()
        self.close_idle()

    def terminateBuild(self):
        if self.build_failed:
            return
        self.build_failed = True
        self.close_idle()

    def receiveClose(self, channel):
        self.procman.shutdown()
        self.pump.cancel()

    def onWorkerResults(self, child, message):
        # Forward the results to the master process.
        self.channel.send(message)

        if not message['ok']:
            self.channel.send({
                'id': 'completed',
                'status': 'failed',
            })
            self.procman.close(child)
            self.terminateBuild()
            return

        task_id = message['task_id']
        # FIX(clarity): don't rebind the |child| parameter; keep the worker
        # recorded at dispatch time under its own name.
        task, task_child = self.outstanding.pop(task_id)

        # Enqueue any tasks that can be run if this was their last outstanding
        # dependency.
        for outgoing in task.outgoing:
            outgoing.incoming.remove(task)
            if len(outgoing.incoming) == 0:
                self.task_graph.append(outgoing)

        self.onWorkerReady(task_child, None)

        # If more stuff was queued, and we have idle processes, use them.
        while len(self.task_graph) and len(self.idle):
            idle_child = self.idle.pop()
            if not self.onWorkerReady(idle_child, None):
                break

    def close_idle(self):
        for child in self.idle:
            self.procman.close(child)
        self.idle = set()

    def onWorkerReady(self, child, message):
        """Dispatch a task to |child|, park it idle, or wind the build down.

        Returns True iff a task was sent to the worker.
        """
        if message and not message['finished']:
            self.channel.send({'id': 'spawned', 'pid': child.pid, 'type': 'worker'})

        # If the build failed, ignore the message, and shutdown the process.
        if self.build_failed:
            self.procman.close(child)
            self.maybe_request_shutdown(child)
            # BUGFIX(consistency): every other no-dispatch path returns an
            # explicit False; callers truth-test the result.
            return False

        if not len(self.task_graph):
            if len(self.outstanding):
                # There are still tasks left to complete, but they're waiting on
                # others to finish. Mark this process as ready and just ignore the
                # status change for now.
                self.idle.add(child)
            else:
                # There are no tasks remaining, the worker is not needed.
                self.build_completed = True
                self.procman.close(child)
                self.close_idle()
                self.channel.send({'id': 'completed', 'status': 'ok'})
            return False

        # Send a task to the worker.
        task = self.task_graph.pop()
        message = {
            'id': 'task',
            'task_id': task.id,
            'task_type': task.type,
            'task_data': task.data,
            'task_folder': task.folder,
            'task_outputs': task.outputs
        }
        child.send(message)
        self.outstanding[task.id] = (task, child)
        return True

    def onWorkerCrashed(self, child, task):
        self.channel.send({
            'id': 'completed',
            'status': 'crashed',
            'task_id': task.id,
        })
        self.terminateBuild()

    def onWorkerDied(self, child, error):
        if error != Error.NormalShutdown:
            # FIX(idiom): iterate the (task, worker) pairs directly instead of
            # indexing through .keys().
            for task, task_child in self.outstanding.values():
                if task_child == child:
                    # A worker failed, but crashed, so we have to tell the main process.
                    self.onWorkerCrashed(child, task)
                    break

        self.idle.discard(child)
        self.maybe_request_shutdown(child)

    def maybe_request_shutdown(self, child):
        for other_child in self.procman.children:
            if other_child == child:
                continue
            if other_child.is_alive():
                return

        # If we got here, no other child processes are live, so we can ask for
        # safe shutdown. This is needed to make sure all our messages arrive,
        # since closing one end of the pipe destroys any leftover data.
        self.channel.send({'id': 'done'})
class TaskMasterChild(ChildProcessListener):
    """Runs inside the taskmaster process: hands tasks to worker children and
    relays their results back up to the parent build process."""

    def __init__(self, pump, channel, task_graph, vars, num_processes):
        super(TaskMasterChild, self).__init__(pump, channel)
        self.task_graph = task_graph
        self.outstanding = {}
        self.idle = set()
        self.build_failed = False
        self.build_completed = False
        self.messageMap = {'stop': lambda channel, message: self.receiveStop(channel, message)}

        self.procman = ProcessManager(pump)
        for _ in range(num_processes):
            self.procman.spawn(WorkerParent(self), WorkerChild, args=(vars,))

        hello = {'id': 'spawned', 'pid': os.getpid(), 'type': 'taskmaster'}
        self.channel.send(hello)

    def receiveStop(self, channel, message):
        if not message['ok']:
            self.terminateBuild()
        self.close_idle()

    def terminateBuild(self):
        # Idempotent: only the first failure transitions the state.
        if self.build_failed:
            return
        self.build_failed = True
        self.close_idle()

    def receiveClose(self, channel):
        self.procman.shutdown()
        self.pump.cancel()

    def onWorkerResults(self, child, message):
        # Relay the worker's result upstream first.
        self.channel.send(message)

        if not message['ok']:
            self.channel.send({
                'id': 'completed',
                'status': 'failed',
            })
            self.procman.close(child)
            self.terminateBuild()
            return

        task_id = message['task_id']
        task, child = self.outstanding[task_id]
        del self.outstanding[task_id]

        # Unblock any dependents whose final prerequisite just completed.
        for dependent in task.outgoing:
            dependent.incoming.remove(task)
            if not dependent.incoming:
                self.task_graph.append(dependent)

        self.onWorkerReady(child, None)

        # Drain newly-runnable tasks into any idle workers.
        while self.task_graph and self.idle:
            worker = self.idle.pop()
            if not self.onWorkerReady(worker, None):
                break

    def close_idle(self):
        for worker in self.idle:
            self.procman.close(worker)
        self.idle = set()

    def onWorkerReady(self, child, message):
        # A 'ready' that is not a completion means the worker just spawned.
        if message and not message['finished']:
            self.channel.send({'id': 'spawned', 'pid': child.pid, 'type': 'worker'})

        # Failed build: retire this worker instead of feeding it tasks.
        if self.build_failed:
            self.procman.close(child)
            self.maybe_request_shutdown(child)
            return

        if not self.task_graph:
            if self.outstanding:
                # Work remains but is blocked on in-flight tasks; park the
                # worker until something becomes runnable.
                self.idle.add(child)
            else:
                # Nothing queued and nothing in flight: the build is done.
                self.build_completed = True
                self.procman.close(child)
                self.close_idle()
                self.channel.send({
                    'id': 'completed',
                    'status': 'ok'
                })
            return False

        # Dispatch the next runnable task to this worker.
        task = self.task_graph.pop()
        child.send({
            'id': 'task',
            'task_id': task.id,
            'task_type': task.type,
            'task_data': task.data,
            'task_folder': task.folder,
            'task_outputs': task.outputs
        })
        self.outstanding[task.id] = (task, child)
        return True

    def onWorkerCrashed(self, child, task):
        self.channel.send({
            'id': 'completed',
            'status': 'crashed',
            'task_id': task.id,
        })
        self.terminateBuild()

    def onWorkerDied(self, child, error):
        if error != Error.NormalShutdown:
            for task_id in self.outstanding.keys():
                task, owner = self.outstanding[task_id]
                if owner == child:
                    # The worker disappeared without a normal shutdown, so the
                    # main process must hear about the task it was holding.
                    self.onWorkerCrashed(child, task)
                    break

        self.idle.discard(child)
        self.maybe_request_shutdown(child)

    def maybe_request_shutdown(self, child):
        # Only request shutdown once every other child process is gone.
        for sibling in self.procman.children:
            if sibling == child:
                continue
            if sibling.is_alive():
                return

        # No other child processes are live, so we can ask for a safe
        # shutdown; this guarantees queued messages arrive before the pipe
        # closes, since closing one end destroys any leftover data.
        self.channel.send({
            'id': 'done'
        })
class Context(object):
    """Per-invocation build context.

    Responsible for loading the pickled 'vars' configuration, connecting the
    graph database, restoring the configure-time environment, and running
    (or re-running) the build.
    """
    def __init__(self, buildPath, options, args):
        self.buildPath = buildPath
        self.options = options
        self.args = args
        self.cacheFolder = os.path.join(buildPath, '.ambuild2')
        self.dbpath = os.path.join(self.cacheFolder, 'graph')

        # This doesn't completely work yet because it's not communicated to child
        # processes. We'll have to send a message down or up to fix this.
        if self.options.no_color:
            util.DisableConsoleColors()

        with open(os.path.join(self.cacheFolder, 'vars'), 'rb') as fp:
            try:
                self.vars = util.pickle.load(fp)
            except ValueError:
                sys.stderr.write(
                    'Build was configured with Python 3; use python3 instead.\n'
                )
                sys.exit(1)
            except Exception:
                # BUGFIX: branches were swapped. Only a *missing* vars file
                # indicates there is no build configured here; a present but
                # unloadable file indicates corruption, which we re-raise.
                if not os.path.exists(os.path.join(self.cacheFolder, 'vars')):
                    sys.stderr.write(
                        'There does not appear to be a build configured here.\n'
                    )
                else:
                    sys.stderr.write(
                        'The build configured here looks corrupt; you will have to delete your objdir.\n'
                    )
                    raise
                sys.exit(1)

        self.restore_environment()
        self.db = database.Database(self.dbpath)
        self.messagePump = MessagePump()
        self.procman = ProcessManager(self.messagePump)
        self.db.connect()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Tear down children first, then the database.
        self.procman.shutdown()
        self.db.close()

    # Restore important environment properties that were present when this
    # build was configured.
    def restore_environment(self):
        if 'env' not in self.vars:
            return
        env = self.vars['env']
        for key in env:
            os.environ[key] = env[key]

    def reconfigure(self):
        """Reparse build scripts when any is newer than its recorded stamp.

        Returns True when no reparse was needed or it succeeded; False when
        reparsing failed.
        """
        # See if we need to reconfigure.
        files = []
        reconfigure_needed = False
        self.db.query_scripts(lambda row, path, stamp: files.append((path, stamp)))
        for path, stamp in files:
            if not os.path.exists(path) or os.path.getmtime(path) > stamp:
                reconfigure_needed = True
                break

        if not reconfigure_needed:
            return True

        util.con_out(util.ConsoleHeader, 'Reparsing build scripts.', util.ConsoleNormal)

        # The database should be upgraded here, so we should always have an
        # API version set.
        api_version = self.db.query_var('api_version')
        assert api_version is not None

        if api_version == '2.0':
            from ambuild2.frontend.v2_0.amb2.gen import Generator
        elif api_version == '2.1':
            from ambuild2.frontend.v2_1.amb2 import Generator

        gen = Generator.FromVars(self.vars, self.db, self.options.refactor)
        try:
            gen.generate()
        except Exception:
            # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are not reported as script failures.
            traceback.print_exc()
            util.con_err(util.ConsoleRed, 'Failed to reparse build scripts.',
                         util.ConsoleNormal)
            return False

        # We flush the node cache after this, since database.py expects to get
        # never-before-seen items at the start. We could change this and make
        # nodes individually import, which might be cleaner.
        self.db.flush_caches()
        return True

    def Build(self):
        if not self.reconfigure():
            return False
        return self.build_internal()

    def build_internal(self):
        """Honor inspection-only flags, otherwise compute damage and build."""
        if self.options.show_graph:
            self.db.printGraph()
            return True

        if self.options.show_changed:
            dmg_list = damage.ComputeDamageGraph(self.db, only_changed=True)
            for entry in dmg_list:
                if not entry.isFile():
                    continue
                print(entry.format())
            return True

        dmg_graph = damage.ComputeDamageGraph(self.db)
        if not dmg_graph:
            return False

        # If we get here, we have to compute damage.
        if self.options.show_damage:
            dmg_graph.printGraph()
            return True

        dmg_graph.filter_commands()

        if self.options.show_commands:
            dmg_graph.printGraph()
            return True

        builder = Builder(self, dmg_graph)
        if self.options.show_steps:
            builder.printSteps()
            return True

        if not builder.update():
            util.con_err(util.ConsoleHeader, 'Build failed.', util.ConsoleNormal)
            return False

        util.con_out(util.ConsoleHeader, 'Build succeeded.', util.ConsoleNormal)
        return True