def _start_annotating(self, event_source, event):
    # Ask the shared artifact cache which of the graph's artifacts it
    # already holds, so already-built artifacts are not rebuilt.
    distbuild.crash_point()

    self._artifact = event.artifact
    self._helper_id = self._idgen.next()

    artifact_names = []

    def set_state_and_append(artifact):
        # Until the cache answers, every node's state is unknown.
        artifact.state = UNKNOWN
        artifact_names.append(artifact.basename())

    map_build_graph(self._artifact, set_state_and_append)

    url = urlparse.urljoin(self._artifact_cache_server, '/1.0/artifacts')
    msg = distbuild.message('http-request',
        id=self._helper_id,
        url=url,
        headers={'Content-type': 'application/json'},
        body=json.dumps(artifact_names),
        method='POST')
    request = distbuild.HelperRequest(msg)
    self.mainloop.queue_event(distbuild.HelperRouter, request)

    logging.debug('Made cache request for state of artifacts '
        '(helper id: %s)' % self._helper_id)
def _disconnected(self, event_source, event):
    '''Handle the JSON link to the worker going away.

    Initiators are told their jobs are gone, and the connection
    machine is asked to re-establish the link.
    '''
    distbuild.crash_point()

    logging.debug('WC: Disconnected from worker %s' % self.name())
    queue = self.mainloop.queue_event
    queue(InitiatorConnection, _Disconnected(self))
    queue(self._cm, distbuild.Reconnect())
def _start_build(self, event_source, event):
    # Send an exec-request telling the worker to build event.job's
    # artifact, then announce the step start to interested parties.
    distbuild.crash_point()

    self._job = event.job
    self._helper_id = None
    self._exec_response_msg = None

    logging.debug('WC: starting build: %s for %s' %
                  (self._job.artifact.name, self._job.initiators))

    argv = [
        self._morph_instance,
        'worker-build',
        '--build-log-on-stdout',
        self._job.artifact.name,
    ]
    msg = distbuild.message('exec-request',
        id=self._job.id,
        argv=argv,
        # The serialised build graph is fed to the worker on stdin.
        stdin_contents=distbuild.serialise_artifact(self._job.artifact),
    )
    self._jm.send(msg)

    if self._debug_json:
        logging.debug('WC: sent to worker %s: %r' %
                      (self._worker_name, msg))

    started = WorkerBuildStepStarted(self._job.initiators,
        self._job.artifact.source.cache_key, self.name())

    self.mainloop.queue_event(WorkerConnection, _JobStarted(self._job))
    self.mainloop.queue_event(WorkerConnection, started)
def setup(self):
    # Wire up the state machine for one worker connection:
    # idle -> building -> caching -> idle, with JsonEof (connection
    # lost) tearing the machine down from any state.
    distbuild.crash_point()

    logging.debug('WC: Setting up instance %s' % repr(self))

    self._jm = distbuild.JsonMachine(self._conn)
    self.mainloop.add_state_machine(self._jm)

    spec = [
        # state, source, event_class, new_state, callback
        ('idle', self._jm, distbuild.JsonEof, None, self._disconnected),
        ('idle', self, _HaveAJob, 'building', self._start_build),
        ('building', distbuild.BuildController, distbuild.BuildCancel,
            'building', self._maybe_cancel),
        ('building', self._jm, distbuild.JsonEof, None,
            self._disconnected),
        ('building', self._jm, distbuild.JsonNewMessage, 'building',
            self._handle_json_message),
        ('building', self, _BuildFailed, 'idle', self._request_job),
        ('building', self, _BuildCancelled, 'idle', self._request_job),
        ('building', self, _BuildFinished, 'caching',
            self._request_caching),
        ('caching', self._jm, distbuild.JsonEof, None,
            self._disconnected),
        ('caching', distbuild.HelperRouter, distbuild.HelperResult,
            'caching', self._maybe_handle_helper_result),
        ('caching', self, _Cached, 'idle', self._request_job),
        ('caching', self, _BuildFailed, 'idle', self._request_job),
    ]
    self.add_transitions(spec)

    # Immediately advertise ourselves as ready for work.
    self._request_job(None, None)
def _handle_worker(self, event_source, event):
    # A worker asked for a job: retire the job it just finished (if
    # any), queue the worker as available, and hand out the next job.
    distbuild.crash_point()

    who = event.who
    last_job = who.job()  # the job this worker's just completed

    if last_job:
        logging.debug('%s wants new job, just did %s',
                      who.name(), last_job.artifact.basename())
        logging.debug('Removing job %s with job id %s',
                      last_job.artifact.basename(), last_job.id)
        self._jobs.remove(last_job)
    else:
        logging.debug('%s wants its first job', who.name())

    logging.debug('WBQ: Adding worker to queue: %s', event.who.name())
    # The full _NeedJob event is queued (not just the worker) so the
    # dispatcher later knows which connection to give the job to.
    self._available_workers.append(event)

    logging.debug('Current jobs: %s', self._jobs)
    logging.debug('Workers available: %d', len(self._available_workers))

    job = self._jobs.get_next_job()

    if job:
        self._give_job(job)
def _maybe_check_result_and_queue_more_builds(self, event_source, event):
    # A worker finished a build: if it belongs to this build request,
    # mark the artifact (and, for chunks, its sibling artifacts from
    # the same source) BUILT, then queue whatever has become ready.
    distbuild.crash_point()

    if self._request['id'] not in event.msg['ids']:
        return  # not for us

    artifact = self._find_artifact(event.artifact_cache_key)
    if artifact is None:
        # This is not the event you are looking for.
        return

    logging.debug(
        'Got build result for %s: %s', artifact.name, repr(event.msg))

    finished = BuildStepFinished(
        self._request['id'], build_step_name(artifact))
    self.mainloop.queue_event(BuildController, finished)

    artifact.state = BUILT

    def set_state(a):
        if a.source == artifact.source:
            a.state = BUILT

    if artifact.source.morphology['kind'] == 'chunk':
        # Building a single chunk artifact
        # yields all chunk artifacts for the given source
        # so we set the state of this source's artifacts
        # to BUILT
        map_build_graph(self._artifact, set_state)

    self._queue_worker_builds(None, event)
def _start_graphing(self, event_source, event):
    # Launch `morph serialise-artifact` through the helper process to
    # compute the build graph for the requested repo/ref/morphology.
    distbuild.crash_point()

    logging.info('Start constructing build graph')
    # Buffers collect the helper's stdout (graph) and stderr (errors).
    self._artifact_data = distbuild.StringBuffer()
    self._artifact_error = distbuild.StringBuffer()
    argv = [
        self._morph_instance,
        'serialise-artifact',
        '--quiet',
        self._request['repo'],
        self._request['ref'],
        self._request['morphology'],
    ]
    if 'original_ref' in self._request:
        argv.append(self._request['original_ref'])
    msg = distbuild.message('exec-request',
        id=self._idgen.next(),
        argv=argv,
        stdin_contents='')
    # Remember the helper id so later HelperOutput/HelperResult events
    # can be matched against this request.
    self._helper_id = msg['id']
    req = distbuild.HelperRequest(msg)
    self.mainloop.queue_event(distbuild.HelperRouter, req)

    progress = BuildProgress(self._request['id'], 'Computing build graph')
    self.mainloop.queue_event(BuildController, progress)
def _maybe_handle_helper_result(self, event_source, event):
    # Outcome of the cache-population HTTP request made after a build:
    # on success announce WorkerBuildFinished, otherwise fail the job.
    if event.msg['id'] == self._helper_id:
        distbuild.crash_point()

        logging.debug('caching: event.msg: %s' % repr(event.msg))
        if event.msg['status'] == httplib.OK:
            logging.debug('Shared artifact cache population done')

            new_event = WorkerBuildFinished(
                self._exec_response_msg,
                self._job.artifact.source.cache_key)
            self.mainloop.queue_event(WorkerConnection, new_event)
            self.mainloop.queue_event(self, _Cached())
        else:
            logging.error(
                'Failed to populate artifact cache: %s %s' %
                (event.msg['status'], event.msg['body']))

            # We will attempt to remove this job twice
            # unless we mark it as failed before the BuildController
            # processes the WorkerBuildFailed event.
            #
            # The BuildController will not try to cancel jobs that have
            # been marked as failed.
            self.mainloop.queue_event(WorkerConnection,
                                      _JobFailed(self._job))

            new_event = WorkerBuildFailed(
                self._exec_response_msg,
                self._job.artifact.source.cache_key)
            self.mainloop.queue_event(WorkerConnection, new_event)

            self.mainloop.queue_event(self, _BuildFailed())

        # The job is finished either way; let the queuer retire it.
        self.mainloop.queue_event(WorkerConnection,
                                  _JobFinished(self._job))
def setup(self):
    # Connect our JSON message machine, declare the 'waiting' state
    # transitions, then send the initial build-request.
    distbuild.crash_point()

    self._jm = distbuild.JsonMachine(self._conn)
    self.mainloop.add_state_machine(self._jm)
    logging.debug('initiator: _jm=%s' % repr(self._jm))

    spec = [
        # state, source, event_class, new_state, callback
        ('waiting', self._jm, distbuild.JsonEof, None, self._terminate),
        ('waiting', self._jm, distbuild.JsonNewMessage, 'waiting',
            self._handle_json_message),
        ('waiting', self, _Finished, None, self._succeed),
        ('waiting', self, _Failed, None, self._fail),
    ]
    self.add_transitions(spec)

    # Random id keeps concurrent requests distinguishable server-side.
    random_id = random.randint(0, 2**32 - 1)
    self._app.status(msg='Requesting build of %(repo)s %(ref)s %(morph)s',
                     repo=self._repo_name,
                     ref=self._ref,
                     morph=self._morphology)
    # NOTE(review): unlike the other setup variant in this file, no
    # protocol_version field is sent here — confirm which protocol
    # the controller expects.
    msg = distbuild.message('build-request',
        id=random_id,
        repo=self._repo_name,
        ref=self._ref,
        morphology=self._morphology,
        original_ref=self._original_ref)
    self._jm.send(msg)
    logging.debug('Initiator: sent to controller: %s', repr(msg))
def _start_build(self, event_source, event):
    # Hand the worker an exec-request for event.job's artifact and
    # notify the queuer and initiators that the step has started.
    distbuild.crash_point()

    self._job = event.job
    self._helper_id = None
    self._exec_response_msg = None

    logging.debug('WC: starting build: %s for %s' %
                  (self._job.artifact.name, self._job.initiators))

    argv = [
        self._morph_instance,
        'worker-build',
        '--build-log-on-stdout',
        self._job.artifact.name,
    ]

    msg = distbuild.message(
        'exec-request',
        id=self._job.id,
        argv=argv,
        # The worker reads the serialised build graph from stdin.
        stdin_contents=distbuild.serialise_artifact(self._job.artifact),
    )
    self._jm.send(msg)

    if self._debug_json:
        logging.debug('WC: sent to worker %s: %r' %
                      (self._worker_name, msg))

    started = WorkerBuildStepStarted(self._job.initiators,
        self._job.artifact.source.cache_key, self.name())

    self.mainloop.queue_event(WorkerConnection, _JobStarted(self._job))
    self.mainloop.queue_event(WorkerConnection, started)
def setup(self):
    # The queuer is a single-state machine: it stays 'idle' and reacts
    # to build requests, cancellations, and worker lifecycle events.
    distbuild.crash_point()

    logging.debug('WBQ: Setting up %s' % self)
    self._available_workers = []
    self._jobs = Jobs(
        distbuild.IdentifierGenerator('WorkerBuildQueuerJob'))

    spec = [
        # state, source, event_class, new_state, callback
        ('idle', WorkerBuildQueuer, WorkerBuildRequest, 'idle',
            self._handle_request),
        ('idle', WorkerBuildQueuer, WorkerCancelPending, 'idle',
            self._handle_cancel),
        ('idle', WorkerConnection, _NeedJob, 'idle', self._handle_worker),
        ('idle', WorkerConnection, _JobStarted, 'idle',
            self._set_job_started),
        ('idle', WorkerConnection, _JobFinished, 'idle',
            self._set_job_finished),
        ('idle', WorkerConnection, _JobFailed, 'idle',
            self._set_job_failed),
        ('idle', WorkerConnection, _Disconnected, 'idle',
            self._handle_worker_disconnected),
    ]
    self.add_transitions(spec)
def setup(self):
    # Worker-connection state machine variant that reconnects (rather
    # than tears down) when the JSON link drops.
    distbuild.crash_point()

    logging.debug('WC: Setting up instance %s' % repr(self))

    self._jm = distbuild.JsonMachine(self._conn)
    self.mainloop.add_state_machine(self._jm)

    # NOTE(review): there is no JsonEof transition for the 'caching'
    # state here, unlike the other setup variant — confirm a drop
    # during caching is handled elsewhere.
    spec = [
        # state, source, event_class, new_state, callback
        ('idle', self._jm, distbuild.JsonEof, None, self._reconnect),
        ('idle', self, _HaveAJob, 'building', self._start_build),
        ('building', distbuild.BuildController, distbuild.BuildCancel,
            'building', self._maybe_cancel),
        ('building', self._jm, distbuild.JsonEof, None, self._reconnect),
        ('building', self._jm, distbuild.JsonNewMessage, 'building',
            self._handle_json_message),
        ('building', self, _BuildFailed, 'idle', self._request_job),
        ('building', self, _BuildCancelled, 'idle', self._request_job),
        ('building', self, _BuildFinished, 'caching',
            self._request_caching),
        ('caching', distbuild.HelperRouter, distbuild.HelperResult,
            'caching', self._maybe_handle_helper_result),
        ('caching', self, _Cached, 'idle', self._request_job),
        ('caching', self, _BuildFailed, 'idle', self._request_job),
    ]
    self.add_transitions(spec)

    # Immediately advertise ourselves as ready for work.
    self._request_job(None, None)
def _maybe_check_result_and_queue_more_builds(self, event_source, event):
    # Worker build finished: if it is ours, mark the artifact BUILT
    # (and chunk siblings from the same source), then queue more work.
    distbuild.crash_point()

    if self._request['id'] not in event.msg['ids']:
        return  # not for us

    artifact = self._find_artifact(event.artifact_cache_key)
    if artifact is None:
        # This is not the event you are looking for.
        return

    logging.debug('Got build result for %s: %s',
                  artifact.name, repr(event.msg))

    finished = BuildStepFinished(self._request['id'],
                                 build_step_name(artifact))
    self.mainloop.queue_event(BuildController, finished)

    artifact.state = BUILT

    def set_state(a):
        if a.source == artifact.source:
            a.state = BUILT

    if artifact.source.morphology['kind'] == 'chunk':
        # Building a single chunk artifact
        # yields all chunk artifacts for the given source
        # so we set the state of this source's artifacts
        # to BUILT
        map_build_graph(self._artifact, set_state)

    self._queue_worker_builds(None, event)
def _maybe_finish_graph(self, event_source, event):
    # serialise-artifact helper finished: either fail the build or
    # deserialise the resulting graph and announce it.
    distbuild.crash_point()

    def notify_success(artifact):
        logging.debug('Graph is finished')

        progress = BuildProgress(
            self._request['id'], 'Finished computing build graph')
        self.mainloop.queue_event(BuildController, progress)

        build_steps = BuildSteps(self._request['id'], artifact)
        self.mainloop.queue_event(BuildController, build_steps)

        self.mainloop.queue_event(self, _GotGraph(artifact))

    if event.msg['id'] == self._helper_id:
        self._helper_id = None

        error_text = self._artifact_error.peek()
        if event.msg['exit'] != 0 or error_text:
            # NOTE(review): when exit == 0 but stderr was produced we
            # report failure yet still fall through to deserialise —
            # confirm that is intended.
            self.fail(error_text)

        if event.msg['exit'] != 0:
            return

        text = self._artifact_data.peek()
        try:
            artifact = distbuild.deserialise_artifact(text)
        except ValueError, e:
            logging.error(traceback.format_exc())
            self.fail('Failed to compute build graph: %s' % e)
            return

        notify_success(artifact)
def setup(self):
    # Connect the JSON machine, register 'waiting' transitions, then
    # send the initial build-request (including protocol version).
    distbuild.crash_point()

    self._jm = distbuild.JsonMachine(self._conn)
    self.mainloop.add_state_machine(self._jm)
    logging.debug('initiator: _jm=%s' % repr(self._jm))

    spec = [
        # state, source, event_class, new_state, callback
        ('waiting', self._jm, distbuild.JsonEof, None, self._terminate),
        ('waiting', self._jm, distbuild.JsonNewMessage, 'waiting',
            self._handle_json_message),
        ('waiting', self, _Finished, None, self._succeed),
        ('waiting', self, _Failed, None, self._fail),
    ]
    self.add_transitions(spec)

    # Random id keeps concurrent requests distinguishable server-side.
    random_id = random.randint(0, 2**32-1)
    self._app.status(
        msg='Requesting build of %(repo)s %(ref)s %(morph)s',
        repo=self._repo_name,
        ref=self._ref,
        morph=self._morphology)
    msg = distbuild.message('build-request',
        id=random_id,
        repo=self._repo_name,
        ref=self._ref,
        morphology=self._morphology,
        original_ref=self._original_ref,
        # Lets the controller reject clients speaking an older protocol.
        protocol_version=distbuild.protocol.VERSION
    )
    self._jm.send(msg)
    logging.debug('Initiator: sent to controller: %s', repr(msg))
def _maybe_handle_helper_result(self, event_source, event):
    # Result of the post-build cache-population HTTP request: announce
    # WorkerBuildFinished on success, otherwise fail the job.
    if event.msg['id'] == self._helper_id:
        distbuild.crash_point()

        logging.debug('caching: event.msg: %s' % repr(event.msg))
        if event.msg['status'] == httplib.OK:
            logging.debug('Shared artifact cache population done')

            new_event = WorkerBuildFinished(
                self._exec_response_msg,
                self._job.artifact.source.cache_key)
            self.mainloop.queue_event(WorkerConnection, new_event)
            self.mainloop.queue_event(self, _Cached())
        else:
            logging.error('Failed to populate artifact cache: %s %s' %
                          (event.msg['status'], event.msg['body']))

            # We will attempt to remove this job twice
            # unless we mark it as failed before the BuildController
            # processes the WorkerBuildFailed event.
            #
            # The BuildController will not try to cancel jobs that have
            # been marked as failed.
            self.mainloop.queue_event(WorkerConnection,
                                      _JobFailed(self._job))

            new_event = WorkerBuildFailed(
                self._exec_response_msg,
                self._job.artifact.source.cache_key)
            self.mainloop.queue_event(WorkerConnection, new_event)

            self.mainloop.queue_event(self, _BuildFailed())

        # The job is finished either way; let the queuer retire it.
        self.mainloop.queue_event(WorkerConnection,
                                  _JobFinished(self._job))
def __init__(self, initiator_connection, build_request_message,
             artifact_cache_server, morph_instance):
    # One controller per build request; the state machine starts in
    # the 'init' state (see setup for the transition table).
    distbuild.crash_point()
    distbuild.StateMachine.__init__(self, 'init')
    self._initiator_connection = initiator_connection  # who asked for the build
    self._request = build_request_message              # the raw request message
    self._artifact_cache_server = artifact_cache_server  # cache base URL
    self._morph_instance = morph_instance              # morph executable to run
    self._helper_id = None  # id of our outstanding helper request, if any
    self.debug_transitions = False
    self.debug_graph_state = False
def _notify_build_done(self, event_source, event):
    '''Tell the initiator where the finished artifact can be fetched.'''
    distbuild.crash_point()

    logging.debug('Notifying initiator of successful build')

    baseurl = urlparse.urljoin(
        self._artifact_cache_server, '/1.0/artifacts')
    source = self._artifact.source
    filename = '%s.%s.%s' % (source.cache_key,
                             source.morphology['kind'],
                             self._artifact.name)
    url = '%s?filename=%s' % (baseurl, urllib.quote(filename))
    finished = BuildFinished(self._request['id'], [url])
    self.mainloop.queue_event(BuildController, finished)
def _maybe_relay_build_caching(self, event_source, event):
    '''Relay a worker's cache-upload notice to our initiator, if ours.'''
    distbuild.crash_point()

    if self._request['id'] not in event.initiators:
        return  # not for us

    artifact = self._find_artifact(event.artifact_cache_key)
    if artifact is None:
        # This is not the event you are looking for.
        return

    text = 'Transferring %s to shared artifact cache' % artifact.name
    progress = BuildProgress(self._request['id'], text)
    self.mainloop.queue_event(BuildController, progress)
def _handle_json_message(self, event_source, event):
    '''Handle JSON messages from the worker.

    Only exec-output and exec-response are understood; anything else
    raises KeyError, as before.
    '''
    distbuild.crash_point()

    logging.debug('WC: from worker %s: %r' % (self._worker_name,
                                              event.msg))
    msg = event.msg
    kind = msg['type']
    if kind == 'exec-output':
        self._handle_exec_output(msg)
    elif kind == 'exec-response':
        self._handle_exec_response(msg)
    else:
        raise KeyError(kind)
def _handle_json_message(self, event_source, event):
    '''Handle JSON messages from the worker.'''

    distbuild.crash_point()
    logging.debug(
        'WC: from worker %s: %r' % (self._worker_name, event.msg))
    handlers = {
        'exec-output': self._handle_exec_output,
        'exec-response': self._handle_exec_response,
    }
    # An unknown message type raises KeyError here, deliberately.
    handler = handlers[event.msg['type']]
    handler(event.msg)
def _maybe_relay_build_output(self, event_source, event):
    '''Forward a worker's build stdout/stderr to our initiator.'''
    distbuild.crash_point()

    msg = event.msg
    if self._request['id'] not in msg['ids']:
        return  # not for us

    logging.debug('BC: got output: %s' % repr(msg))
    artifact = self._find_artifact(event.artifact_cache_key)
    logging.debug('BC: got artifact: %s' % repr(artifact))
    if artifact is None:
        # This is not the event you are looking for.
        return

    output = BuildOutput(self._request['id'],
                         build_step_name(artifact),
                         msg['stdout'],
                         msg['stderr'])
    self.mainloop.queue_event(BuildController, output)
    logging.debug('BC: queued %s' % repr(output))
def _maybe_relay_build_output(self, event_source, event):
    # Relay worker build output to the initiator, if it is ours.
    distbuild.crash_point()

    if self._request['id'] not in event.msg['ids']:
        return  # not for us

    logging.debug('BC: got output: %s' % repr(event.msg))

    artifact = self._find_artifact(event.artifact_cache_key)
    logging.debug('BC: got artifact: %s' % repr(artifact))

    if artifact is None:
        # This is not the event you are looking for.
        return

    output = BuildOutput(
        self._request['id'], build_step_name(artifact),
        event.msg['stdout'], event.msg['stderr'])
    self.mainloop.queue_event(BuildController, output)

    logging.debug('BC: queued %s' % repr(output))
def _request_caching(self, event_source, event):
    # This code should be moved into the morphlib.remoteartifactcache
    # module. It would be good to share it with morphlib.buildcommand,
    # which also wants to fetch artifacts from a remote cache.
    distbuild.crash_point()

    logging.debug('Requesting shared artifact cache to get artifacts')

    kind = self._job.artifact.source.morphology['kind']

    if kind == 'chunk':
        # A chunk build produces every chunk artifact of the source,
        # plus a build log; fetch them all.
        source_artifacts = self._job.artifact.source.artifacts

        suffixes = ['%s.%s' % (kind, name) for name in source_artifacts]
        suffixes.append('build-log')
    else:
        filename = '%s.%s' % (kind, self._job.artifact.name)
        suffixes = [filename]

        if kind == 'stratum':
            suffixes.append(filename + '.meta')

    suffixes = [urllib.quote(x) for x in suffixes]
    suffixes = ','.join(suffixes)

    # The shared cache pulls straight from the worker's own cache.
    worker_host = self._conn.getpeername()[0]

    url = urlparse.urljoin(
        self._writeable_cache_server,
        '/1.0/fetch?host=%s:%d&cacheid=%s&artifacts=%s' %
        (urllib.quote(worker_host),
         self._worker_cache_server_port,
         urllib.quote(self._job.artifact.source.cache_key),
         suffixes))

    msg = distbuild.message('http-request',
        id=self._request_ids.next(),
        url=url,
        method='GET',
        body=None,
        headers=None)
    self._helper_id = msg['id']
    req = distbuild.HelperRequest(msg)
    self.mainloop.queue_event(distbuild.HelperRouter, req)

    progress = WorkerBuildCaching(self._job.initiators,
        self._job.artifact.source.cache_key)
    self.mainloop.queue_event(WorkerConnection, progress)
def _maybe_finish_graph(self, event_source, event):
    # serialise-artifact helper finished: report failure to the
    # initiator, or deserialise the graph and announce success.
    distbuild.crash_point()

    def notify_failure(msg_text):
        logging.error('Graph creation failed: %s' % msg_text)

        failed = BuildFailed(
            self._request['id'],
            'Failed to compute build graph: %s' % msg_text)
        self.mainloop.queue_event(BuildController, failed)

        self.mainloop.queue_event(self, _GraphFailed())

    def notify_success(artifact):
        logging.debug('Graph is finished')

        progress = BuildProgress(self._request['id'],
            'Finished computing build graph')
        self.mainloop.queue_event(BuildController, progress)

        build_steps = BuildSteps(self._request['id'], artifact)
        self.mainloop.queue_event(BuildController, build_steps)

        self.mainloop.queue_event(self, _GotGraph(artifact))

    if event.msg['id'] == self._helper_id:
        self._helper_id = None

        error_text = self._artifact_error.peek()
        if event.msg['exit'] != 0 or error_text:
            # NOTE(review): when exit == 0 but stderr was produced we
            # report failure yet still fall through to deserialise —
            # confirm that is intended.
            notify_failure('Problem with serialise-artifact: %s' %
                           error_text)

        if event.msg['exit'] != 0:
            return

        text = self._artifact_data.peek()
        try:
            artifact = distbuild.deserialise_artifact(text)
        except ValueError, e:
            logging.error(traceback.format_exc())
            notify_failure(str(e))
            return

        notify_success(artifact)
def _request_caching(self, event_source, event):
    # This code should be moved into the morphlib.remoteartifactcache
    # module. It would be good to share it with morphlib.buildcommand,
    # which also wants to fetch artifacts from a remote cache.
    distbuild.crash_point()

    logging.debug('Requesting shared artifact cache to get artifacts')

    kind = self._job.artifact.source.morphology['kind']

    if kind == 'chunk':
        # A chunk build yields every chunk artifact of its source plus
        # a build log; ask the shared cache to fetch all of them.
        source_artifacts = self._job.artifact.source.artifacts

        suffixes = ['%s.%s' % (kind, name) for name in source_artifacts]
        suffixes.append('build-log')
    else:
        filename = '%s.%s' % (kind, self._job.artifact.name)
        suffixes = [filename]

        if kind == 'stratum':
            suffixes.append(filename + '.meta')

    suffixes = [urllib.quote(x) for x in suffixes]
    suffixes = ','.join(suffixes)

    # The shared cache pulls straight from the worker's own cache.
    worker_host = self._conn.getpeername()[0]

    url = urlparse.urljoin(
        self._writeable_cache_server,
        '/1.0/fetch?host=%s:%d&cacheid=%s&artifacts=%s' %
        (urllib.quote(worker_host),
         self._worker_cache_server_port,
         urllib.quote(self._job.artifact.source.cache_key),
         suffixes))

    msg = distbuild.message(
        'http-request',
        id=self._request_ids.next(),
        url=url,
        method='GET',
        body=None,
        headers=None)
    self._helper_id = msg['id']
    req = distbuild.HelperRequest(msg)
    self.mainloop.queue_event(distbuild.HelperRouter, req)

    progress = WorkerBuildCaching(self._job.initiators,
        self._job.artifact.source.cache_key)
    self.mainloop.queue_event(WorkerConnection, progress)
def _maybe_relay_build_step_started(self, event_source, event):
    '''Pass a worker's step-started notice on to our initiator.'''
    distbuild.crash_point()

    if self._request['id'] not in event.initiators:
        return  # not for us

    logging.debug(
        'BC: _relay_build_step_started: %s' % event.artifact_cache_key)

    artifact = self._find_artifact(event.artifact_cache_key)
    if artifact is None:
        # This is not the event you are looking for.
        return

    logging.debug('BC: got build step started: %s' % artifact.name)
    step = build_step_name(artifact)
    started = BuildStepStarted(
        self._request['id'], step, event.worker_name)
    self.mainloop.queue_event(BuildController, started)
    logging.debug('BC: emitted %s' % repr(started))
def _queue_worker_builds(self, event_source, event):
    # Scan the build graph and hand every ready-to-build artifact to
    # the worker build queuer; finish if the root artifact is BUILT.
    distbuild.crash_point()

    if self._artifact.state == BUILT:
        logging.info('Requested artifact is built')
        self.mainloop.queue_event(self, _Built())
        return

    logging.debug('Queuing more worker-builds to run')
    if self.debug_graph_state:
        logging.debug('Current state of build graph nodes:')
        for a in map_build_graph(self._artifact, lambda a: a):
            logging.debug(' %s state is %s' % (a.name, a.state))
            if a.state != BUILT:
                for dep in a.dependencies:
                    logging.debug(
                        ' depends on %s which is %s' %
                        (dep.name, dep.state))

    # Loop: marking one artifact BUILDING can make others ready, so
    # keep asking until nothing new is ready.
    while True:
        ready = self._find_artifacts_that_are_ready_to_build()

        if len(ready) == 0:
            logging.debug('No new artifacts queued for building')
            break

        artifact = ready[0]

        logging.debug(
            'Requesting worker-build of %s (%s)' %
            (artifact.name, artifact.source.cache_key))
        request = distbuild.WorkerBuildRequest(artifact,
                                               self._request['id'])
        self.mainloop.queue_event(distbuild.WorkerBuildQueuer, request)

        artifact.state = BUILDING
        if artifact.source.morphology['kind'] == 'chunk':
            # Chunk artifacts are not built independently
            # so when we're building any chunk artifact
            # we're also building all the chunk artifacts
            # in this source
            for a in ready:
                if a.source == artifact.source:
                    a.state = BUILDING
def _handle_json_message(self, event_source, event):
    '''Dispatch a controller status message to its specific handler.

    An unrecognised message type raises KeyError, as before.
    '''
    distbuild.crash_point()

    logging.debug('Initiator: from controller: %s' % repr(event.msg))

    msg = event.msg
    dispatch = {
        'build-finished': self._handle_build_finished_message,
        'build-failed': self._handle_build_failed_message,
        'build-progress': self._handle_build_progress_message,
        'build-steps': self._handle_build_steps_message,
        'step-started': self._handle_step_started_message,
        'step-already-started': self._handle_step_already_started_message,
        'step-output': self._handle_step_output_message,
        'step-finished': self._handle_step_finished_message,
        'step-failed': self._handle_step_failed_message,
    }
    dispatch[msg['type']](msg)
def _maybe_notify_build_failed(self, event_source, event):
    '''Handle a WorkerBuildFailed event that may belong to this build.

    Relays the step failure and overall build failure to the
    initiator, cancels pending and running jobs, and aborts.
    '''
    distbuild.crash_point()

    if self._request['id'] not in event.msg['ids']:
        return  # not for us

    artifact = self._find_artifact(event.artifact_cache_key)

    if artifact is None:
        logging.error(
            'BuildController %r: artifact %s is not in our build graph!',
            self, artifact)
        # We abort the build in this case on the grounds that something is
        # very wrong internally, and it's best for the initiator to receive
        # an error than to be left hanging.
        self.mainloop.queue_event(self, _Abort())
        # BUG FIX: previously execution fell through to artifact.name
        # below, raising AttributeError on None instead of aborting
        # cleanly.
        return

    logging.info(
        'Build step failed for %s: %s', artifact.name, repr(event.msg))

    step_failed = BuildStepFailed(
        self._request['id'], build_step_name(artifact))
    self.mainloop.queue_event(BuildController, step_failed)

    build_failed = BuildFailed(
        self._request['id'],
        'Building failed for %s' % artifact.name)
    self.mainloop.queue_event(BuildController, build_failed)

    # Cancel any jobs waiting to be executed, since there is no point
    # running them if this build has failed, it would just waste
    # resources
    cancel_pending = distbuild.WorkerCancelPending(
        self._request['id'])
    self.mainloop.queue_event(distbuild.WorkerBuildQueuer,
                              cancel_pending)

    # Cancel any currently executing jobs for the above reasons, since
    # this build will fail and we can't decide whether these jobs will
    # be of use to any other build
    cancel = BuildCancel(self._request['id'])
    self.mainloop.queue_event(BuildController, cancel)

    self.mainloop.queue_event(self, _Abort())
def _maybe_relay_build_step_started(self, event_source, event):
    # Relay a WorkerBuildStepStarted event to the initiator if it
    # belongs to this build.
    distbuild.crash_point()
    if self._request['id'] not in event.initiators:
        return  # not for us

    logging.debug('BC: _relay_build_step_started: %s' %
                  event.artifact_cache_key)

    artifact = self._find_artifact(event.artifact_cache_key)
    if artifact is None:
        # This is not the event you are looking for.
        return

    logging.debug('BC: got build step started: %s' % artifact.name)
    started = BuildStepStarted(self._request['id'],
        build_step_name(artifact), event.worker_name)
    self.mainloop.queue_event(BuildController, started)
    logging.debug('BC: emitted %s' % repr(started))
def _queue_worker_builds(self, event_source, event):
    # Queue every artifact that is now ready to build; if the root
    # artifact is already BUILT the whole build is done.
    distbuild.crash_point()

    if self._artifact.state == BUILT:
        logging.info('Requested artifact is built')
        self.mainloop.queue_event(self, _Built())
        return

    logging.debug('Queuing more worker-builds to run')
    if self.debug_graph_state:
        logging.debug('Current state of build graph nodes:')
        for a in map_build_graph(self._artifact, lambda a: a):
            logging.debug(' %s state is %s' % (a.name, a.state))
            if a.state != BUILT:
                for dep in a.dependencies:
                    logging.debug(' depends on %s which is %s' %
                                  (dep.name, dep.state))

    # Marking one artifact BUILDING can make others ready, so keep
    # re-scanning until nothing new is ready.
    while True:
        ready = self._find_artifacts_that_are_ready_to_build()

        if len(ready) == 0:
            logging.debug('No new artifacts queued for building')
            break

        artifact = ready[0]

        logging.debug('Requesting worker-build of %s (%s)' %
                      (artifact.name, artifact.source.cache_key))
        request = distbuild.WorkerBuildRequest(artifact,
                                               self._request['id'])
        self.mainloop.queue_event(distbuild.WorkerBuildQueuer, request)

        artifact.state = BUILDING
        if artifact.source.morphology['kind'] == 'chunk':
            # Chunk artifacts are not built independently
            # so when we're building any chunk artifact
            # we're also building all the chunk artifacts
            # in this source
            for a in ready:
                if a.source == artifact.source:
                    a.state = BUILDING
def _handle_request(self, event_source, event):
    # A build request for one artifact: join an existing job for the
    # same artifact if there is one, otherwise create a new job.
    distbuild.crash_point()

    logging.debug('Handling build request for %s' % event.initiator_id)
    logging.debug('Current jobs: %s' % self._jobs)
    logging.debug('Workers available: %d' % len(self._available_workers))

    # Have we already made a job for this thing?
    # If so, add our initiator id to the existing job
    # If not, create a job

    if self._jobs.exists(event.artifact.basename()):
        job = self._jobs.get(event.artifact.basename())
        job.initiators.append(event.initiator_id)

        if job.running:
            logging.debug('Worker build step already started: %s' %
                          event.artifact.basename())
            progress = WorkerBuildStepAlreadyStarted(
                event.initiator_id,
                event.artifact.source.cache_key, job.who.name())
        else:
            logging.debug(
                'Job created but not building yet '
                '(waiting for a worker to become available): %s' %
                event.artifact.basename())
            progress = WorkerBuildWaiting(event.initiator_id,
                                          event.artifact.source.cache_key)

        self.mainloop.queue_event(WorkerConnection, progress)
    else:
        logging.debug('WBQ: Creating job for: %s' % event.artifact.name)
        job = self._jobs.create(event.artifact, event.initiator_id)

        if self._available_workers:
            self._give_job(job)
        else:
            progress = WorkerBuildWaiting(event.initiator_id,
                                          event.artifact.source.cache_key)
            self.mainloop.queue_event(WorkerConnection, progress)
def _start_graphing(self, event_source, event):
    '''Kick off `morph serialise-artifact` to compute the build graph.'''
    distbuild.crash_point()

    logging.info('Start constructing build graph')

    # Buffers collect the helper's stdout (graph) and stderr (errors).
    self._artifact_data = distbuild.StringBuffer()
    self._artifact_error = distbuild.StringBuffer()

    request = self._request
    argv = [self._morph_instance,
            'serialise-artifact',
            '--quiet',
            request['repo'],
            request['ref'],
            request['morphology']]
    if 'original_ref' in request:
        argv.append(request['original_ref'])

    self._helper_id = self._idgen.next()
    self._request_command_execution(argv, self._helper_id)

    progress = BuildProgress(request['id'], 'Computing build graph')
    self.mainloop.queue_event(BuildController, progress)
def _handle_request(self, event_source, event):
    # Join an existing job for the requested artifact, or create one.
    distbuild.crash_point()

    logging.debug('Handling build request for %s' % event.initiator_id)
    logging.debug('Current jobs: %s' % self._jobs)
    logging.debug('Workers available: %d' % len(self._available_workers))

    # Have we already made a job for this thing?
    # If so, add our initiator id to the existing job
    # If not, create a job

    if self._jobs.exists(event.artifact.basename()):
        job = self._jobs.get(event.artifact.basename())
        job.initiators.append(event.initiator_id)

        if job.running:
            logging.debug('Worker build step already started: %s' %
                          event.artifact.basename())
            progress = WorkerBuildStepAlreadyStarted(event.initiator_id,
                event.artifact.source.cache_key, job.who.name())
        else:
            logging.debug('Job created but not building yet '
                          '(waiting for a worker to become available): %s' %
                          event.artifact.basename())
            progress = WorkerBuildWaiting(event.initiator_id,
                                          event.artifact.source.cache_key)

        self.mainloop.queue_event(WorkerConnection, progress)
    else:
        logging.debug('WBQ: Creating job for: %s' % event.artifact.name)
        job = self._jobs.create(event.artifact, event.initiator_id)

        if self._available_workers:
            self._give_job(job)
        else:
            progress = WorkerBuildWaiting(event.initiator_id,
                                          event.artifact.source.cache_key)
            self.mainloop.queue_event(WorkerConnection, progress)
def setup(self):
    # Single-state queuer wiring: handle requests, cancellations, and
    # job lifecycle events while remaining 'idle'.
    # NOTE(review): unlike the other queuer setup variant in this file
    # there is no _Disconnected transition here, so a worker vanishing
    # mid-job is not handled — confirm this is intended.
    distbuild.crash_point()

    logging.debug('WBQ: Setting up %s' % self)
    self._available_workers = []
    self._jobs = Jobs(
        distbuild.IdentifierGenerator('WorkerBuildQueuerJob'))

    spec = [
        # state, source, event_class, new_state, callback
        ('idle', WorkerBuildQueuer, WorkerBuildRequest, 'idle',
            self._handle_request),
        ('idle', WorkerBuildQueuer, WorkerCancelPending, 'idle',
            self._handle_cancel),
        ('idle', WorkerConnection, _NeedJob, 'idle', self._handle_worker),
        ('idle', WorkerConnection, _JobStarted, 'idle',
            self._set_job_started),
        ('idle', WorkerConnection, _JobFinished, 'idle',
            self._set_job_finished),
        ('idle', WorkerConnection, _JobFailed, 'idle',
            self._set_job_failed)
    ]
    self.add_transitions(spec)
def _reconnect(self, event_source, event):
    '''Ask the connection machine to re-establish the worker link.'''
    distbuild.crash_point()

    logging.debug('WC: Triggering reconnect')
    reconnect_request = distbuild.Reconnect()
    self.mainloop.queue_event(self._cm, reconnect_request)
def setup(self):
    # Full lifecycle of one build: init -> graphing -> annotating ->
    # building, with initiator disconnects and failures taking the
    # machine to its terminal (None) state.
    distbuild.crash_point()

    spec = [
        # state, source, event_class, new_state, callback
        ('init', self, _Start, 'graphing', self._start_graphing),
        ('init', self._initiator_connection,
            distbuild.InitiatorDisconnect, None, None),

        ('graphing', distbuild.HelperRouter, distbuild.HelperOutput,
            'graphing', self._maybe_collect_graph),
        ('graphing', distbuild.HelperRouter, distbuild.HelperResult,
            'graphing', self._maybe_finish_graph),
        ('graphing', self, _GotGraph,
            'annotating', self._start_annotating),
        ('graphing', self, _GraphFailed, None, None),
        ('graphing', self._initiator_connection,
            distbuild.InitiatorDisconnect, None, None),

        ('annotating', distbuild.HelperRouter, distbuild.HelperResult,
            'annotating', self._maybe_handle_cache_response),
        ('annotating', self, _AnnotationFailed, None,
            self._notify_annotation_failed),
        ('annotating', self, _Annotated, 'building',
            self._queue_worker_builds),
        ('annotating', self._initiator_connection,
            distbuild.InitiatorDisconnect, None, None),

        # The exact WorkerConnection that is doing our building changes
        # from build to build. We must listen to all messages from all
        # workers, and choose whether to change state inside the callback.
        # (An alternative would be to manage a set of temporary transitions
        # specific to WorkerConnection instances that our currently
        # building for us, but the state machines are not intended to
        # behave that way).
        ('building', distbuild.WorkerConnection,
            distbuild.WorkerBuildStepStarted, 'building',
            self._maybe_relay_build_step_started),
        ('building', distbuild.WorkerConnection,
            distbuild.WorkerBuildOutput, 'building',
            self._maybe_relay_build_output),
        ('building', distbuild.WorkerConnection,
            distbuild.WorkerBuildCaching, 'building',
            self._maybe_relay_build_caching),
        ('building', distbuild.WorkerConnection,
            distbuild.WorkerBuildStepAlreadyStarted, 'building',
            self._maybe_relay_build_step_already_started),
        ('building', distbuild.WorkerConnection,
            distbuild.WorkerBuildWaiting, 'building',
            self._maybe_relay_build_waiting_for_worker),
        ('building', distbuild.WorkerConnection,
            distbuild.WorkerBuildFinished, 'building',
            self._maybe_check_result_and_queue_more_builds),
        ('building', distbuild.WorkerConnection,
            distbuild.WorkerBuildFailed, 'building',
            self._maybe_notify_build_failed),
        ('building', self, _Abort, None, None),
        ('building', self, _Built, None, self._notify_build_done),
        ('building', distbuild.InitiatorConnection,
            distbuild.InitiatorDisconnect, 'building',
            self._maybe_notify_initiator_disconnected),
    ]
    self.add_transitions(spec)

    # Kick the machine off.
    self.mainloop.queue_event(self, _Start())
def _maybe_collect_graph(self, event_source, event):
    '''Accumulate stdout/stderr from our serialise-artifact helper.'''
    distbuild.crash_point()

    msg = event.msg
    if msg['id'] != self._helper_id:
        return  # output belongs to some other helper request

    self._artifact_data.add(msg['stdout'])
    self._artifact_error.add(msg['stderr'])
def _request_job(self, event_source, event):
    '''Tell the queuer this connection is idle and wants work.'''
    distbuild.crash_point()

    need = _NeedJob(self)
    self.mainloop.queue_event(WorkerConnection, need)