def run(self):
    """Serve one client connection: read newline-delimited commands, reply in JSON.

    Registers this handler thread with the daemon, logs the peer's process
    credentials, then loops reading newline-terminated command lines.  Each
    complete line is handed to ``minion_daemon.dispatch`` and the result is
    JSON-encoded back to the client.  The thread is always unregistered and
    the socket always closed on exit.
    """
    ident = self.minion_daemon.add_thread()
    logger.meta.prefix = '[%s] ' % ident
    try:
        # SO_PEERCRED yields (pid, uid, gid) of the connecting process as
        # three native ints; Linux-specific — TODO confirm target platforms.
        creds = self.sock.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED,
                                     struct.calcsize('3i'))
        info = 'pid=%d, uid=%d, gid=%d' % struct.unpack('3i', creds)
        logger.info('accepting connection from %s' % (info,))
        buf = b''
        while True:
            data = self.sock.recv(4096)
            if not data:
                break  # peer closed its end; stop reading
            buf += data
            if b'\n' not in buf:
                continue  # no complete line buffered yet
            # Everything before the last newline is complete commands; the
            # tail stays buffered until more data arrives.
            *cmds, buf = buf.split(b'\n')
            for cmd in cmds:
                cmd = cmd.decode('ascii')
                logger.debug('received %r' % (cmd,))
                output = self.minion_daemon.dispatch(self.sock, cmd)
                self.sock.sendall(json.dumps(output).encode('utf8'))
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
    except BrokenPipeError:
        # Client went away mid-reply; nothing useful to report.
        pass
    except Exception:
        logger.exception('error processing request')
    finally:
        # Fix: the original closed the socket only on the success path,
        # leaking the fd when an exception was raised.  close() is safe to
        # call twice, so always close here.
        try:
            self.sock.close()
        except OSError:
            pass
        self.minion_daemon.rm_thread(ident)
        logger.info('finished')
def dispatch(self, sock, cmd):
    """Parse one command line and route it to its ``dispatch_*`` handler.

    The line is tokenized with shell quoting rules (``shlex``); the first
    token selects the handler, which receives the client socket and the
    remaining tokens.  Returns the handler's result dict, or a dict with
    ``status`` of ``'error'``/``'exception'`` describing the failure —
    the caller JSON-encodes whatever is returned back to the client.
    """
    cmd = shlex.split(cmd, posix=True)
    if not cmd:
        # Fix: the original logged '[%s] submitted empty command' with no
        # format argument (emitting a literal '[%s]') and returned None,
        # which the caller serialized as JSON 'null'.  Return a structured
        # error like every other path does.
        logger.error('submitted empty command')
        return {'status': 'error', 'error': 'empty command'}
    # Table dispatch replaces the original if/elif ladder.
    handlers = {
        'update-heads': self.dispatch_update_heads,
        'new-target': self.dispatch_new_target,
        'del-target': self.dispatch_del_target,
        'set-refspec': self.dispatch_set_refspec,
        'build': self.dispatch_build,
        'status': self.dispatch_status,
        'add-blob': self.dispatch_add_blob,
    }
    try:
        handler = handlers.get(cmd[0])
        if handler is None:
            logger.error('submitted unknown command %r' % (cmd[0],))
            return {'status': 'error', 'error': 'unknown command %r' % (cmd[0],)}
        return handler(sock, cmd[1:])
    except MinionException as e:
        # Expected, operator-visible failures: report the message.
        logger.error('%s failed: %s' % (cmd[0], e))
        return {'status': 'exception', 'error': str(e)}
    except Exception as e:
        # Unexpected failures: keep the traceback in the server log.
        logger.exception('%s failed' % (cmd[0],))
        return {'status': 'exception', 'error': str(e)}
def run(self):
    """Drive one docker build step to a terminal state for the controller.

    Waits for prerequisites, consults the build cache (first keyed without
    an image, then with the concrete image id), and only runs the container
    when no usable cached result exists.  Always notifies the controller of
    a terminal state via ``abort_if_not_finished``.
    """
    try:
        logger.meta.prefix = '[build %s/%s] ' % (self.controller.name, self.proc.name)
        if not self.controller.wait_for(self.proc):
            logger.info('aborting because of prior failure')
            return
        sources, artifacts = self.controller.get_inputs()
        # Image-agnostic cache key ('-'): a released result short-circuits
        # the build before we even construct the docker image.
        stub = self.stub(sources, artifacts, '-')
        if self.controller.is_cached(stub):
            logger.debug('finishing with released copy')
            self.controller.finish_cached(self.proc, stub, released=True)
            return
        image = self.build_image(self.proc.path)
        logger.debug('docker image is %r' % image)
        # Re-key the cache with the concrete image id.
        stub = self.stub(sources, artifacts, image)
        if self.controller.is_cached(stub):
            # Fix: the original unpacked into the same throwaway name twice
            # ('success, X, X = ...'); use the conventional '_' instead.
            success, _, _ = self.controller.get_cached(self.proc, stub)
            if success or not self.controller.retry_failures:
                logger.debug('finishing with cached copy')
                self.controller.finish_cached(self.proc, stub)
                return
            # Cached failure and retries enabled: fall through and rebuild.
        success, log, artifacts = self.run_image(sources, artifacts, stub, image)
        if success:
            self.controller.finish_success(self.proc, stub, log, artifacts)
        else:
            self.controller.finish_error(self.proc, stub, log)
    except Exception:
        logger.exception('docker worker failed')
    finally:
        # Guarantee the controller observes a terminal state even if we
        # raised before any finish_* call.
        self.controller.abort_if_not_finished(self.proc)