def bootstrap(self, manifest: 'Manifest', mocked=False) -> 'CeleryClient':
    """
    Bootstrap the Celery client from a manifest.

    Scans the manifest package to discover actions registered with the
    app via decorator. When `mocked` is False, the native Celery task
    objects are created and each action is bootstrapped and registered
    as a client method; when True, MagicMock stand-ins are installed
    instead so tests never touch a real broker.

    Returns self to allow fluent chaining.
    """
    # discover actions registered with app via decorator
    manifest = Manifest(manifest)
    self.scanner.scan(manifest.package)

    if not mocked:
        # trigger creation of the native Celery task objects
        self.app.manifest = manifest
        self.app.initialize_celery()
        console.info(
            # NOTE: plain string — the original used an f-string with
            # no placeholders (lint F541)
            message='initializing Celery client',
            data={
                'broker': self.app.celery.conf.broker_url,
                'tasks': sorted(self.app.actions.keys()),
            }
        )
        for action in self.app.actions.values():
            action.bootstrap()
            self.methods[action.name] = action
    else:
        # if "mocked", do not bother doing anything but
        # creating mock client methods
        for action in self.app.actions.values():
            self.methods[action.name] = MagicMock()

    return self
def _init_server_and_run(self):
    """
    Build and start the gRPC server, then block the main thread until
    interrupted with ctrl+c, at which point the server is stopped with
    the configured grace period.
    """
    # the grpc server runs in a thread pool
    self.grpc.executor = ThreadPoolExecutor(
        max_workers=self.grpc.worker_thread_pool_size,
        initializer=self.on_start_worker,
        initargs=(self, ))

    # submit one no-op per worker so every pool thread is created
    # eagerly and runs on_start_worker up front, rather than lazily on
    # the first real request
    for i in range(self.grpc.worker_thread_pool_size):
        self.grpc.executor.submit(lambda: None)

    # build grpc server
    self.grpc.server = grpc.server(self.grpc.executor)
    self.grpc.server.add_insecure_port(self.grpc.options.server_address)

    # build grpcio "servicer" class
    self.grpc.servicer = self._grpc_servicer_factory()
    # NOTE(review): the servicer is both the receiver and the first
    # positional argument here — presumably add_to_grpc_server follows
    # the grpcio generated add_*Servicer_to_server(servicer, server)
    # signature; confirm the double-pass is intentional.
    self.grpc.servicer.add_to_grpc_server(self.grpc.servicer, self.grpc.server)
    self.grpc.server.start()

    # sleep-lock the main thread to keep the server running,
    # as the server runs as a daemon.
    try:
        while True:
            time.sleep(9999)
    except KeyboardInterrupt:
        # stop server on ctrl+c
        if self.grpc.server is not None:
            console.info(f'stopping gRPC server (PID: {os.getpid()})')
            self.grpc.server.stop(grace=self.grpc.options.grace)
def post_bad_request(
    self,
    action: 'Action',
    request: 'Request',
    exc: Exception,
):
    """
    Rollback a failed transaction.

    Invoked after `action` raised `exc` while handling `request`. Rolls
    back the active sqlalchemy transaction; the session is closed in a
    finally block so it is released even if rollback itself raises.
    """
    # plain string — original used a placeholder-less f-string (F541)
    console.info('rolling back sqlalchemy transaction')
    try:
        self.store_type.rollback()
    finally:
        self.store_type.close()
def on_start(self):
    """
    Start the RPC client or server.

    If the configured server address is already in use, attempts to
    kill the stale process holding the port (best-effort, via lsof)
    and exits if the port still cannot be freed. Then serves either in
    the main process or in forked worker subprocesses, depending on
    `worker_process_count`.
    """
    # try to ensure the desired port is available
    if is_port_in_use(self.grpc.options.server_address):
        console.critical(
            f'gRPC service address {self.grpc.options.server_address} '
            f'in use. trying to kill stale service processes.')
        server_port = self.grpc.options.port
        try:
            # NOTE: shell pipeline on an internally-configured port
            # number only — no untrusted input reaches the shell
            kill_cmd = f'lsof -ti tcp:{server_port} | xargs kill'
            console.info(
                message='running command to free gRPC service port',
                data={
                    'port': server_port,
                    'command': kill_cmd
                })
            subprocess.getoutput(kill_cmd)
        except Exception:
            console.exception(
                'error running command to free gRPC service port')
            exit(-1)
        if is_port_in_use(self.grpc.options.server_address):
            console.critical('could not start gRPC service')
            exit(-1)

    # typo fix: 'strating' -> 'starting'
    console.info(message='starting gRPC server. Press ctrl+c to stop.',
                 data={
                     'address': self.grpc.options.server_address,
                     'methods': list(self.actions.keys()),
                     'worker_process_count': self.grpc.worker_process_count,
                     'worker_thread_pool_size':
                         self.grpc.worker_thread_pool_size,
                 })

    # start the server process/es
    if self.grpc.worker_process_count == 1:
        # serve in main forground process
        self._init_server_and_run()
    else:
        # serve in forked subprocesses
        self._spawn_worker_processes()
def _compile_grpc_web_client_stub(self, dest: Text):
    """
    Compile the grpc .proto file, generating pb2 and pb2_grpc modules
    in the build directory. These modules contain abstract base classes
    required by the grpc server and client.
    """
    proto_path = self.grpc.proto_file
    include_dir = os.path.realpath(os.path.dirname(proto_path)) or '.'
    proto_fname = os.path.basename(proto_path)

    # assemble the protoc shell command from its template
    protoc_command = PROTOC_WEB_GENERATE_CLIENT_COMMAND.format(
        include_dir=include_dir,
        build_dir=dest,
        proto_file=proto_fname,
    ).strip()

    console.info(
        message='generating gRPC-web client stub',
        data={'command': protoc_command.split('\n')}
    )

    # collapse internal whitespace and run; any output means failure
    output = subprocess.getoutput(re.sub(r'\s+', ' ', protoc_command))
    if output:
        exit(output)
def create_tables(cls, overwrite=False):
    """
    Create all tables for all SqlalchemyStores used in the host app.

    When `overwrite` is truthy, existing tables are dropped first.
    Logs an error and returns without side effects if the store class
    has not been bootstrapped.
    """
    if not cls.is_bootstrapped():
        console.error(f'{get_class_name(cls)} cannot create '
                      f'tables unless bootstrapped')
        return

    metadata = cls.get_metadata()
    sql_engine = cls.get_engine()

    if overwrite:
        # wipe existing tables before recreating them
        console.info('dropping Resource SQL tables...')
        metadata.drop_all(sql_engine)

    # create all tables
    console.info('creating Resource SQL tables...')
    metadata.create_all(sql_engine)
def __init__(self, app: 'GrpcApplication'):
    """
    Initialize a gRPC client bound to the given app, which must already
    be bootstrapped. Opens a (secure or insecure) channel to the
    configured client address, builds the generated stub, and wraps
    each app action in a callable.
    """
    assert app.is_bootstrapped

    self._app = app
    self._address = app.grpc.options.client_address

    # open a channel, secured per app options
    if app.grpc.options.secure_channel:
        credentials = grpc.ssl_channel_credentials()
        self._channel = grpc.secure_channel(self._address, credentials)
    else:
        self._channel = grpc.insecure_channel(self._address)

    console.info(
        message='gRPC client initialized',
        data={
            'address': self._address,
            'secure': app.grpc.options.secure_channel
        }
    )

    # build the generated stub and one wrapper per application action
    stub_type = app.grpc.pb2_grpc.GrpcApplicationStub
    self._grpc_stub = stub_type(self._channel)
    self._funcs = {
        name: self._build_func(action)
        for name, action in app.actions.items()
    }
def _generate_proto_file(self):
    """
    Iterate over function actions, using request and response schemas
    to generate protobuf message and service types, then write the
    assembled protobuf source to the configured .proto file.
    """
    # all lines of proto source code accumulate here
    lines = ['syntax = "proto3";']

    # generate proto messages for each Resource class in this app
    msg_gen = MessageGenerator()
    for resource_type in self.res.values():
        lines.append(msg_gen.emit_resource_message(resource_type) + '\n')

    # generate request/response message types plus a service-interface
    # function declaration for each application action
    decls = []
    for action in self.actions.values():
        decls.append(action.generate_protobuf_function_declaration())
        if action.schemas.request:
            lines.append(action.generate_request_message_type())
        if action.schemas.response:
            lines.append(action.generate_response_message_type())

    # assemble service interface definition block
    lines.append('service GrpcApplication {')
    lines.extend(' ' + decl for decl in decls)
    lines.append('}\n')

    # write final protobuf source code to file
    console.info(message='generating gRPC proto file',
                 data={'destination': self.grpc.proto_file})
    with open(self.grpc.proto_file, 'w') as fout:
        fout.write('\n'.join(lines))
def bootstrap_celery_worker_process(*args, **kwargs):
    # Worker-process init hook: bootstraps the app inside a freshly
    # started Celery worker using the manifest held on self.local.
    # NOTE(review): `self` is closed over from the enclosing scope —
    # this is a nested function, not a method; the *args/**kwargs
    # passed by the caller are deliberately ignored.
    console.info('bootstrapping celery worker process')
    self.bootstrap(self.local.manifest)
def _render_grpc_web_client_files(self, dest):
    """
    Render the jinja2 *.js templates for the gRPC-web client into the
    `dest` directory.

    Collects all schemas referenced by the app — resource schemas
    flattened recursively, plus each action's request/response schemas —
    into the template context, renders every JS template, beautifies
    the output, and writes it under `dest`.
    """
    dest = os.path.realpath(dest)

    # generate JSON schemas for all ravel schemas
    # referenced in this app
    def unnest(schema, unnested):
        # recursively flatten child schemas into `unnested`, keyed by
        # stripped schema name; the first schema seen for a name wins
        for child_schema in schema.children:
            child_schema_name = get_stripped_schema_name(child_schema)
            if child_schema_name in unnested:
                console.warning(
                    message=('gRPC web client build process '
                             'ignoring schema with duplicate name'),
                    data={
                        'schema': child_schema_name,
                        'parent_schema': get_stripped_schema_name(schema),
                    })
                # bug fix: actually skip the duplicate, as the warning
                # message claims; previously it was overwritten anyway
                continue
            # bug fix: register the child itself under its own name;
            # previously the *parent* schema was stored here
            unnested[child_schema_name] = child_schema
            unnest(child_schema, unnested)

    nested_schemas = OrderedDict()
    for resource_type in self.res.values():
        unnest(resource_type.ravel.schema, nested_schemas)

    action_schemas = OrderedDict()
    for action in self.actions.values():
        for schema in action.schemas.values():
            action_schemas[get_stripped_schema_name(schema)] = schema

    # initialize templating engine
    template_dir = os.path.join(os.path.dirname(__file__), 'templates')
    jinja = TemplateEnvironment(template_dir)
    template_context = {
        'list': list,
        'dict': dict,
        'str': str,
        'int': int,
        'set': set,
        'len': len,
        'app': self,
        'inspect': inspect,
        'nested_schemas': nested_schemas,
        'action_schemas': action_schemas,
        'json_schema_generator': JsonSchemaGenerator(),
        'class_name': get_class_name,
    }

    console.info(message='jinja2 template directory',
                 data={'path': template_dir})

    # render templates
    for js_fname in glob.glob(template_dir + '/*.js'):
        js_fname = os.path.basename(js_fname)
        with open(os.path.join(dest, js_fname), 'w') as js_file:
            # render template from file
            template = jinja.from_filename(js_fname)
            js_source = template.render(template_context)

            # lint the generated JS code
            formatted_js_source = jsbeautifier.beautify(js_source)

            # write to dest directory
            console.info(f'rendering {dest}/{os.path.basename(js_fname)}')
            js_file.write(formatted_js_source)
def on_start(self):
    """Log the listen address and serve HTTP requests until stopped."""
    announcement = f'HTTP server listening on http://{self.host}:{self.port}'
    console.info(announcement)
    self.server.serve_forever()
def on_start(self, *args, **kwargs):
    """Log registered endpoints and delegate to the WSGI entrypoint."""
    endpoint_names = sorted(self.actions.keys())
    console.info(
        message='starting Falcon web service',
        data={'endpoints': endpoint_names},
    )
    return self.entrypoint(*args, **kwargs)