import logging

import grpc

import helloworld_service_pb2_grpc
from helloworld_service_pb2 import HelloRequest, HelloReply
from dapr.ext.grpc import App
import json


class HelloWorldService(helloworld_service_pb2_grpc.HelloWorldServiceServicer):
    """gRPC servicer implementing the HelloWorld service.

    Fix: subclass the generated ``HelloWorldServiceServicer`` base class
    (the one that pairs with ``add_HelloWorldServiceServicer_to_server``)
    rather than the experimental ``HelloWorldService`` API class.
    """

    def SayHello(self, request: HelloRequest,
                 context: grpc.aio.ServicerContext) -> HelloReply:
        """Return a greeting message for the name carried in the request."""
        logging.info(request)
        return HelloReply(message='Hello, %s!' % request.name)


app = App()

if __name__ == '__main__':
    print('starting the HelloWorld Service')
    logging.basicConfig(level=logging.INFO)
    # Register the servicer on the Dapr app's underlying gRPC server so
    # Dapr can proxy service invocations to SayHello.
    app.add_external_service(
        helloworld_service_pb2_grpc.add_HelloWorldServiceServicer_to_server,
        HelloWorldService())
    app.run(50051)
class Net(nn.Module):
    # Minimal single-feature linear model: one Linear(1, 1) layer (y = w*x + b).

    def __init__(self):
        super(Net, self).__init__()
        # One input feature mapped to one output value.
        self.layer = torch.nn.Linear(1, 1)

    def forward(self, x):
        # x: expected shape (N, 1) to match Linear(1, 1) — TODO confirm at call sites.
        x = self.layer(x)
        return x


net = Net()
print(net, flush=True)

app = App()


@app.subscribe(pubsub_name='pubsub', topic='DATA')
def mytopic(event: v1.Event) -> None:
    # Pub/sub handler: rebuild training tensors from the event payload.
    # Assumes the event data is JSON with 'X' and 'Y' array fields — verify
    # against the publisher.
    # NOTE(review): this handler appears truncated in this chunk — the
    # optimizer and loss are constructed, but no training step is visible here.
    X = json.loads(event.Data()).get('X')
    Y = json.loads(event.Data()).get('Y')
    # convert numpy array to tensor in shape of input size
    x = torch.from_numpy(np.asarray(X).reshape(-1, 1)).float()
    y = torch.from_numpy(np.asarray(Y).reshape(-1, 1)).float()
    # Define Optimizer and Loss Function
    optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
    loss_func = torch.nn.MSELoss()
def setUp(self):
    """Construct a fresh Dapr gRPC App for each test case."""
    fresh_app = App()
    self._app = fresh_app
import os
import json
import logging

from dapr.clients import DaprClient
from dapr.ext.grpc import App, BindingRequest

# Runtime configuration, overridable through environment variables.
APP_PORT = os.getenv("APP_PORT", "3001")
PUBSUB_NAME = os.getenv("PUBSUB_NAME", "processed")
TOPIC_NAME = os.getenv("TOPIC_NAME", "processed-tweets")
STORE_NAME = os.getenv("STORE_NAME", "tweet-store")

app = App()


@app.binding('tweets')
def binding(request: BindingRequest):
    """Handle an incoming 'tweets' input binding event.

    Extracts the tweet from the raw JSON payload, persists it to the
    state store under its id, and forwards it to the sentiment-scoring
    method of the tweet-processor service.
    """
    tweet = extract_tweets(json.loads(request.text()))
    logging.info(tweet)
    with DaprClient() as client:
        serialized = json.dumps(tweet)
        client.save_state(STORE_NAME, tweet['id'], serialized)
        resp = client.invoke_method(
            'tweet-processor', 'sentiment-score', data=serialized)
import time from concurrent import futures from dapr.ext.grpc import App, InvokeMethodRequest, InvokeMethodResponse from opencensus.trace.samplers import AlwaysOnSampler from opencensus.trace.tracer import Tracer from opencensus.ext.grpc import server_interceptor from opencensus.trace.samplers import AlwaysOnSampler tracer_interceptor = server_interceptor.OpenCensusServerInterceptor( AlwaysOnSampler()) app = App(thread_pool=futures.ThreadPoolExecutor(max_workers=10), interceptors=(tracer_interceptor, )) @app.method(name='say') def say(request: InvokeMethodRequest) -> InvokeMethodResponse: tracer = Tracer(sampler=AlwaysOnSampler()) with tracer.span(name='say') as span: data = request.text() span.add_annotation('Request length', len=len(data)) print(request.metadata, flush=True) print(request.text(), flush=True) return InvokeMethodResponse(b'SAY', "text/plain; charset=UTF-8") @app.method(name='sleep') def sleep(request: InvokeMethodRequest) -> InvokeMethodResponse:
def app(self) -> "App":
    """Provide a newly constructed Dapr gRPC App instance."""
    application = App()
    return application
import datetime
import json
import time
from uuid import uuid4

# Fix: App, v1, and datetime were used below but never imported, which
# raises NameError; import paths follow the Dapr Python SDK examples.
from cloudevents.sdk.event import v1
from dotenv import load_dotenv

from context import WorkflowContext
from dapr.clients.grpc.client import DaprClient
from dapr.ext.grpc import App

load_dotenv()

step_name = "step_3_consume"
pubsub_name = "redispubsub"
topic_name = "longRunningTasks"

app = App()


@app.subscribe(pubsub_name=pubsub_name, topic=topic_name)
def longRunningTaskFinished(event: v1.Event) -> None:
    """Pub/sub handler: report task completion, then stop the app loop."""
    time.sleep(5)
    print(
        f"{step_name}: Long running task finished at {datetime.datetime.now().isoformat()}",
        flush=True)
    app.stop()


with WorkflowContext(step_name) as context:
    with DaprClient() as d:
        # Blocks serving the subscription until app.stop() is called above.
        app.run(20001)