# gRPC service-invocation server exposing 'say' and 'sleep' methods, with an
# OpenCensus interceptor so every incoming call is traced (AlwaysOnSampler).
tracer_interceptor = server_interceptor.OpenCensusServerInterceptor(
    AlwaysOnSampler())
app = App(
    thread_pool=futures.ThreadPoolExecutor(max_workers=10),
    interceptors=(tracer_interceptor, ))


@app.method(name='say')
def say(request: InvokeMethodRequest) -> InvokeMethodResponse:
    """Echo-style handler: logs metadata/body and annotates a 'say' span
    with the request length."""
    tracer = Tracer(sampler=AlwaysOnSampler())
    with tracer.span(name='say') as span:
        data = request.text()
        span.add_annotation('Request length', len=len(data))
        print(request.metadata, flush=True)
        # Reuse the already-decoded payload instead of calling
        # request.text() a second time.
        print(data, flush=True)
    return InvokeMethodResponse(b'SAY', "text/plain; charset=UTF-8")


@app.method(name='sleep')
def sleep(request: InvokeMethodRequest) -> InvokeMethodResponse:
    """Handler simulating a slow (2 s) operation inside a 'sleep' span."""
    tracer = Tracer(sampler=AlwaysOnSampler())
    with tracer.span(name='sleep') as _:
        time.sleep(2)
        print(request.metadata, flush=True)
        print(request.text(), flush=True)
    return InvokeMethodResponse(b'SLEEP', "text/plain; charset=UTF-8")


# Blocks serving invocation requests on port 3001.
app.run(3001)
# Fit `net` to (inputs, y) for 25 optimization steps, redrawing the current
# fit every 5th step so the learning process is visible live.
targets = Variable(y)

for step in range(25):
    prediction = net(inputs)
    loss = loss_func(prediction, targets)

    # Standard backprop cycle: clear stale gradients, compute new ones, step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if step % 5 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=2)
        plt.text(0.5, 0,
                 'Loss=%.4f' % loss.data.numpy(),
                 fontdict={'size': 10, 'color': 'red'})
        plt.pause(0.1)
        # display(fig)

# make_dot(net)
# Dump the learned parameters after training.
for param in net.parameters():
    print(param)

app.run(50051)
"""Pub/sub consumer step: subscribes to the long-running-task topic, logs
completion, and then shuts the subscriber down."""
import datetime  # was used below but never imported (NameError at runtime)
import json
import time
from uuid import uuid4

# NOTE(review): `v1` and `App` were referenced but never imported in this
# file; these are the imports the Dapr pub/sub examples use — confirm they
# match this project's dependency set.
from cloudevents.sdk.grpc import v1
from dapr.clients.grpc.client import DaprClient
from dapr.ext.grpc import App
from dotenv import load_dotenv

from context import WorkflowContext

load_dotenv()

step_name = "step_3_consume"
pubsub_name = "redispubsub"
topic_name = "longRunningTasks"

app = App()


@app.subscribe(pubsub_name=pubsub_name, topic=topic_name)
def longRunningTaskFinished(event: v1.Event) -> None:
    """Handle the task-finished event, then stop the subscriber app."""
    time.sleep(5)
    print(
        f"{step_name}: Long running task finished at {datetime.datetime.now().isoformat()}",
        flush=True)
    # Stop the gRPC app so the blocking app.run() below returns.
    app.stop()


# Run the subscriber inside the workflow step context; `context` and `d`
# are unused here but the context managers' enter/exit side effects matter.
with WorkflowContext(step_name) as context:
    with DaprClient() as d:
        app.run(20001)