async def test_workflow_get_version():
    global v1_hits, v2_hits
    client: WorkflowClient = WorkflowClient.new_client("localhost",
                                                       7233,
                                                       namespace=NAMESPACE)
    factory = WorkerFactory(client, NAMESPACE)
    worker = factory.new_worker(TASK_QUEUE)
    worker.register_workflow_implementation_type(TestWorkflowGetVersionImplV1)
    factory.start()

    workflow: TestWorkflowGetVersion = client.new_workflow_stub(
        TestWorkflowGetVersion)

    await client.start(workflow.get_greetings)
    while v1_hits == 0:
        print(".", end="")
        await asyncio.sleep(2)

    worker.register_workflow_implementation_type(TestWorkflowGetVersionImplV2)

    while not v2_done:
        print(".", end="")
        await asyncio.sleep(2)

    assert v1_hits == 1
    assert v2_hits == 1
    assert version_found_in_v2_step_1_0 == DEFAULT_VERSION
    assert version_found_in_v2_step_1_1 == DEFAULT_VERSION
    assert version_found_in_v2_step_2_0 == DEFAULT_VERSION
    assert version_found_in_v2_step_2_1 == DEFAULT_VERSION

    # TODO: Assert that there are no markers recorded

    await cleanup_worker(client, worker)
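
The pattern this test exercises is Temporal's workflow versioning: Workflow.get_version records a marker the first time a change id is seen, and when replaying a history recorded before the change it returns DEFAULT_VERSION (-1), so a V2 implementation must keep the old branch alive. A minimal sketch of how the V2 class might branch, assuming this SDK mirrors the Java SDK's get_version(change_id, min_supported, max_supported) API; the change id "step_1" and the branch bodies are placeholders, not taken from the test:

from temporal.workflow import Workflow, workflow_method

DEFAULT_VERSION = -1  # returned when the replayed history has no marker for this change id


class TestWorkflowGetVersion:
    @workflow_method(task_queue=TASK_QUEUE)
    async def get_greetings(self) -> None:
        raise NotImplementedError


class TestWorkflowGetVersionImplV2(TestWorkflowGetVersion):
    async def get_greetings(self) -> None:
        # Histories recorded by V1 carry no marker, so replay stays on the old
        # path; fresh executions record max_supported (1) and take the new one.
        version = Workflow.get_version("step_1", DEFAULT_VERSION, 1)
        if version == DEFAULT_VERSION:
            pass  # original V1 behaviour goes here
        else:
            pass  # new V2 behaviour goes here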
Example #2
async def _start_worker():
    client = workflow_client()
    factory = WorkerFactory(client=client, namespace=client.namespace)
    worker = factory.new_worker(task_queue=TASK_QUEUE)
    worker.register_activities_implementation(
        activities_instance=PodcastTranscribeActivitiesImpl(),
        activities_cls_name=PodcastTranscribeActivities.__name__,
    )
    worker.register_workflow_implementation_type(impl_cls=PodcastTranscribeWorkflowImpl)
    factory.start()
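
factory.start() launches the pollers and returns rather than blocking, so something has to keep the process alive. A sketch of a possible entrypoint, assuming _start_worker() is defined as above; the keep-alive loop is an assumption, not part of the source:

import asyncio


async def main():
    await _start_worker()
    while True:  # keep the process (and its background pollers) running
        await asyncio.sleep(60)


if __name__ == "__main__":
    asyncio.run(main())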
Example #3
def worker(request):
    marker = request.node.get_closest_marker("worker_config")
    namespace = marker.args[0]
    task_queue = marker.args[1]
    activities = marker.kwargs.get("activities", [])
    workflows = marker.kwargs.get("workflows", [])

    factory = WorkerFactory("localhost", 7233, namespace)
    worker_instance = factory.new_worker(task_queue)
    for a_instance, a_cls in activities:
        worker_instance.register_activities_implementation(a_instance, a_cls)
    for w in workflows:
        worker_instance.register_workflow_implementation_type(w)
    factory.start()

    yield worker_instance

    cleanup_worker(worker_instance)
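
A consuming test selects its worker setup through the worker_config marker that this fixture reads; a sketch of what that usage might look like (the Greeting* names are hypothetical placeholders, and the custom marker would also need to be declared in pytest.ini or conftest.py to avoid warnings):

import pytest


@pytest.mark.worker_config(
    "test-namespace",   # marker.args[0]: namespace
    "test-task-queue",  # marker.args[1]: task queue
    activities=[(GreetingActivitiesImpl(), "GreetingActivities")],
    workflows=[GreetingWorkflowImpl],
)
def test_greeting(worker):
    # By the time the test body runs, the fixture has registered the
    # activities and workflows and started the factory.
    ...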
Example #4
async def worker(request):
    marker = request.node.get_closest_marker("worker_config")
    namespace = marker.args[0]
    task_queue = marker.args[1]
    activities = marker.kwargs.get("activities", [])
    workflows = marker.kwargs.get("workflows", [])
    data_converter = marker.kwargs.get("data_converter", DEFAULT_DATA_CONVERTER_INSTANCE)

    client: WorkflowClient = WorkflowClient.new_client("localhost", 7233, data_converter=data_converter)
    factory = WorkerFactory(client, namespace)
    worker_instance = factory.new_worker(task_queue)
    for a_instance, a_cls in activities:
        worker_instance.register_activities_implementation(a_instance, a_cls)
    for w in workflows:
        worker_instance.register_workflow_implementation_type(w)
    factory.start()

    yield worker_instance

    asyncio.create_task(cleanup_worker(client, worker_instance))
Example #5
async def client_main():
    # 1- Create a new client associated with the namespace
    # 2- Define the WorkerFactory associated with the namespace
    # 3- Create the worker
    # 4- Register the activities implementation
    # 5- Start the factory
    client = WorkflowClient.new_client(namespace=NAMESPACE)
    factory = WorkerFactory(client, NAMESPACE)
    worker = factory.new_worker(TASK_QUEUE)
    worker.register_activities_implementation(QuestionActivitiesImpl(),
                                              "QuestionActivities")
    factory.start()
    print("Worker started")
Example #6
async def test_workflow():
    db = connect_to_db()

    test_medium = create_test_medium(db=db, label='test')
    test_feed = create_test_feed(db=db, label='test', medium=test_medium)

    # 'label' is important as it will be stored in both stories.title and stories.description, which in turn will be
    # used to guess the probable language of the podcast episode
    test_story = create_test_story(db=db,
                                   label='keeping up with Kardashians',
                                   feed=test_feed)

    stories_id = test_story['stories_id']

    with open(TEST_MP3_PATH, mode='rb') as f:
        test_mp3_data = f.read()

    # noinspection PyUnusedLocal
    def __mp3_callback(request: HashServer.Request) -> Union[str, bytes]:
        response = b"HTTP/1.0 200 OK\r\n"
        response += b"Content-Type: audio/mpeg\r\n"
        response += f"Content-Length: {len(test_mp3_data)}\r\n".encode('utf-8')
        response += b"\r\n"
        response += test_mp3_data
        return response

    port = random_unused_port()
    pages = {
        '/test.mp3': {
            'callback': __mp3_callback,
        }
    }

    hs = HashServer(port=port, pages=pages)
    hs.start()

    # Not localhost as this might get fetched from a remote worker
    mp3_url = hs.page_url('/test.mp3')

    db.insert(table='story_enclosures',
              insert_hash={
                  'stories_id': stories_id,
                  'url': mp3_url,
                  'mime_type': 'audio/mpeg',
                  'length': len(test_mp3_data),
              })

    client = workflow_client()

    # Start worker
    factory = WorkerFactory(client=client, namespace=client.namespace)
    worker = factory.new_worker(task_queue=TASK_QUEUE)

    # Use an activities implementation with random GCS prefixes set
    activities = _RandomPrefixesPodcastTranscribeActivities()

    worker.register_activities_implementation(
        activities_instance=activities,
        activities_cls_name=PodcastTranscribeActivities.__name__,
    )
    worker.register_workflow_implementation_type(
        impl_cls=PodcastTranscribeWorkflowImpl)
    factory.start()

    # Initialize workflow instance
    workflow: PodcastTranscribeWorkflow = client.new_workflow_stub(
        cls=PodcastTranscribeWorkflow,
        workflow_options=WorkflowOptions(
            workflow_id=str(stories_id),

            # By default, if individual activities of the workflow fail, they will get restarted pretty much
            # indefinitely, and so this test might run for days (or rather just timeout on the CI). So we cap the
            # workflow so that if it doesn't manage to complete in X minutes, we consider it as failed.
            workflow_run_timeout=timedelta(minutes=5),
        ),
    )

    # Wait for the workflow to complete
    await workflow.transcribe_episode(stories_id)

    downloads = db.select(table='downloads', what_to_select='*').hashes()
    assert len(downloads) == 1
    first_download = downloads[0]
    assert first_download['stories_id'] == stories_id
    assert first_download['type'] == 'content'
    assert first_download['state'] == 'success'

    download_content = fetch_content(db=db, download=first_download)

    # It's what gets said in the sample MP3 file
    assert 'Kim Kardashian' in download_content

    # Initiate the worker shutdown in the background while we do the GCS cleanup,
    # so that stop_worker_faster() below doesn't have to wait as long
    await worker.stop(background=True)

    log.info("Cleaning up GCS...")
    GCSStore(bucket_config=activities.config.raw_enclosures()).delete_object(
        object_id=str(stories_id))
    GCSStore(
        bucket_config=activities.config.transcoded_episodes()).delete_object(
            object_id=str(stories_id))
    GCSStore(bucket_config=activities.config.transcripts()).delete_object(
        object_id=str(stories_id))
    log.info("Cleaned up GCS")

    log.info("Stopping workers...")
    await stop_worker_faster(worker)
    log.info("Stopped workers")