Example #1
async def update_merge_graph_batch(self, request: Request) -> StreamResponse:
    log.info("Received put_sub_graph_batch request")
    graph_id = request.match_info.get("graph_id", "resoto")
    # pick up the task id of the calling worker, if one was sent along
    task_id: Optional[TaskId] = None
    if tid := request.headers.get("Resoto-Worker-Task-Id"):
        task_id = TaskId(tid)
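All of these examples build a TaskId directly from a plain string, which suggests TaskId is a thin typed alias over str (most likely a typing.NewType). A minimal sketch under that assumption; task_id_from_header is a made-up helper that mirrors Example #1, not part of the library:

from typing import NewType, Optional

TaskId = NewType("TaskId", str)  # assumption: TaskId is just a typed string alias

def task_id_from_header(header_value: Optional[str]) -> Optional[TaskId]:
    # mirrors Example #1: wrap the raw header value only when it is present
    return TaskId(header_value) if header_value else None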
Example #2
async def test_handler_invocation(
    merge_handler: MergeOuterEdgesHandler,
    subscription_handler: SubscriptionHandler,
    message_bus: MessageBus,
) -> None:
    merge_called: asyncio.Future[TaskId] = asyncio.get_event_loop().create_future()

    def mocked_merge(task_id: TaskId) -> None:
        merge_called.set_result(task_id)

    # monkey patching the merge_outer_edges method
    # use setattr here, since assignment does not work in mypy https://github.com/python/mypy/issues/2427
    setattr(merge_handler, "merge_outer_edges", mocked_merge)

    subscribers = await subscription_handler.list_subscriber_for(
        merge_outer_edges)

    assert subscribers[0].id == "resotocore"

    task_id = TaskId("test_task_1")

    await message_bus.emit(
        Action(merge_outer_edges, task_id, merge_outer_edges))

    assert await merge_called == task_id
Example #3
async def test_merge_process(event_sender: AnalyticsEventSender,
                             graph_db: ArangoGraphDB,
                             foo_kinds: List[Kind]) -> None:
    # set the multiprocessing start method explicitly (main does the same)
    set_start_method("spawn")

    # wipe any existing data
    await graph_db.wipe()
    # store the model in db, so it can be loaded by the sub process
    graph_db.db.collection("model").insert_many([to_js(a) for a in foo_kinds])
    # define args to parse for the sub process
    config = empty_config([
        "--graphdb-username", "test", "--graphdb-password", "test",
        "--graphdb-database", "test"
    ])
    # create sample graph data to insert
    graph = create_graph("test")

    await outer_edge_db(graph_db.db,
                        "deferred_outer_edges").create_update_schema()

    async def iterator() -> AsyncGenerator[bytes, None]:
        for node in graph.nodes():
            yield bytes(json.dumps(graph.nodes[node]), "utf-8")
        for from_node, to_node, data in graph.edges(data=True):
            yield bytes(
                json.dumps({
                    "from": from_node,
                    "to": to_node,
                    "edge_type": data["edge_type"]
                }), "utf-8")
        yield bytes(
            json.dumps({
                "from_selector": {
                    "node_id": "id_123"
                },
                "to_selector": {
                    "node_id": "id_456"
                },
                "edge_type": "delete"
            }),
            "utf-8",
        )

    result = await merge_graph_process(graph_db, event_sender, config,
                                       iterator(), timedelta(seconds=30), None,
                                       TaskId("test_task_123"))
    assert result == GraphUpdate(112, 1, 0, 212, 0, 0)
    elem = graph_db.db.collection("deferred_outer_edges").all().next()
    assert elem["_key"] == "test_task_123"
    assert elem["task_id"] == "test_task_123"
    assert elem["edges"][0] == {
        "from_node": "id_123",
        "to_node": "id_456",
        "edge_type": "delete"
    }
Example #4
async def create_work(self, request: Request) -> StreamResponse:
    # every query parameter except "task" becomes an attribute of the task
    attrs = {k: v for k, v in request.query.items() if k != "task"}
    future = asyncio.get_event_loop().create_future()
    task = WorkerTask(TaskId(uuid_str()), "test", attrs, {
        "some": "data",
        "foo": "bla"
    }, future, timedelta(seconds=3))
    await self.worker_task_queue.add_task(task)
    # block until a worker has processed the task, then answer the request
    await future
    return web.HTTPOk()
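Example #4 (and Example #7 further down) relies on the same pattern: the handler creates a future, enqueues it together with the task, and suspends on await future until a worker resolves it. A generic asyncio illustration of that handoff, independent of the WorkerTask API; producer, consumer, and the queue shape are made up for the sketch:

import asyncio
from typing import Tuple

async def producer(queue: "asyncio.Queue[Tuple[str, asyncio.Future[str]]]") -> str:
    future: "asyncio.Future[str]" = asyncio.get_event_loop().create_future()
    await queue.put(("do-something", future))
    return await future  # suspends until a consumer resolves the future

async def consumer(queue: "asyncio.Queue[Tuple[str, asyncio.Future[str]]]") -> None:
    name, future = await queue.get()
    future.set_result(f"{name} done")

async def main() -> None:
    queue: "asyncio.Queue[Tuple[str, asyncio.Future[str]]]" = asyncio.Queue()
    result, _ = await asyncio.gather(producer(queue), consumer(queue))
    print(result)  # "do-something done"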
Example #5
def test_message_serialization() -> None:
    task_id = TaskId("123")
    subscriber_id = SubscriberId("sub")
    roundtrip(Event("test", {"a": "b", "c": 1, "d": "bla"}))
    roundtrip(Action("test", task_id, "step_name"))
    roundtrip(Action("test", task_id, "step_name", {"test": 1}))
    roundtrip(ActionDone("test", task_id, "step_name", subscriber_id))
    roundtrip(ActionDone("test", task_id, "step_name", subscriber_id, {"test": 1}))
    roundtrip(ActionError("test", task_id, "step_name", subscriber_id, "oops"))
    roundtrip(ActionError("test", task_id, "step_name", subscriber_id, "oops", {"test": 23}))
Example #6
def empty(
    descriptor: TaskDescription,
    subscriber_by_event: Callable[[], Dict[str, List[Subscriber]]]
) -> Tuple[RunningTask, Sequence[TaskCommand]]:
    assert len(descriptor.steps) > 0, "TaskDescription needs at least one step!"
    uid = TaskId(str(uuid.uuid1()))
    task = RunningTask(uid, descriptor, subscriber_by_event)
    messages = [
        SendMessage(Event("task_started", data={"task": descriptor.name})),
        *task.move_to_next_state()
    ]
    return task, messages
Example #7
async def acknowledge_config_change(self, cfg_id: str, config: Json) -> None:
    """
    Give an external entity the chance to acknowledge this config change.
    This method either returns, which signals success, or throws an exception.
    """
    future = asyncio.get_event_loop().create_future()
    task = WorkerTask(
        TaskId(uuid_str()),
        WorkerTaskName.validate_config,
        {"config_id": cfg_id},
        {"task": WorkerTaskName.validate_config, "config": config},
        future,
        timedelta(seconds=30),
    )
    # add task to queue - do not retry
    await self.task_queue.add_task(task)
    # In case the config is not valid or no worker is available,
    # this future will throw an exception.
    # Do not handle it here and let the error bubble up.
    await future
Example #8
def create_task(uid: str, name: str) -> WorkerTask:
    return WorkerTask(
        TaskId(uid), name, {}, {},
        asyncio.get_event_loop().create_future(), timedelta()
    )
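A hypothetical call of the helper above, just to show that any unique string is acceptable because TaskId only wraps the value:

import uuid

task = create_task(str(uuid.uuid4()), "example_worker_task")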