Example no. 1
def test_progression_start_iter_not_zero(full_snapshot):
    source_model = SnapshotModel()
    model = ProgressProxyModel(source_model, parent=None)

    reporting_mode = qt_api.QtTest.QAbstractItemModelTester.FailureReportingMode.Warning
    tester = qt_api.QtTest.QAbstractItemModelTester(  # noqa, prevent GC
        model, reporting_mode)

    source_model._add_snapshot(SnapshotModel.prerender(full_snapshot), 1)

    assert model.data(model.index(0, 0, QModelIndex()), ProgressRole) == {
        "nr_reals": 100,
        "status": {
            REALIZATION_STATE_UNKNOWN: 100
        },
    }

    partial = PartialSnapshot(full_snapshot)
    partial.update_real("0", Realization(status=REALIZATION_STATE_FINISHED))
    source_model._add_partial_snapshot(SnapshotModel.prerender(partial), 1)

    assert model.data(model.index(0, 0, QModelIndex()), ProgressRole) == {
        "nr_reals": 100,
        "status": {
            REALIZATION_STATE_UNKNOWN: 99,
            REALIZATION_STATE_FINISHED: 1
        },
    }
Example no. 2
def test_changes(full_snapshot):
    source_model = SnapshotModel()

    model = JobListProxyModel(None, 0, 0, 0, 0)
    model.setSourceModel(source_model)

    reporting_mode = qt_api.QtTest.QAbstractItemModelTester.FailureReportingMode.Warning
    tester = qt_api.QtTest.QAbstractItemModelTester(  # noqa, prevent GC
        model, reporting_mode)

    source_model._add_snapshot(SnapshotModel.prerender(full_snapshot), 0)
    assert (model.index(0, _id_to_col(ids.STATUS),
                        QModelIndex()).data() == JOB_STATE_START)

    partial = PartialSnapshot(full_snapshot)
    start_time = datetime.datetime(year=2020, month=10, day=27, hour=12)
    end_time = datetime.datetime(year=2020, month=10, day=28, hour=13)
    partial.update_job(
        "0",
        "0",
        "0",
        job=Job(
            status=JOB_STATE_FAILURE,
            start_time=start_time,
            end_time=end_time,
        ),
    )
    source_model._add_partial_snapshot(SnapshotModel.prerender(partial), 0)
    assert (model.index(0, _id_to_col(DURATION),
                        QModelIndex()).data() == "1 day, 1:00:00")
    assert (model.index(0, _id_to_col(ids.STATUS),
                        QModelIndex()).data() == JOB_STATE_FAILURE)
Example no. 3
 def update_snapshot(self, events):
     snapshot_mutate_event = PartialSnapshot(self._snapshot)
     for event in events:
         snapshot_mutate_event.from_cloudevent(event)
     self._snapshot.merge_event(snapshot_mutate_event)
     if self._status != self._snapshot.get_status():
         self._status = self._status_tracker.update_state(
             self._snapshot.get_status())
     return snapshot_mutate_event
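
For reference, the same event-merge pattern written as a free-standing helper. This is only a sketch: it reuses the calls shown in these examples (PartialSnapshot, from_cloudevent, merge_event), and the name apply_events is hypothetical.

def apply_events(snapshot, events):
    # Fold every cloud event into one partial, then merge the partial back
    # into the full snapshot, mirroring update_snapshot() above.
    partial = PartialSnapshot(snapshot)
    for event in events:
        partial.from_cloudevent(event)
    snapshot.merge_event(partial)
    return partial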
Example no. 4
def test_update_partial_from_multiple_cloudevents(snapshot):
    partial = PartialSnapshot(snapshot)
    partial.from_cloudevent(
        CloudEvent({
            "id": "0",
            "type": ids.EVTYPE_FM_JOB_RUNNING,
            "source": "/real/0/step/0/job/0",
        }))
    partial.from_cloudevent(
        CloudEvent(
            {
                "id": "0",
                "type": ids.EVTYPE_FM_JOB_FAILURE,
                "source": "/real/0/step/0/job/0",
            },
            {ids.ERROR_MSG: "failed"},
        ))
    partial.from_cloudevent(
        CloudEvent({
            "id": "1",
            "type": ids.EVTYPE_FM_JOB_SUCCESS,
            "source": "/real/0/step/0/job/1",
        }))
    jobs = partial.to_dict()["reals"]["0"]["steps"]["0"]["jobs"]
    jobs["0"]["status"] == state.JOB_STATE_FAILURE
    jobs["1"]["status"] == state.JOB_STATE_FINISHED
Example no. 5
def test_realization_job_hint(full_snapshot):
    model = SnapshotModel()
    model._add_snapshot(full_snapshot, 0)

    partial = PartialSnapshot(full_snapshot)
    partial.update_job("0", "0", "0", Job(status=JOB_STATE_RUNNING))
    model._add_partial_snapshot(partial, 0)

    first_real = model.index(0, 0, model.index(0, 0))
    colors = model.data(first_real, RealJobColorHint)
    assert colors[0].name() == QColor(*COLOR_RUNNING).name()
    assert colors[1].name() == QColor(*COLOR_PENDING).name()
Example no. 6
def test_multiple_cloud_events_trigger_non_communicated_change():
    """In other words, though we say all steps are finished, we don't
    explicitly send an event that changes the realization status. It should
    happen by virtue of the steps being completed."""
    snapshot = (SnapshotBuilder().add_step(
        step_id="0", status="Unknown").build(["0"], status="Unknown"))
    partial = PartialSnapshot(snapshot)
    partial.from_cloudevent(
        CloudEvent({
            "id": "0",
            "type": ids.EVTYPE_FM_STEP_SUCCESS,
            "source": "/real/0/step/0",
        }))
    assert partial.to_dict(
    )["reals"]["0"]["status"] == state.REALIZATION_STATE_FINISHED
Example no. 7
 async def _ensemble_cancelled_handler(self, event):
     if self._snapshot.get_status() != ENSEMBLE_STATE_FAILED:
         snapshot_mutate_event = PartialSnapshot(self._snapshot).from_cloudevent(
             event
         )
         await self._send_snapshot_update(snapshot_mutate_event)
         self._stop()
Example no. 8
 async def _ensemble_failed_handler(self, event):
     if self._snapshot.get_status() not in [
         ENSEMBLE_STATE_STOPPED,
         ENSEMBLE_STATE_CANCELLED,
     ]:
         snapshot_mutate_event = PartialSnapshot(self._snapshot).from_cloudevent(
             event
         )
         await self._send_snapshot_update(snapshot_mutate_event)
Example no. 9
def test_duration(mock_datetime, timezone, full_snapshot):
    source_model = SnapshotModel()

    model = JobListProxyModel(None, 0, 0, 0, 0)
    model.setSourceModel(source_model)

    reporting_mode = qt_api.QtTest.QAbstractItemModelTester.FailureReportingMode.Warning
    tester = qt_api.QtTest.QAbstractItemModelTester(  # noqa, prevent GC
        model, reporting_mode)

    source_model._add_snapshot(SnapshotModel.prerender(full_snapshot), 0)
    assert (model.index(0, _id_to_col(ids.STATUS),
                        QModelIndex()).data() == JOB_STATE_START)

    partial = PartialSnapshot(full_snapshot)
    start_time = datetime.datetime(year=2020,
                                   month=10,
                                   day=27,
                                   hour=12,
                                   tzinfo=timezone)
    # mock only datetime.datetime.now()
    mock_datetime.datetime.now.return_value = datetime.datetime(
        year=2020,
        month=10,
        day=28,
        hour=13,
        minute=12,
        second=11,
        microsecond=5,  # Note that microseconds are intended to be removed
        tzinfo=timezone,
    )
    partial.update_job(
        "0",
        "0",
        "2",
        job=Job(
            status=JOB_STATE_RUNNING,
            start_time=start_time,
        ),
    )
    source_model._add_partial_snapshot(SnapshotModel.prerender(partial), 0)
    assert (model.index(2, _id_to_col(DURATION),
                        QModelIndex()).data() == "1 day, 1:12:11")
    mock_datetime.datetime.now.assert_called_once_with(timezone)
Example no. 10
    def track(self):
        while True:
            event = self._work_queue.get()
            if isinstance(event, str):
                try:
                    if event == EvaluatorTracker.DONE:
                        yield EndEvent(
                            failed=self._model.hasRunFailed(),
                            failed_msg=self._model.getFailMessage(),
                        )
                    elif event == EvaluatorTracker.CONNECTION_ERROR:
                        yield EndEvent(
                            failed=True,
                            failed_msg="Connection error",
                        )
                except GeneratorExit:
                    # consumers may exit at this point, make sure the last
                    # task is marked as done
                    pass
                self._work_queue.task_done()
                break
            elif event["type"] == ids.EVTYPE_EE_SNAPSHOT:
                iter_ = event.data["iter"]
                snapshot = Snapshot(event.data)
                self._iter_snapshot[iter_] = snapshot
                yield FullSnapshotEvent(
                    phase_name=self._model.getPhaseName(),
                    current_phase=self._model.currentPhase(),
                    total_phases=self._model.phaseCount(),
                    indeterminate=self._model.isIndeterminate(),
                    progress=self._progress(),
                    iteration=iter_,
                    snapshot=snapshot,
                )
            elif event["type"] == ids.EVTYPE_EE_SNAPSHOT_UPDATE:
                iter_ = event.data["iter"]
                if iter_ not in self._iter_snapshot:
                    raise OutOfOrderSnapshotUpdateException(
                        f"got {ids.EVTYPE_EE_SNAPSHOT_UPDATE} without having stored snapshot for iter {iter_}"
                    )
                partial = PartialSnapshot(
                    self._iter_snapshot[iter_]).from_cloudevent(event)
                self._iter_snapshot[iter_].merge_event(partial)
                yield SnapshotUpdateEvent(
                    phase_name=self._model.getPhaseName(),
                    current_phase=self._model.currentPhase(),
                    total_phases=self._model.phaseCount(),
                    indeterminate=self._model.isIndeterminate(),
                    progress=self._progress(),
                    iteration=iter_,
                    partial_snapshot=partial,
                )

            self._work_queue.task_done()
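
A minimal consumer sketch for the generator above. The tracker name is hypothetical; the event attributes used (iteration, failed, failed_msg) are the ones set in the snippet.

# Drive the generator until the EndEvent terminates the stream.
for event in tracker.track():
    if isinstance(event, FullSnapshotEvent):
        print(f"iter {event.iteration}: full snapshot")
    elif isinstance(event, SnapshotUpdateEvent):
        print(f"iter {event.iteration}: partial update")
    elif isinstance(event, EndEvent):
        print(f"finished, failed={event.failed}, msg={event.failed_msg}")
        break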
Example no. 11
def test_no_cross_talk(full_snapshot):
    source_model = SnapshotModel()

    model = JobListProxyModel(None, 0, 0, 0, 0)
    model.setSourceModel(source_model)

    reporting_mode = qt_api.QtTest.QAbstractItemModelTester.FailureReportingMode.Warning
    tester = qt_api.QtTest.QAbstractItemModelTester(  # noqa, prevent GC
        model, reporting_mode)

    source_model._add_snapshot(SnapshotModel.prerender(full_snapshot), 0)
    source_model._add_snapshot(SnapshotModel.prerender(full_snapshot), 1)

    # Test that changes to iter=1 do not bleed into iter=0
    partial = PartialSnapshot(full_snapshot)
    partial.update_job("0", "0", "0", job=Job(status=JOB_STATE_FAILURE))
    source_model._add_partial_snapshot(SnapshotModel.prerender(partial), 1)
    assert (model.index(0, _id_to_col(ids.STATUS),
                        QModelIndex()).data() == JOB_STATE_START)

    model.set_step(1, 0, 0, 0)
    assert (model.index(0, _id_to_col(ids.STATUS),
                        QModelIndex()).data() == JOB_STATE_FAILURE)
Example no. 12
def mock_ee_monitor(*args):
    reals_ids = ["0", "1"]
    snapshot = (SnapshotBuilder().add_stage(
        stage_id="0", status="Running").add_step(
            stage_id="0", step_id="0", status="Unknown").add_job(
                stage_id="0",
                step_id="0",
                job_id="0",
                name="job0",
                data={},
                status="Running",
            ).add_metadata("iter", 0).build(reals_ids, "Unknown"))

    update = PartialSnapshot(snapshot)
    update.update_step("0", "0", "0", "Finished")
    update.update_step("1", "0", "0", "Finished")

    events = [
        MockCloudEvent(
            {"type": ids.EVTYPE_EE_SNAPSHOT},
            snapshot.to_dict(),
        ),
        MockCloudEvent(
            {"type": ids.EVTYPE_EE_SNAPSHOT_UPDATE},
            update.to_dict(),
        ),
        MockCloudEvent({"type": ids.EVTYPE_EE_TERMINATED}, {}),
    ]

    def _track():
        while True:
            try:
                yield events.pop(0)
            except IndexError:
                return

    return MagicMock(track=MagicMock(side_effect=_track))
Example no. 13
 def _flush(self, batch: List[CloudEvent]) -> SnapshotUpdateEvent:
     iter_: int = batch[0].data["iter"]
     partial: PartialSnapshot = PartialSnapshot(self._iter_snapshot[iter_])
     for event in batch:
         partial.from_cloudevent(event)
     self._iter_snapshot[iter_].merge_event(partial)
     update_event = SnapshotUpdateEvent(
         phase_name=self._model.getPhaseName(),
         current_phase=self._model.currentPhase(),
         total_phases=self._model.phaseCount(),
         indeterminate=self._model.isIndeterminate(),
         progress=self._progress(),
         iteration=iter_,
         partial_snapshot=partial,
     )
     for _ in range(len(batch)):
         self._work_queue.task_done()
     return update_event
Example no. 14
def test_snapshot_merge():
    snapshot = _create_snapshot()

    update_event = PartialSnapshot()
    update_event.update_status(status="running")

    snapshot.merge_event(update_event)

    assert snapshot.get_status() == "running"

    update_event = PartialSnapshot()
    update_event.update_job(
        real_id="1",
        stage_id="0",
        step_id="0",
        job_id="0",
        status="Finished",
        start_time=datetime(year=2020, month=10, day=27).isoformat(),
        end_time=datetime(year=2020, month=10, day=28).isoformat(),
        data={"memory": 1000},
    )
    update_event.update_job(
        real_id="1",
        stage_id="0",
        step_id="0",
        job_id="1",
        status="Running",
        start_time=datetime(year=2020, month=10, day=27).isoformat(),
    )
    update_event.update_job(
        real_id="9",
        stage_id="0",
        step_id="0",
        job_id="0",
        status="Running",
        start_time=datetime(year=2020, month=10, day=27).isoformat(),
    )

    snapshot.merge_event(update_event)

    assert snapshot.get_status() == "running"

    assert _dict_equal(
        snapshot.get_job(real_id="1", stage_id="0", step_id="0", job_id="0"),
        {
            "status": "Finished",
            "start_time": "2020-10-27T00:00:00",
            "end_time": "2020-10-28T00:00:00",
            "data": {
                "memory": 1000
            },
            "error": None,
            "name": "job0",
            "stderr": None,
            "stdout": None,
        },
    )
    assert snapshot.get_job(real_id="1", stage_id="0", step_id="0",
                            job_id="1") == {
                                "status": "Running",
                                "start_time": "2020-10-27T00:00:00",
                                "end_time": None,
                                "data": {},
                                "error": None,
                                "name": "job1",
                                "stderr": None,
                                "stdout": None,
                            }

    assert (snapshot.get_job(real_id="9",
                             stage_id="0",
                             step_id="0",
                             job_id="0")["status"] == "Running")
    assert snapshot.get_job(real_id="9", stage_id="0", step_id="0",
                            job_id="0") == {
                                "status": "Running",
                                "start_time": "2020-10-27T00:00:00",
                                "end_time": None,
                                "data": {},
                                "error": None,
                                "name": "job0",
                                "stderr": None,
                                "stdout": None,
                            }
Example no. 15
    def _add_partial_snapshot(self, partial: PartialSnapshot, iter_: int):
        metadata = partial.data().get(ids.METADATA)
        if not metadata:
            logger.debug("no metadata in partial, ignoring partial")
            return

        if iter_ not in self.root.children:
            logger.debug("no full snapshot yet, ignoring partial")
            return

        if not partial.data().get(ids.REALS):
            logger.debug(f"no realizations in partial for iter {iter_}")
            return

        # Stack onto which we push change events for entities, since we branch
        # the code based on what is in the partial. This way we're guaranteed
        # that the change events will be emitted when the stack is unwound.
        with ExitStack() as stack:
            iter_node = self.root.children[iter_]
            iter_index = self.index(iter_node.row(), 0, QModelIndex())
            iter_index_bottom_right = self.index(iter_node.row(),
                                                 iter_index.column(),
                                                 QModelIndex())
            stack.callback(self.dataChanged.emit, iter_index,
                           iter_index_bottom_right)

            for real_id in iter_node.data[SORTED_REALIZATION_IDS]:
                real = partial.data()[ids.REALS].get(real_id)
                if not real:
                    continue
                real_node = iter_node.children[real_id]
                if real.get(ids.STATUS):
                    real_node.data[ids.STATUS] = real[ids.STATUS]

                real_index = self.index(real_node.row(), 0, iter_index)
                real_index_bottom_right = self.index(
                    real_node.row(),
                    self.columnCount(iter_index) - 1, iter_index)
                stack.callback(self.dataChanged.emit, real_index,
                               real_index_bottom_right)

                for job_id, color in (metadata[REAL_JOB_STATUS_AGGREGATED].get(
                        real_id, {}).items()):
                    real_node.data[REAL_JOB_STATUS_AGGREGATED][job_id] = color
                if real_id in metadata[REAL_STATUS_COLOR]:
                    real_node.data[REAL_STATUS_COLOR] = metadata[
                        REAL_STATUS_COLOR][real_id]

                if not real.get(ids.STEPS):
                    continue

                for step_id, step in real[ids.STEPS].items():
                    step_node = real_node.children[step_id]
                    if step.get(ids.STATUS):
                        step_node.data[ids.STATUS] = step[ids.STATUS]

                    step_index = self.index(step_node.row(), 0, real_index)

                    if not step.get(ids.JOBS):
                        continue

                    for job_id, job in step[ids.JOBS].items():
                        job_node = step_node.children[job_id]

                        job_index = self.index(job_node.row(), 0, step_index)
                        job_index_bottom_right = self.index(
                            job_node.row(),
                            self.columnCount() - 1, step_index)
                        stack.callback(self.dataChanged.emit, job_index,
                                       job_index_bottom_right)

                        if job.get(ids.STATUS):
                            job_node.data[ids.STATUS] = job[ids.STATUS]
                        if job.get(ids.START_TIME):
                            job_node.data[ids.START_TIME] = job[ids.START_TIME]
                        if job.get(ids.END_TIME):
                            job_node.data[ids.END_TIME] = job[ids.END_TIME]
                        if job.get(ids.STDOUT):
                            job_node.data[ids.STDOUT] = job[ids.STDOUT]
                        if job.get(ids.STDERR):
                            job_node.data[ids.STDERR] = job[ids.STDERR]

                        # Errors may be unset as the queue restarts the job
                        job_node.data[ids.ERROR] = (job[ids.ERROR] if job.get(
                            ids.ERROR) else "")

                        for attr in (ids.CURRENT_MEMORY_USAGE,
                                     ids.MAX_MEMORY_USAGE):
                            if job.get(ids.DATA) and attr in job.get(ids.DATA):
                                job_node.data[ids.DATA] = job_node.data[
                                    ids.DATA].set(attr,
                                                  job.get(ids.DATA).get(attr))
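
The comment about the stack in this method refers to contextlib.ExitStack: callbacks registered with stack.callback() run in reverse order of registration when the with-block unwinds, even if a loop iteration bails out early with continue. A self-contained illustration of that behaviour:

from contextlib import ExitStack

emitted = []
with ExitStack() as stack:
    # registered first, runs last
    stack.callback(emitted.append, "iter-level dataChanged")
    # registered last, runs first
    stack.callback(emitted.append, "job-level dataChanged")
# callbacks fire LIFO once the with-block exits
assert emitted == ["job-level dataChanged", "iter-level dataChanged"]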
Example no. 16
    def _add_partial_snapshot(self, partial: PartialSnapshot, iter_: int):
        partial_dict = partial.to_dict()
        partial_s = SnapshotDict(**partial_dict)
        if iter_ not in self.root.children:
            logger.debug("no full snapshot yet, bailing")
            return
        iter_index = self.index(iter_, 0, QModelIndex())
        iter_node = self.root.children[iter_]
        if not partial_s.reals:
            logger.debug(f"no realizations in partial for iter {iter_}")
            return
        for real_id in sorted(partial_s.reals, key=int):
            real = partial_s.reals[real_id]
            real_node = iter_node.children[real_id]
            if real.status:
                real_node.data[ids.STATUS] = real.status

            real_index = self.index(real_node.row(), 0, iter_index)
            real_index_bottom_right = self.index(
                real_node.row(),
                self.columnCount(iter_index) - 1, iter_index)

            if not real.steps:
                continue

            for step_id, step in real.steps.items():
                step_node = real_node.children[step_id]
                if step.status:
                    step_node.data[ids.STATUS] = step.status

                step_index = self.index(step_node.row(), 0, real_index)
                step_index_bottom_right = self.index(
                    step_node.row(),
                    self.columnCount(real_index) - 1, real_index)

                if not step.jobs:
                    continue

                for job_id in sorted(step.jobs, key=int):
                    job = step.jobs[job_id]
                    job_node = step_node.children[job_id]

                    if job.status:
                        job_node.data[ids.STATUS] = job.status
                    if job.start_time:
                        job_node.data[ids.START_TIME] = job.start_time
                    if job.end_time:
                        job_node.data[ids.END_TIME] = job.end_time
                    if job.stdout:
                        job_node.data[ids.STDOUT] = job.stdout
                    if job.stderr:
                        job_node.data[ids.STDERR] = job.stderr

                    # Errors may be unset as the queue restarts the job
                    job_node.data[ids.ERROR] = job.error if job.error else ""

                    for attr in (ids.CURRENT_MEMORY_USAGE,
                                 ids.MAX_MEMORY_USAGE):
                        if job.data and attr in job.data:
                            job_node.data[ids.DATA][attr] = job.data.get(attr)

                    job_index = self.index(job_node.row(), 0, step_index)
                    job_index_bottom_right = self.index(
                        job_node.row(),
                        self.columnCount() - 1, step_index)
                    self.dataChanged.emit(job_index, job_index_bottom_right)
                self.dataChanged.emit(step_index, step_index_bottom_right)
            self.dataChanged.emit(real_index, real_index_bottom_right)
            # TODO: there is no check that any of the data *actually* changed
            # https://github.com/equinor/ert/issues/1374

        top_left = self.index(0, 0, iter_index)
        bottom_right = self.index(0, 1, iter_index)
        self.dataChanged.emit(top_left, bottom_right)
Example no. 17
 async def _ensemble_stopped_handler(self, event):
     if self._snapshot.get_status() != "Failure":
         snapshot_mutate_event = PartialSnapshot(
             self._snapshot).from_cloudevent(event)
         await self._send_snapshot_update(snapshot_mutate_event)
Example no. 18
 async def _fm_handler(self, event):
     snapshot_mutate_event = PartialSnapshot(
         self._snapshot).from_cloudevent(event)
     await self._send_snapshot_update(snapshot_mutate_event)
Example no. 19
 async def _ensemble_failed_handler(self, event):
     if self._snapshot.get_status() not in ["Stopped", "Cancelled"]:
         snapshot_mutate_event = PartialSnapshot(
             self._snapshot).from_cloudevent(event)
         await self._send_snapshot_update(snapshot_mutate_event)
Example no. 20
    def _create_partial_snapshot(
        self,
        run_context: ErtRunContext,
        detailed_progress: typing.Tuple[typing.Dict, int],
        iter_: int,
    ) -> typing.Optional[PartialSnapshot]:
        """Create a PartialSnapshot, or None if the sources of data were
        destroyed or had not been created yet. Both run_context and
        detailed_progress need to be aligned with the stars if job status etc.
        is to be produced. If queue_snapshot is set, the differ will not be
        used to calculate changes."""
        queue = self._iter_queue.get(iter_, None)
        if queue is None:
            logger.debug(f"no queue for {iter_}, no partial returned")
            return None
        queue_snapshot = queue.snapshot()

        snapshot = self._iter_snapshot.get(iter_, None)
        if snapshot is None:
            logger.debug(f"no snapshot for {iter_}, no partial returned")
            return None

        partial = PartialSnapshot(snapshot)

        if queue_snapshot is not None:
            for iens, change in queue_snapshot.items():
                change_enum = JobStatusType.from_string(change)
                partial.update_real(
                    str(iens),
                    Realization(
                        status=queue_status_to_real_state(change_enum)),
                )
        iter_to_progress, progress_iter = detailed_progress
        if not iter_to_progress:
            logger.debug(f"partial: no detailed progress for iter:{iter_}")
            return partial
        if iter_ != progress_iter:
            logger.debug(
                f"partial: iter_to_progress iter ({progress_iter}) differed from run_context ({iter_})"
            )

        for iens, _ in _enumerate_run_context(run_context):
            if not _is_iens_active(iens, run_context):
                continue

            progress = iter_to_progress[iter_].get(iens, None)
            if not progress:
                continue

            jobs = progress[0]
            for idx, fm in enumerate(jobs):
                partial.update_job(
                    str(iens),  # real_id
                    "0",
                    str(idx),
                    Job(
                        status=_map_job_state(fm.status),
                        start_time=fm.start_time,
                        end_time=fm.end_time,
                        data={
                            CURRENT_MEMORY_USAGE: fm.current_memory_usage,
                            MAX_MEMORY_USAGE: fm.max_memory_usage,
                        },
                        stdout=fm.std_out_file,
                        stderr=fm.std_err_file,
                        error=fm.error,
                    ),
                )

        return partial
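
A minimal sketch of the queue-driven half of this method, with a plain dict standing in for queue.snapshot() and its state mapping; only constructs that appear elsewhere in these examples (update_real, Realization, the realization state constants) are used, and `snapshot` is assumed to be a full snapshot as in the tests above.

# Hypothetical stand-in for queue.snapshot(): realization index -> state.
queue_changes = {0: REALIZATION_STATE_UNKNOWN, 1: REALIZATION_STATE_FINISHED}

partial = PartialSnapshot(snapshot)
for iens, real_state in queue_changes.items():
    partial.update_real(str(iens), Realization(status=real_state))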
Example no. 21
 async def _ensemble_cancelled_handler(self, event):
     snapshot_mutate_event = PartialSnapshot.from_cloudevent(event)
     await self._send_snapshot_update(snapshot_mutate_event)
     self._stop()
Example no. 22
def test_snapshot_merge(snapshot):
    update_event = PartialSnapshot(snapshot)
    update_event.update_status(status="running")

    snapshot.merge_event(update_event)

    assert snapshot.get_status() == "running"

    update_event = PartialSnapshot(snapshot)
    update_event.update_job(
        real_id="1",
        step_id="0",
        job_id="0",
        job=Job(
            status="Finished",
            start_time=datetime(year=2020, month=10, day=27),
            end_time=datetime(year=2020, month=10, day=28),
            data={"memory": 1000},
        ),
    )
    update_event.update_job(
        real_id="1",
        step_id="0",
        job_id="1",
        job=Job(
            status="Running",
            start_time=datetime(year=2020, month=10, day=27),
        ),
    )
    update_event.update_job(
        real_id="9",
        step_id="0",
        job_id="0",
        job=Job(
            status="Running",
            start_time=datetime(year=2020, month=10, day=27),
        ),
    )

    snapshot.merge_event(update_event)

    assert snapshot.get_status() == "running"

    assert snapshot.get_job(real_id="1", step_id="0", job_id="0") == Job(
        status="Finished",
        start_time=datetime(year=2020, month=10, day=27),
        end_time=datetime(year=2020, month=10, day=28),
        data={"memory": 1000},
        error=None,
        name="job0",
        stderr=None,
        stdout=None,
    )

    assert snapshot.get_job(real_id="1", step_id="0", job_id="1") == Job(
        status="Running",
        start_time=datetime(year=2020, month=10, day=27),
        end_time=None,
        data={},
        error=None,
        name="job1",
        stderr=None,
        stdout=None,
    )

    assert snapshot.get_job(real_id="9", step_id="0",
                            job_id="0").status == "Running"
    assert snapshot.get_job(real_id="9", step_id="0", job_id="0") == Job(
        status="Running",
        start_time=datetime(year=2020, month=10, day=27),
        end_time=None,
        data={},
        error=None,
        name="job0",
        stderr=None,
        stdout=None,
    )
Example no. 23
def partial_snapshot(snapshot) -> PartialSnapshot:
    partial = PartialSnapshot(snapshot)
    partial.update_real("0", Realization(status=JOB_STATE_FINISHED))
    partial.update_job("0", "0", "0", Job(status=JOB_STATE_FINISHED))
    return partial
Example no. 24
                     data={},
                     status=state.JOB_STATE_START,
                 )
                 .build(["0"], state.REALIZATION_STATE_UNKNOWN)
             ),
             phase_name="Foo",
             current_phase=0,
             total_phases=1,
             progress=0.25,
             iteration=0,
             indeterminate=False,
         ),
         SnapshotUpdateEvent(
             partial_snapshot=PartialSnapshot(
                 SnapshotBuilder().build(
                     [], status=state.REALIZATION_STATE_FINISHED
                 )
             ),
             phase_name="Foo",
             current_phase=0,
             total_phases=1,
             progress=0.5,
             iteration=0,
             indeterminate=False,
         ),
         EndEvent(failed=False, failed_msg=""),
     ],
     1,
     id="real_less_partial",
 ),
 pytest.param(
Example no. 25
def build_partial(real_list: List[str] = ["0"]):
    partial = PartialSnapshot(build_snapshot(real_list))
    return partial