Code Example #1
File: _mixins.py Project: wtsi-hgi/hgi-registry
    def has_expired(self) -> bool:
        """ Has our entity expired? """
        if self._last_updated is None:
            return True

        age = time.now() - self._last_updated
        return age > self._shelf_life
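
A minimal, self-contained sketch of how this shelf-life check behaves; the ExpiryDemo class and its attribute values are hypothetical, and the standard-library datetime module stands in for the project's own time wrapper.

from datetime import datetime, timedelta

class ExpiryDemo:
    """ Hypothetical host for the expiry check's attributes """
    def __init__(self, shelf_life: timedelta) -> None:
        self._shelf_life = shelf_life
        self._last_updated = None   # never updated, so has_expired() is True

    def has_expired(self) -> bool:
        if self._last_updated is None:
            return True
        age = datetime.now() - self._last_updated
        return age > self._shelf_life

entity = ExpiryDemo(shelf_life=timedelta(minutes=5))
assert entity.has_expired()            # no update recorded yet
entity._last_updated = datetime.now()
assert not entity.has_expired()        # fresh for the next five minutes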
Code Example #2
File: _bases.py Project: wtsi-hgi/hgi-registry
    def _adaptor(result) -> T.Tuple[str, BaseNode]:
        # Build a node from a raw (DN, payload) pair: the node's identity is
        # the RDN extracted from the DN; attach the payload and stamp the
        # update time
        dn, payload = result
        identity = cls.extract_rdn(dn)
        node = cls(identity, self)
        node._entity._payload = payload
        node._last_updated = time.now()
        return dn, node
Code Example #3
File: types.py Project: wtsi-hgi/shepherd
    def runtime(self) -> T.TimeDelta:
        """ Phase runtime """
        if self.start is None:
            raise PeriodNotStarted("Period has yet to start")

        until = self.finish or time.now()
        return until - self.start
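
A rough sketch of the open-ended-period behaviour: while finish is unset, the runtime is measured up to the current moment. PeriodDemo and the PeriodNotStarted stand-in are defined here purely for illustration, with datetime in place of the project's time wrapper.

from datetime import datetime, timedelta
from typing import Optional

class PeriodNotStarted(Exception):
    """ Stand-in for the project's exception of the same name """

class PeriodDemo:
    """ Hypothetical phase period: open-ended until finish is set """
    def __init__(self) -> None:
        self.start: Optional[datetime] = None
        self.finish: Optional[datetime] = None

    @property
    def runtime(self) -> timedelta:
        if self.start is None:
            raise PeriodNotStarted("Period has yet to start")
        until = self.finish or datetime.now()
        return until - self.start

period = PeriodDemo()
try:
    period.runtime                     # raises: the period has no start time
except PeriodNotStarted:
    pass

period.start = datetime.now() - timedelta(seconds=30)
print(period.runtime)                  # roughly 30 seconds and still counting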
Code Example #4
    def generate_latest_count(self, vmid, record=False):
        # Fetch the current follower count; either persist it via the meta
        # store (record=True) or just return it alongside a timestamp
        data = get_data(vmid)
        followers_count = data['data']['follower']

        if record:
            return self._meta_store.insert_latest_count(
                count=followers_count).json
        else:
            return {
                "bili_follower_number": followers_count,
                "bili_record_time": now().strftime("%Y年%m月%d日 %H:%M:%S")
            }
Code Example #5
File: _mixins.py Project: wtsi-hgi/hgi-registry
    async def update(self) -> None:
        # Run the entity's updator coroutine, then record the refresh time
        await self.__updator__()
        self._last_updated = time.now()
Code Example #6
File: test_time.py Project: wtsi-hgi/hgi-registry
    def test_encoding(self):
        # Round-trip check: the JSON encoder should emit a quoted ISO 8601
        # string that strptime can parse back to the same (second-precision)
        # timestamp
        now = time.now().replace(microsecond=0)
        encoded = json.dumps(now, cls=time.JSONEncoder)
        self.assertEqual(now, datetime.strptime(encoded, f"\"{time.ISO8601}\""))
Code Example #7
def transfer(job_id: str) -> None:
    """ Transfer prepared tasks from Lustre to iRODS """
    _LOG_HEADER()

    state = _GET_STATE()
    state.register_filesystems(*_FILESYSTEMS)
    job = State.Job(state, client_id=_CLIENT, job_id=int(job_id))

    executor = _GET_EXECUTOR()
    worker = executor.worker

    log.info(f"Transfer phase: Worker {worker.id.worker}")

    # This is when we should wrap up
    deadline = _START_TIME + worker.limit(LSFWorkerLimit.Runtime) - _FUDGE_TIME

    # HACK: Load metadata
    with T.Path(job.metadata.shitty_metadata).open() as metadata_handle:
        metadata = json.load(metadata_handle)

    # Don't start the transfer phase until preparation has started
    while job.status.phase(_PREPARE).start is None:
        # Check we're not going to overrun the limit (which shouldn't
        # happen when just waiting for the preparation phase to start)
        if time.now() > deadline:
            log.info("Approaching runtime limit; terminating")
            sys.exit(0)

        log.info("Waiting for preparation phase to start...")
        sleep(_FUDGE_TIME.total_seconds())

    # Initialise the transfer phase (idempotent)
    job.status.phase(_TRANSFER).init()
    if job.status.complete:
        log.info("Nothing left do to for this worker")
        sys.exit(0)

    # Launch follow-on worker, in case we run out of time
    # NOTE DAISYCHAIN can be set to abort accidental LSF proliferation
    following = job.metadata.DAISYCHAIN == _DAISYCHAIN_TRUE
    if following:
        follow_on, follow_options = _transfer_worker(job_id,
                                                     T.Path(job.metadata.logs))
        follow_on.specific_worker = worker.id.worker
        follow_on += worker.id
        follow_runner, *_ = executor.submit(follow_on, follow_options)

        log.info(
            f"Follow-on worker submitted with LSF ID {follow_runner.job}; "
            "will cancel on completion")

    log.info("Starting transfers")

    while not job.status.complete:
        remaining_time = deadline - time.now()

        try:
            attempt = job.attempt(remaining_time)

        except NoTasksAvailable:
            # Check if we're done
            current = job.status

            if current.phase(_PREPARE) or current.pending > 0:
                # Preparation phase is still in progress, or there are
                # still pending tasks
                log.warning(
                    "Cannot complete any more tasks in the given run limit; terminating"
                )

            else:
                # All tasks have been prepared and none are pending, so
                # cancel the follow-on
                log.info("Nothing left do to for this worker")

                if following:
                    log.info(
                        f"Cancelling follow-on worker with LSF ID {follow_runner.job}"
                    )
                    executor.signal(follow_runner, SIGTERM)

                # If no tasks are in-flight, then we're finished
                if current.running == 0:
                    log.info(f"All tasks complete")
                    job.status.phase(_TRANSFER).stop()

            sys.exit(0)

        # TODO Py3.8 walrus operator would be good here
        success = attempt()
        if success:
            log.info(
                f"Successfully transferred and verified {_human_size(attempt.size(DataOrigin.Source))}B"
            )

            # HACK: Set metadata
            target = attempt.task.target
            log.info(
                f"Applying metadata to {target.address} on {target.filesystem}"
            )
            target.filesystem.set_metadata(
                target.address, **{
                    **metadata, "source": str(attempt.task.source.address)
                })
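
The core time-budget pattern in this function (stop a few minutes before the scheduler's hard runtime limit) can be distilled into a short sketch; start_time, runtime_limit and fudge_time are hypothetical stand-ins for _START_TIME, the worker's LSF runtime limit and _FUDGE_TIME.

from datetime import datetime, timedelta
from time import sleep

start_time = datetime.now()               # cf. _START_TIME
runtime_limit = timedelta(hours=12)       # cf. worker.limit(LSFWorkerLimit.Runtime)
fudge_time = timedelta(minutes=3)         # cf. _FUDGE_TIME

# Wrap up a little before the scheduler would kill the worker
deadline = start_time + runtime_limit - fudge_time

def approaching_limit() -> bool:
    """ True once we are inside the fudge window before the hard limit """
    return datetime.now() > deadline

while not approaching_limit():
    # ...do one unit of work, sized against (deadline - datetime.now())...
    sleep(1)
    break    # illustrative only: bail out after a single iteration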
Code Example #8
from lib import __version__ as lib_version
from lib.execution import types as Exec
from lib.execution.lsf import LSF, LSFSubmissionOptions
from lib.execution.lsf.context import LSFWorkerLimit
from lib.planning.route_factories import posix_to_irods_factory
from lib.planning.transformers import strip_common_prefix, prefix, telemetry, debugging
from lib.state import postgresql as State
from lib.state.exceptions import DataException, NoThroughputData, NoTasksAvailable
from lib.state.types import BasePhaseStatus, JobPhase, DependentTask, DataOrigin

_CLIENT = "dummy"

_BINARY = T.Path(sys.argv[0]).resolve()

# Approximate start time for the process, plus a conservative threshold
_START_TIME = time.now()
_FUDGE_TIME = time.delta(minutes=3)

_FILESYSTEMS = (POSIXFilesystem(name="Lustre", max_concurrency=50),
                iRODSFilesystem(name="iRODS", max_concurrency=10))

# These are lambdas because, at this point, we haven't yet checked that the
# necessary environment variables are set
_GET_EXECUTOR = lambda: LSF(T.Path(os.environ["LSF_CONFIG"]))
_GET_STATE = lambda: State.PostgreSQL(database=os.environ["PG_DATABASE"],
                                      user=os.environ["PG_USERNAME"],
                                      password=os.environ["PG_PASSWORD"],
                                      host=os.environ["PG_HOST"],
                                      port=int(os.getenv("PG_PORT", "5432")))

_LOG_HEADER = lambda: log.info(
Code Example #9
    def get_yesterday_data(self, vmid):
        # Latest record for this vmid, matched on day-of-month only
        # NOTE `now().day - 1` does not wrap at month boundaries, so this
        # matches nothing on the first day of the month
        return self._session.query(BLCountRecord)\
            .filter(extract('day', BLCountRecord.record_time) == now().day - 1,
                    BLCountRecord.vmid == vmid)\
            .order_by(desc(BLCountRecord.record_time))\
            .first()
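
As a hedged alternative to matching on day-of-month alone, the same query can be bounded by an explicit date window, which avoids the wrap-around problem on the first of the month. get_yesterday_data_by_range is a hypothetical name; it reuses BLCountRecord and self._session from the example above.

from datetime import datetime, timedelta
from sqlalchemy import desc

def get_yesterday_data_by_range(self, vmid):
    """ Latest record whose timestamp falls in yesterday's [start, end) window """
    today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    yesterday = today - timedelta(days=1)
    return self._session.query(BLCountRecord)\
        .filter(BLCountRecord.record_time >= yesterday,
                BLCountRecord.record_time < today,
                BLCountRecord.vmid == vmid)\
        .order_by(desc(BLCountRecord.record_time))\
        .first()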