Code example #1
    def __init__(
        self,
        coordinator_url: str,
        participant,
        p_args,
        p_kwargs,
        state,
        scalar,
    ):
        # xaynet rust participant
        self._xaynet_participant = xaynet_sdk.Participant(
            coordinator_url, scalar, state)

        # https://github.com/python/cpython/blob/3.9/Lib/multiprocessing/process.py#L80
        # Stores the Participant class together with its args and kwargs.
        # The participant is created in the `run` method to ensure that the
        # participant / ML model is initialized on the participant thread;
        # otherwise the participant lives on the main thread, which can cause
        # issues with some ML frameworks.
        self._participant = participant
        self._p_args = tuple(p_args)
        self._p_kwargs = dict(p_kwargs)

        self._exit_event = threading.Event()
        self._poll_period = Backoff(min_ms=100,
                                    max_ms=10000,
                                    factor=1.2,
                                    jitter=False)

        # global model cache
        self._global_model = None
        self._error_on_fetch_global_model = False

        self._tick_lock = threading.Lock()

        super().__init__(daemon=True)
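
The comment above explains why the ML participant must be constructed on the worker thread; here is a hypothetical `run` sketch (not the actual xaynet-sdk-python implementation) showing how `_exit_event` and `_poll_period` would cooperate, with `_tick` as an assumed per-round helper:

    def run(self):
        # Create the user's participant here so it lives on this thread.
        participant = self._participant(*self._p_args, **self._p_kwargs)
        while not self._exit_event.is_set():
            with self._tick_lock:
                self._tick(participant)  # hypothetical per-round step
            # Sleep for the current backoff, but wake early when stopped.
            self._exit_event.wait(self._poll_period.duration())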
Code example #2
    def __init__(
        self,
        coordinator_url: str,
        notifier,
        state,
        scalar,
    ):
        # xaynet rust participant
        self._xaynet_participant = xaynet_sdk.Participant(
            coordinator_url, scalar, state)

        self._exit_event = threading.Event()
        self._poll_period = Backoff(min_ms=100,
                                    max_ms=10000,
                                    factor=1.2,
                                    jitter=False)

        # new global model notifier
        self._notifier = notifier

        # Calls into an external lib are thread-safe (https://stackoverflow.com/a/42023362).
        # However, if a user calls `stop` in the middle of a `_tick` call, the
        # `save` method runs (consuming the participant) and every subsequent
        # call would fail on an uninitialized participant. Therefore we hold
        # the lock for the duration of `tick`.
        self._tick_lock = threading.Lock()

        super().__init__(daemon=True)
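
A sketch of the `stop` path that the locking comment above motivates (the shape is an assumption, though the comment itself names `save` and its consuming semantics):

    def stop(self):
        self._exit_event.set()
        # Taking the lock guarantees `save` never interleaves with `_tick`.
        with self._tick_lock:
            return self._xaynet_participant.save()  # consumes the participant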
Code example #3
    def __init__(self,
                 connection: Optional[Connection] = None,
                 host: Optional[str] = None,
                 port: Optional[int] = None,
                 loop: Optional[asyncio.AbstractEventLoop] = None,
                 update_interval: int = 60,
                 infer_arming_state: bool = False,
                 alarm: Optional[Alarm] = None):
        if connection is None:
            assert host is not None
            assert port is not None
            assert loop is not None
            connection = IP232Connection(host=host, port=port, loop=loop)

        if alarm is None:
            alarm = Alarm(infer_arming_state=infer_arming_state)

        self.alarm = alarm
        self._on_event_received: Optional[Callable[[BaseEvent], None]] = None
        self._connection = connection
        self._closed = False
        self._backoff = Backoff()
        self._connect_lock = asyncio.Lock()
        self._last_recv: Optional[datetime.datetime] = None
        self._update_interval = update_interval
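
A plausible reconnect helper for this client, sketched under the assumption that `Connection` exposes an awaitable `connect()` (the excerpt does not show the real method): serialize attempts through `_connect_lock` and back off between failures, resetting once a connection succeeds.

    async def _connect(self):
        async with self._connect_lock:
            while not self._closed:
                try:
                    await self._connection.connect()  # assumed Connection API
                    self._backoff.reset()  # healthy: next failure starts small
                    return
                except OSError:
                    await asyncio.sleep(self._backoff.duration())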
Code example #4
    def test_integers(self):
        b = Backoff(min_ms=100, max_ms=10000, factor=2)

        self.assertEqual(b.duration(), to_seconds(100.0))
        self.assertEqual(b.duration(), to_seconds(200.0))
        self.assertEqual(b.duration(), to_seconds(400.0))
        b.reset()
        self.assertEqual(b.duration(), to_seconds(100.0))
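
The tests call a `to_seconds` helper that is not shown in the excerpt. Given that example #7 below asserts `duration() == 1.0` for `max_ms=1000.0`, it presumably just converts milliseconds to seconds:

def to_seconds(ms):
    # Assumed helper: the constructor takes milliseconds, duration() returns seconds.
    return ms / 1000.0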
Code example #5
    def test_jitter(self):
        b = Backoff(min_ms=100.0, max_ms=10000.0, factor=2.0, jitter=True)

        self.assertEqual(b.duration(), to_seconds(100.0))
        self.assert_between(b.duration(), to_seconds(100.0), to_seconds(200.0))
        self.assert_between(b.duration(), to_seconds(100.0), to_seconds(400.0))
        b.reset()
        self.assertEqual(b.duration(), to_seconds(100.0))
Code example #6
    def test_factor(self):
        b = Backoff(min_ms=100, max_ms=10000, factor=1.5)

        self.assertEqual(b.duration(), to_seconds(100.0))
        self.assertEqual(b.duration(), to_seconds(150.0))
        self.assertEqual(b.duration(), to_seconds(225.0))
        b.reset()
        self.assertEqual(b.duration(), to_seconds(100.0))
Code example #7
    def test_min_bigger_than_max(self):
        b = Backoff(min_ms=10000.0, max_ms=1000.0, factor=2)

        self.assertEqual(b.duration(), 1.0)
        self.assertEqual(b.duration(), 1.0)
        self.assertEqual(b.duration(), 1.0)
        b.reset()
        self.assertEqual(b.duration(), 1.0)
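
Taken together, these four tests pin down the behaviour: the n-th duration is min_ms * factor**n, jitter spreads each value uniformly between min_ms and that deterministic value (so the first jittered call still returns exactly min_ms), and the result is capped at max_ms, which is why min_ms > max_ms collapses to a constant. A reference model inferred from the assertions, not copied from the justbackoff source:

import random

def model_duration(attempt, min_ms, max_ms, factor, jitter=False):
    # Inferred from the tests above; an approximation, not the library code.
    dur = min_ms * (factor ** attempt)
    if jitter:
        # At attempt 0 this is random * 0 + min_ms, i.e. exactly min_ms.
        dur = random.random() * (dur - min_ms) + min_ms
    return min(dur, max_ms) / 1000.0  # duration() reports seconds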
Code example #8
    def __init__(self, name, logger):
        super().__init__()
        self.name = name
        self.logger = logger.bind(worker=name)
        self.exit = False
        self.event = threading.Event()
        self.backoff = Backoff(min_ms=100,
                               max_ms=30000,
                               factor=2,
                               jitter=False)
        self.lastWorked = time.time()
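
The fields suggest the usual worker pattern: do a unit of work, reset the backoff on progress, and otherwise wait on `self.event` with the current backoff as the timeout so a stop signal interrupts the sleep promptly. A hypothetical sketch (the real `run` is not shown, and `work()` is an assumed hook):

    def run(self):
        while not self.exit:
            try:
                if self.work():  # assumed per-iteration hook
                    self.lastWorked = time.time()
                    self.backoff.reset()  # progress made: poll fast again
            except Exception:
                self.logger.exception('worker iteration failed')
            # Sleep with backoff, waking immediately if the event is set.
            self.event.wait(self.backoff.duration())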
Code example #9
    def __init__(self, logger):
        super().__init__('ICloud photo library scraper', logger)
        self.backoff = Backoff(min_ms=1000,
                               max_ms=60000,
                               factor=2,
                               jitter=False)
        self.iterator = None
        self.current = None
        # os.stat() raises FileNotFoundError instead of returning None, so
        # check for the directory explicitly before creating it.
        if not os.path.exists(STORAGE_DIR):
            os.mkdir(STORAGE_DIR)
        with open(os.path.join(AUTH_DIR, 'icloud.json'), 'r') as file:
            credentials = json.load(file)
        self._icloud = PyiCloudService(credentials['username'],
                                       password=credentials['password'],
                                       cookie_directory=AUTH_DIR)
Code example #10
import argparse
import os
import time
import logging
import datetime
from justbackoff import Backoff
from bioblend import galaxy
from xunit_wrapper import xunit, xunit_suite, xunit_dump

logging.basicConfig(format='[%(asctime)s][%(lineno)d][%(module)s] %(message)s',
                    level=logging.DEBUG)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("bioblend").setLevel(logging.WARNING)
NOW = datetime.datetime.now()
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
backoff = Backoff(min_ms=100, max_ms=1000 * 60 * 5, factor=2, jitter=False)
BUILD_ID = os.environ.get('BUILD_NUMBER', 'Manual')


def __main__():
    parser = argparse.ArgumentParser(
        description=
        """Script to run all workflows mentioned in workflows_to_test.
    It will import the shared workflows and create histories for each workflow run, prefixed with ``TEST_RUN_<date>:``.
    Make sure the yaml has file names identical to those in the data library."""
    )

    parser.add_argument(
        '-k',
        '--api-key',
        '--key',
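
The module-level `backoff` above is presumably consumed by a polling loop further down the script. A hypothetical sketch of that pattern, using only a real bioblend call (`gi.histories.show_history`); the helper itself is not part of the original:

def wait_on_history(gi, history_id):
    # Hypothetical helper: poll a Galaxy history until it reaches a terminal
    # state, sleeping with the shared exponential backoff between checks.
    backoff.reset()
    while True:
        state = gi.histories.show_history(history_id)['state']
        if state in ('ok', 'error'):  # terminal history states
            return state
        time.sleep(backoff.duration())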
Code example #11
 def setUp(self):
     self.b = Backoff(min_ms=100.0, max_ms=10000.0, factor=2.0)
Code example #12
import json
import time

from justbackoff import Backoff


def cli(ctx,
        workflow_id,
        invocation_id,
        exit_early=False,
        backoff_min=1,
        backoff_max=60):
    """Given a workflow and invocation id, wait until that invocation is
    complete (or one or more steps have errored)

    This will exit with the following error codes:

    - 0: done successfully
    - 1: running (if --exit_early)
    - 2: failure
    - 3: unknown
    """
    backoff = Backoff(min_ms=backoff_min * 1000,
                      max_ms=backoff_max * 1000,
                      factor=2,
                      jitter=True)

    prev_state = None
    while True:
        # Fetch the current state
        latest_state = ctx.gi.workflows.show_invocation(
            workflow_id, invocation_id)
        # Get step states
        states = [
            step['state'] for step in latest_state['steps']
            if step['state'] is not None and step['state'] != 'deleted'
        ]
        # Get a str based state representation
        state_rep = '|'.join(map(str, states))

        if state_rep != prev_state:
            backoff.reset()
        prev_state = state_rep

        # If it's scheduled, then let's look at steps. Otherwise steps probably don't exist yet.
        if latest_state['state'] == 'scheduled':
            ctx.vlog("Checking workflow %s states: %s", workflow_id, state_rep)

            if exit_early:
                print(json.dumps({'state': 'running', 'job_states': states}))
                ctx.exit(1)

            # Conditions which must be true for all jobs before we can be done
            if all([state == 'ok' for state in states]):
                print(json.dumps({'state': 'done', 'job_states': states}))
                ctx.exit(0)

            # Conditions on which to exit immediately (i.e. due to a failure)
            if any([state in ('error', 'paused') for state in states]):
                print(json.dumps({'state': 'failure', 'job_states': states}))
                ctx.exit(2)
        else:
            ctx.vlog("Waiting for invocation to be scheduled")

            if exit_early:
                print(json.dumps({'state': 'unscheduled'}))
                ctx.exit(0)

        time.sleep(backoff.duration())
    # Unreachable as written (the loop above only returns via ctx.exit), but
    # kept as a defensive fallback for the "unknown" exit code.
    ctx.exit(3)
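
The reset-on-change idea above (restart the backoff whenever the observed state string changes, so a progressing invocation is polled quickly while a stuck one is polled rarely) generalizes beyond Galaxy; a standalone sketch:

import time

from justbackoff import Backoff


def poll_until(fetch_state, is_done, min_ms=1000, max_ms=60000):
    # Generalized form of the loop above: back off while the state is static,
    # reset to fast polling the moment anything changes.
    backoff = Backoff(min_ms=min_ms, max_ms=max_ms, factor=2, jitter=True)
    prev = None
    while True:
        state = fetch_state()
        if state != prev:
            backoff.reset()
        prev = state
        if is_done(state):
            return state
        time.sleep(backoff.duration())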