Example #1
def test__read_text(c, s, a, b):
    with make_hdfs() as (hdfs, basedir):
        with hdfs.open('%s/text.1.txt' % basedir, 'wb') as f:
            f.write('Alice 100\nBob 200\nCharlie 300'.encode())

        with hdfs.open('%s/text.2.txt' % basedir, 'wb') as f:
            f.write('Dan 400\nEdith 500\nFrank 600'.encode())

        with hdfs.open('%s/other.txt' % basedir, 'wb') as f:
            f.write('a b\nc d'.encode())

        b = db.read_text('hdfs://%s/text.*.txt' % basedir)
        yield gen.sleep(0.5)
        assert not s.tasks

        import dask
        b.compute(get=dask.get)

        coll = b.str.strip().str.split().map(len)

        future = c.compute(coll)
        yield gen.sleep(0.5)
        result = yield future._result()
        assert result == [2, 2, 2, 2, 2, 2]

        b = db.read_text('hdfs://%s/other.txt' % basedir)
        b = c.persist(b)
        future = c.compute(b.str.split().concat())
        result = yield future._result()
        assert result == ['a', 'b', 'c', 'd']
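
The make_hdfs fixture used above is not shown. A plausible sketch using the hdfs3 client, matching the (hdfs, basedir) tuple the with-statement unpacks; the host, port, and base directory are assumptions:

from contextlib import contextmanager

@contextmanager
def make_hdfs():
    from hdfs3 import HDFileSystem  # assumed client library
    hdfs = HDFileSystem(host='localhost', port=8020)  # assumed namenode
    basedir = '/tmp/test-dask'
    hdfs.mkdir(basedir)
    try:
        yield hdfs, basedir
    finally:
        hdfs.rm(basedir, recursive=True)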
Example #2
def test_scale_retires_workers():
    class MyCluster(LocalCluster):
        def scale_down(self, *args, **kwargs):
            pass

    loop = IOLoop.current()
    cluster = yield MyCluster(0, scheduler_port=0, processes=False,
                              silence_logs=False, diagnostics_port=None,
                              loop=loop, asynchronous=True)
    c = yield Client(cluster, loop=loop, asynchronous=True)

    assert not cluster.workers

    yield cluster.scale(2)

    start = time()
    while len(cluster.scheduler.workers) != 2:
        yield gen.sleep(0.01)
        assert time() < start + 3

    yield cluster.scale(1)

    start = time()
    while len(cluster.scheduler.workers) != 1:
        yield gen.sleep(0.01)
        assert time() < start + 3

    yield c.close()
    yield cluster.close()
Example #3
def test_failed_worker_without_warning(c, s, a, b):
    L = c.map(inc, range(10))
    yield wait(L)

    original_pid = a.pid
    with ignoring(CommClosedError):
        yield c._run(os._exit, 1, workers=[a.worker_address])
    start = time()
    while a.pid == original_pid:
        yield gen.sleep(0.01)
        assert time() - start < 10

    yield gen.sleep(0.5)

    start = time()
    while len(s.ncores) < 2:
        yield gen.sleep(0.01)
        assert time() - start < 10

    yield wait(L)

    L2 = c.map(inc, range(10, 20))
    yield wait(L2)
    assert all(len(keys) > 0 for keys in s.has_what.values())
    ncores2 = dict(s.ncores)

    yield c._restart()

    L = c.map(inc, range(10))
    yield wait(L)
    assert all(len(keys) > 0 for keys in s.has_what.values())

    assert not (set(ncores2) & set(s.ncores))  # no overlap
Example #4
def run_worker(context):
    poller = Poller()

    liveness = HEARTBEAT_LIVENESS
    interval = INTERVAL_INIT

    heartbeat_at = time.time() + HEARTBEAT_INTERVAL

    worker = yield worker_socket(context, poller)
    cycles = 0
    while True:
        socks = yield poller.poll(HEARTBEAT_INTERVAL * 1000)
        socks = dict(socks)

        # Handle worker activity on backend
        if socks.get(worker) == zmq.POLLIN:
            #  Get message
            #  - 3-part envelope + content -> request
            #  - 1-part HEARTBEAT -> heartbeat
            frames = yield worker.recv_multipart()
            if not frames:
                break    # Interrupted

            if len(frames) == 3:
                # Simulate various problems, after a few cycles
                cycles += 1
                if cycles > 3 and randint(0, 5) == 0:
                    print("I: Simulating a crash")
                    break
                if cycles > 3 and randint(0, 5) == 0:
                    print("I: Simulating CPU overload")
                    yield gen.sleep(3)
                print("I: Normal reply")
                yield worker.send_multipart(frames)
                liveness = HEARTBEAT_LIVENESS
                yield gen.sleep(1)  # Do some heavy work
            elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT:
                print("I: Queue heartbeat")
                liveness = HEARTBEAT_LIVENESS
            else:
                print("E: Invalid message: %s" % frames)
            interval = INTERVAL_INIT
        else:
            liveness -= 1
            if liveness == 0:
                print("W: Heartbeat failure, can't reach queue")
                print("W: Reconnecting in %0.2fs..." % interval)
                yield gen.sleep(interval)

                if interval < INTERVAL_MAX:
                    interval *= 2
                poller.unregister(worker)
                worker.setsockopt(zmq.LINGER, 0)
                worker.close()
                worker = yield worker_socket(context, poller)
                liveness = HEARTBEAT_LIVENESS
        if time.time() > heartbeat_at:
            heartbeat_at = time.time() + HEARTBEAT_INTERVAL
            print("I: Worker heartbeat")
            yield worker.send(PPP_HEARTBEAT)
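
The run_worker loop above follows the ZeroMQ "Paranoid Pirate" worker pattern and assumes several module-level names. Below is a minimal sketch of plausible definitions, assuming pyzmq's tornado-compatible future sockets; the frame values follow the zguide convention, while the endpoint and numeric settings are illustrative assumptions:

from random import randint  # used by run_worker's failure simulation

import zmq
from zmq.eventloop.future import Context, Poller  # future-returning sockets; caller creates Context()
from tornado import gen

HEARTBEAT_LIVENESS = 3     # missed heartbeats before the queue is presumed dead
HEARTBEAT_INTERVAL = 1.0   # seconds between heartbeats
INTERVAL_INIT = 1          # initial reconnect delay, seconds
INTERVAL_MAX = 32          # cap for the exponential reconnect backoff
PPP_READY = b"\x01"        # worker announces itself with this frame
PPP_HEARTBEAT = b"\x02"    # single-frame heartbeat message

@gen.coroutine
def worker_socket(context, poller):
    """Create, register, and announce a new worker socket."""
    worker = context.socket(zmq.DEALER)
    poller.register(worker, zmq.POLLIN)
    worker.connect("tcp://localhost:5556")  # queue endpoint is an assumption
    yield worker.send(PPP_READY)
    raise gen.Return(worker)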
Example #5
def test_upload_large_file(c, s, a, b):
    pytest.importorskip('crick')
    yield gen.sleep(0.05)
    with rpc(a.address) as aa:
        yield aa.upload_file(filename='myfile.dat', data=b'0' * 100000000)
        yield gen.sleep(0.05)
        assert a.digests['tick-duration'].components[0].max() < 0.050
Example #6
def test_worker_who_has_clears_after_failed_connection(c, s, a, b):
    n = Nanny(s.ip, s.port, ncores=2, loop=s.loop)
    n.start(0)

    start = time()
    while len(s.ncores) < 3:
        yield gen.sleep(0.01)
        assert time() < start + 5

    futures = c.map(slowinc, range(20), delay=0.01,
                    key=['f%d' % i for i in range(20)])
    yield wait(futures)

    result = yield c.submit(sum, futures, workers=a.address)
    for dep in set(a.dep_state) - set(a.task_state):
        a.release_dep(dep, report=True)

    n_worker_address = n.worker_address
    with ignoring(CommClosedError):
        yield c._run(os._exit, 1, workers=[n_worker_address])

    while len(s.workers) > 2:
        yield gen.sleep(0.01)

    total = c.submit(sum, futures, workers=a.address)
    yield total

    assert not a.has_what.get(n_worker_address)
    assert not any(n_worker_address in s for s in a.who_has.values())

    yield n._close()
Example #7
    def __wait(self):
        log.debug("Waiting for events")
        while not (yield sleep(0.001)):
            try:
                state = self.__connection.poll()
            except QueryCanceledError:
                yield sleep(0.1)
                continue

            f = Future()

            def resolve(fileno, io_op):
                if f.running():
                    f.set_result(True)
                self.__io_loop.remove_handler(fileno)

            if state == psycopg2.extensions.POLL_OK:
                raise Return(True)

            elif state == psycopg2.extensions.POLL_READ:
                self.__io_loop.add_handler(self.__connection.fileno(), resolve, IOLoop.READ)
                yield f

            elif state == psycopg2.extensions.POLL_WRITE:
                self.__io_loop.add_handler(self.__connection.fileno(), resolve, IOLoop.WRITE)
                yield f
Example #8
	def test_Royal_Seal(self):
		tu.print_test_header("test Royal Seal")
		royal_seal = prosperity.Royal_Seal(self.game, self.player1)
		workers_village = prosperity.Workers_Village(self.game, self.player1)
		copper = supply_cards.Copper(self.game, self.player1)
		self.player1.hand.add(royal_seal)
		self.player1.hand.add(copper)
		tu.add_many_to_hand(self.player1, workers_village, 3)

		workers_village.play()
		workers_village.play()
		workers_village.play()

		royal_seal.play()
		yield tu.send_input(self.player1, "buyCard", "Curse")
		self.assertTrue(self.player1.last_mode["mode"] == "select")
		yield tu.send_input(self.player1, "post_selection", ["Yes"])
		self.assertTrue(self.player1.deck[-1].title == "Curse")
		yield gen.sleep(.2)
		self.assertTrue(self.player1.last_mode["mode"] == "buy")

		yield tu.send_input(self.player1, "buyCard", "Silver")
		self.assertTrue(self.player1.last_mode["mode"] == "select")

		yield tu.send_input(self.player1, "post_selection", ["No"])
		self.assertTrue(self.player1.discard_pile[-1].title == "Silver")
		yield gen.sleep(.2)
		self.assertTrue(self.player1.last_mode["mode"] == "buy")
		yield tu.send_input(self.player1, "buyCard", "Mint")
		self.assertTrue(self.player1.last_mode["mode"] == "buy")
Example #9
def send():
    c = BufferedClient()
    c.send({'host': 'localhost', 'service': 'buffer test'})
    yield sleep(0.5)
    c.send({'host': 'localhost', 'service': 'buffer test'})
    c.send({'host': 'localhost', 'service': 'buffer test'})
    yield sleep(1.5)
Example #10
def test_serializers():
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval='10ms', serializers=['msgpack'])
        b.start(comm)

        b.send({'x': to_serialize(123)})
        b.send({'x': to_serialize('hello')})
        yield gen.sleep(0.100)

        b.send({'x': to_serialize(lambda x: x + 1)})

        with captured_logger('distributed.protocol') as sio:
            yield gen.sleep(0.100)

        value = sio.getvalue()
        assert 'serialize' in value
        assert 'type' in value
        assert 'function' in value

        msg = yield comm.read()
        assert list(msg) == [{'x': 123}, {'x': 'hello'}]

        with pytest.raises(gen.TimeoutError):
            msg = yield gen.with_timeout(timedelta(milliseconds=100), comm.read())
Example #11
    def _call(self, api_function, *args, **kwargs):
        """Calls the provided api_function in a background thread.

        If the api function returns a response cleanly, this will return it.
        If the api function raises an exception, this raises it up.

        For as long as the api function returns a boto2 or boto3
        rate limiting exception, this will backoff and try again.
        """
        while True:
            try:
                result = yield self._thread(api_function, *args, **kwargs)
                self._decrease_delay()
                raise gen.Return(result)
            except boto_exception.BotoServerError as e:
                # Boto2 exception.
                if e.error_code in self.boto2_throttle_strings:
                    self._increase_delay()
                    yield gen.sleep(self.delay)
                else:
                    self._decrease_delay()
                    raise e
            except botocore_exceptions.ClientError as e:
                # Boto3 exception.
                if e.response['Error']['Code'] == 'Throttling':
                    self._increase_delay()
                    yield gen.sleep(self.delay)
                else:
                    self._decrease_delay()
                    raise e
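
The _increase_delay/_decrease_delay helpers are not shown above; a minimal sketch of a bounded exponential backoff that would fit the retry loop follows. The method and attribute names come from the example, but the bounds and factors are assumptions:

class BackoffDelayMixin(object):
    """Hypothetical holder of the adaptive delay used by _call above."""
    delay = 0.0
    _initial_delay = 0.25   # seconds; assumed starting point
    _max_delay = 30.0       # seconds; assumed cap

    def _increase_delay(self):
        # Double the delay on each throttling response, up to the cap.
        self.delay = min(self._max_delay, (self.delay * 2) or self._initial_delay)

    def _decrease_delay(self):
        # Decay back toward zero once calls succeed again.
        self.delay = 0.0 if self.delay < self._initial_delay else self.delay / 2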
Example #12
def test_advertiser_intermediate_failure():

    @gen.coroutine
    def handle(request, response):
        body = yield request.get_body()
        if hb.count == 2:
            # fail the second request only
            raise Exception('great sadness')
        response.write_body(body)

    hb = Fakebahn(handle)
    try:
        hb.start()
        adv = hyperbahn.Advertiser(
            'foo', TChannel('foo', known_peers=[hb.hostport]),
            interval_secs=0.2,
            interval_max_jitter_secs=0.0,
        )

        yield adv.start()
        assert 1 == hb.count

        yield gen.sleep(0.25)
        assert 2 == hb.count

        yield gen.sleep(0.25)
        assert 3 == hb.count
    finally:
        hb.stop()
Example #13
 def get(self, path):
     log.debug("Slow Sending file: %s (This may take several minutes)" % self.request.path)
     seenfiles.append(os.path.basename(str(self.request.path)))
     f = open(os.path.join('static', path), 'rb')
     f.seek(0, 2)
     length = f.tell()
     f.seek(0, 0)
     self.set_header('Content-Length', str(length))
     self.set_header('X-Powered-By', 'Express')
     self.set_header('Transfer-Encoding', '')
     self.set_header('Content-Disposition',
             'attachment; filename="{}"'.format(path))
     self.set_header('Accept-Ranges', 'bytes')
     self.set_header('Cache-Control', 'public, max-age=0')
     self.set_header('Content-Type', 'application/octet-stream')
     chunk = f.read(10)
     self.write(chunk)
     yield self.flush()
     yield gen.sleep(0.9)
     while True:
         chunk = f.read(1400)
         if not chunk:
             break
         self.write(chunk)
         yield self.flush()
         print("  {}%   ".format(int(f.tell()*100/length)), end="\r", flush=True)
         yield gen.sleep(0.3)
Example #14
    def traktToken(self, trakt_pin=None, refresh=False, count=0):

        if count > 3:
            sickrage.TRAKT_ACCESS_TOKEN = ''
            return False
        elif count > 0:
            gen.sleep(2)  # note: gen.sleep returns a Future; without yield/await it does not pause

        data = {
            'client_id': sickrage.TRAKT_API_KEY,
            'client_secret': sickrage.TRAKT_API_SECRET,
            'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob'
        }

        if refresh:
            data['grant_type'] = 'refresh_token'
            data['refresh_token'] = sickrage.TRAKT_REFRESH_TOKEN
        else:
            data['grant_type'] = 'authorization_code'
            if trakt_pin is not None:
                data['code'] = trakt_pin

        headers = {
            'Content-Type': 'application/json'
        }

        resp = self.traktRequest('oauth/token', data=data, headers=headers, url=self.auth_url, method='POST',
                                 count=count)

        if 'access_token' in resp:
            sickrage.TRAKT_ACCESS_TOKEN = resp['access_token']
            if 'refresh_token' in resp:
                sickrage.TRAKT_REFRESH_TOKEN = resp['refresh_token']
            return True
        return False
Example #15
    def f():
        nn = rpc(ip=n.ip, port=n.port)
        yield n._start()

        ww = rpc(ip=n.ip, port=n.worker_port)
        yield ww.update_data(data={'x': 1, 'y': 2})
        with ignoring(StreamClosedError):
            yield ww.compute(function=sys.exit, args=(0,), key='z')

        start = time()
        while n.process.is_alive():  # wait until the process dies
            yield gen.sleep(0.01)
            assert time() - start < 2

        start = time()
        while not n.process.is_alive():  # wait until the process comes back
            yield gen.sleep(0.01)
            assert time() - start < 2

        start = time()
        while n.worker_address not in c.ncores:
            yield gen.sleep(0.01)
            assert time() - start < 2

        yield n._close()
        c.stop()
Example #16
    def f():
        yield a._start()
        yield b._start()
        yield s._sync_center()
        done = s.start()

        try:
            assert s.ncores == {('127.0.0.1', a.worker_port): 2,
                                ('127.0.0.1', b.worker_port): 2}
            assert s.nannies == {(n.ip, n.worker_port): n.port
                                 for n in [a, b]}

            while any(len(v) < 3 for v in s.resource_logs.values()):
                yield gen.sleep(0.01)

            yield gen.sleep(0.1)

            assert set(s.resource_logs) == {(a.ip, a.port), (b.ip, b.port)}
            assert all(len(v) == 3 for v in s.resource_logs.values())

            s.put({'op': 'close'})
            yield done
        finally:
            yield a._close()
            yield b._close()
            c.stop()
Example #17
def test_broken_worker_during_computation(c, s, a, b):
    n = Nanny(s.ip, s.port, ncores=2, loop=s.loop)
    n.start(0)

    start = time()
    while len(s.ncores) < 3:
        yield gen.sleep(0.01)
        assert time() < start + 5

    L = c.map(inc, range(256))
    for i in range(8):
        L = c.map(add, *zip(*partition_all(2, L)))

    from random import random
    yield gen.sleep(random() / 2)
    with ignoring(OSError):
        n.process.terminate()
    yield gen.sleep(random() / 2)
    with ignoring(OSError):
        n.process.terminate()

    result = yield c._gather(L)
    assert isinstance(result[0], int)

    yield n._close()
Example #18
def wait_for_http_server(url, timeout=10):
    """Wait for an HTTP Server to respond at url
    
    Any non-5XX response code will do, even 404.
    """
    loop = ioloop.IOLoop.current()
    tic = loop.time()
    client = AsyncHTTPClient()
    while loop.time() - tic < timeout:
        try:
            r = yield client.fetch(url, follow_redirects=False)
        except HTTPError as e:
            if e.code >= 500:
                # failed to respond properly, wait and try again
                if e.code != 599:
                    # we expect 599 for no connection,
                    # but 502 or other proxy error is conceivable
                    app_log.warn("Server at %s responded with error: %s", url, e.code)
                yield gen.sleep(0.1)
            else:
                app_log.debug("Server at %s responded with %s", url, e.code)
                return
        except (OSError, socket.error) as e:
            if e.errno not in {errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET}:
                app_log.warn("Failed to connect to %s (%s)", url, e)
            yield gen.sleep(0.1)
        else:
            return
    
    raise TimeoutError("Server at {url} didn't respond in {timeout} seconds".format(
        **locals()
    ))
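
A minimal usage sketch, assuming wait_for_http_server is decorated as a tornado coroutine in its original module:

from tornado import ioloop

# Blocks until the server answers, or re-raises the TimeoutError from above.
ioloop.IOLoop.current().run_sync(
    lambda: wait_for_http_server('http://127.0.0.1:8000', timeout=10))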
Example #19
def test_failed_worker_without_warning(c, s, a, b):
    L = c.map(inc, range(10))
    yield _wait(L)

    original_process = a.process
    a.process.terminate()
    start = time()
    while a.process is original_process and not isalive(a.process):
        yield gen.sleep(0.01)
        assert time() - start < 10

    yield gen.sleep(0.5)

    start = time()
    while len(s.ncores) < 2:
        yield gen.sleep(0.01)
        assert time() - start < 10

    yield _wait(L)

    L2 = c.map(inc, range(10, 20))
    yield _wait(L2)
    assert all(len(keys) > 0 for keys in s.has_what.values())
    ncores2 = s.ncores.copy()

    yield c._restart()

    L = c.map(inc, range(10))
    yield _wait(L)
    assert all(len(keys) > 0 for keys in s.has_what.values())

    assert not (set(ncores2) & set(s.ncores))  # no overlap
Example #20
    def do_connect(self, reconnect=False):
        # Create the socket and connect to the server
        if reconnect:
            logger.warning('Connection failed, retrying in %s seconds' % self._retrydelay)
            yield gen.sleep(self._retrydelay)

        while self._connection is None:
            logger.debug('Connecting to {}:{}'.format(config.ENVISALINKHOST, config.ENVISALINKPORT))
            try:
                self._connection = yield self.tcpclient.connect(config.ENVISALINKHOST, config.ENVISALINKPORT)
                self._connection.set_close_callback(self.handle_close)
            except StreamClosedError:
                # failed to connect, but got no connection object, so we will loop here
                logger.warning('Connection failed, retrying in %s seconds' % self._retrydelay)
                yield gen.sleep(self._retrydelay)
                continue

            try:
                line = yield self._connection.read_until(self._terminator)
            except StreamClosedError:
                # In this state, since the connection object isn't None, it will trigger
                # the handle_close callback, so we just bail out and let handle_close
                # deal with it.
                return

            logger.debug("Connected to %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
            self.handle_line(line)
Example #21
File: tests.py Project: FSX/momoko
    def test_pool_shrinking_with_shrink_delay(self):
        db = yield self.build_pool(auto_shrink=True, shrink_delay=datetime.timedelta(seconds=1),
                                   shrink_period=datetime.timedelta(milliseconds=500))
        f1 = db.execute("SELECT 1")
        f2 = db.execute("SELECT 2")
        f3 = db.execute("SELECT 3")
        f4 = db.execute("SELECT 4")
        f5 = db.execute("SELECT 5")
        cursors = yield [f1, f2, f3, f4, f5]
        yield gen.sleep(.7)

        self.assertEqual(db.conns.total, 5)
        self.assertEqual(cursors[0].fetchone()[0], 1)
        self.assertEqual(cursors[1].fetchone()[0], 2)
        self.assertEqual(cursors[2].fetchone()[0], 3)
        self.assertEqual(cursors[3].fetchone()[0], 4)
        self.assertEqual(cursors[4].fetchone()[0], 5)

        f1 = db.execute("SELECT 1")
        f2 = db.execute("SELECT 2")
        f3 = db.execute("SELECT 3")
        cursors = yield [f1, f2, f3]
        self.assertEqual(cursors[0].fetchone()[0], 1)
        self.assertEqual(cursors[1].fetchone()[0], 2)
        self.assertEqual(cursors[2].fetchone()[0], 3)

        yield gen.sleep(1)

        self.assertEqual(db.conns.total, 3)
Example #22
def test_spawner_poll(db):
    first_spawner = new_spawner(db)
    user = first_spawner.user
    yield first_spawner.start()
    proc = first_spawner.proc
    status = yield first_spawner.poll()
    assert status is None
    if user.state is None:
        user.state = {}
    first_spawner.orm_spawner.state = first_spawner.get_state()
    assert 'pid' in first_spawner.orm_spawner.state
    
    # create a new Spawner, loading from state of previous
    spawner = new_spawner(db, user=first_spawner.user)
    spawner.start_polling()
    
    # wait for the process to get to the while True: loop
    yield gen.sleep(1)
    status = yield spawner.poll()
    assert status is None
    
    # kill the process
    proc.terminate()
    for i in range(10):
        if proc.poll() is None:
            yield gen.sleep(1)
        else:
            break
    assert proc.poll() is not None

    yield gen.sleep(2)
    status = yield spawner.poll()
    assert status is not None
Example #23
def test_spawner_poll(db, io_loop):
    first_spawner = new_spawner(db)
    user = first_spawner.user
    io_loop.run_sync(first_spawner.start)
    proc = first_spawner.proc
    status = io_loop.run_sync(first_spawner.poll)
    assert status is None
    user.state = first_spawner.get_state()
    assert "pid" in user.state

    # create a new Spawner, loading from state of previous
    spawner = new_spawner(db, user=first_spawner.user)
    spawner.start_polling()

    # wait for the process to get to the while True: loop
    io_loop.run_sync(lambda: gen.sleep(1))
    status = io_loop.run_sync(spawner.poll)
    assert status is None

    # kill the process
    proc.terminate()
    for i in range(10):
        if proc.poll() is None:
            time.sleep(1)
        else:
            break
    assert proc.poll() is not None

    io_loop.run_sync(lambda: gen.sleep(2))
    status = io_loop.run_sync(spawner.poll)
    assert status is not None
Example #24
    def run(self):
        QueueItem.run(self)

        try:
            sickrage.srLogger.info("Beginning daily search for new episodes")
            foundResults = searchForNeededEpisodes()

            if not foundResults:
                sickrage.srLogger.info("No needed episodes found")
            else:
                for result in foundResults:
                    # just use the first result for now
                    sickrage.srLogger.info("Downloading " + result.name + " from " + result.provider.name)
                    self.success = snatchEpisode(result)

                    # give the CPU a break
                    gen.sleep(cpu_presets[sickrage.srConfig.CPU_PRESET])  # note: returns a Future; without yield it does not pause

            QueueItem.finish(self)
        except Exception:
            sickrage.srLogger.debug(traceback.format_exc())

        if self.success is None:
            self.success = False

        self.finish()
Example #25
    def test_here_now_multiple_channels(self):
        ch1 = 'test-here-now-channel1'
        ch2 = 'test-here-now-channel2'
        self.pubnub.config.uuid = 'test-here-now-uuid'
        # print("connecting to the first...")
        yield connect_to_channel(self.pubnub, ch1)
        # print("...connected to the first")
        yield gen.sleep(1)
        # print("connecting to the second...")
        self.pubnub.subscribe().channels(ch2).execute()
        # print("...connected to the second")
        yield gen.sleep(5)
        env = yield self.pubnub.here_now() \
            .channels([ch1, ch2]) \
            .future()

        assert env.result.total_channels == 2
        assert env.result.total_occupancy >= 1

        channels = env.result.channels

        assert len(channels) == 2
        assert channels[0].occupancy >= 1
        assert channels[0].occupants[0].uuid == self.pubnub.uuid
        assert channels[1].occupancy >= 1
        assert channels[1].occupants[0].uuid == self.pubnub.uuid

        yield disconnect_from_channel(self.pubnub, [ch1, ch2])
        self.pubnub.stop()
        self.stop()
Example #26
    def run(self):
        QueueItem.run(self)

        try:
            sickrage.srLogger.info("Beginning manual search for: [" + self.segment.prettyName() + "]")
            self.started = True

            searchResult = searchProviders(self.show, [self.segment], True, self.downCurQuality)

            if searchResult:
                # just use the first result for now
                sickrage.srLogger.info("Downloading " + searchResult[0].name + " from " + searchResult[0].provider.name)
                self.success = snatchEpisode(searchResult[0])

                # give the CPU a break
                gen.sleep(cpu_presets[sickrage.srConfig.CPU_PRESET])  # note: returns a Future; without yield it does not pause

            else:
                notifications.message('No downloads were found',
                                      "Couldn't find a download for <i>%s</i>" % self.segment.prettyName())

                sickrage.srLogger.info("Unable to find a download for: [" + self.segment.prettyName() + "]")

        except Exception:
            sickrage.srLogger.debug(traceback.format_exc())

        ### Keep a list with the 100 last executed searches
        fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)

        if self.success is None:
            self.success = False

        self.finish()
Example #27
    def do_connect(self, reconnect=False):
        # Create the socket and connect to the server
        if reconnect:
            logger.warning('Connection failed, retrying in %s seconds' % self._retrydelay)
            yield gen.sleep(self._retrydelay)

        while self._connection is None:
            logger.debug('Connecting to {}:{}'.format(config.ENVISALINKHOST, config.ENVISALINKPORT))
            try:
                self._connection = yield self.tcpclient.connect(config.ENVISALINKHOST, config.ENVISALINKPORT)
                self._connection.set_close_callback(self.handle_close)
            except StreamClosedError:
                # failed to connect, but got no connection object, so we will loop here
                logger.warning('Connection failed, retrying in %s seconds' % self._retrydelay)
                yield gen.sleep(self._retrydelay)
                continue
            except gaierror:
                # could not resolve the host; on a reconnect we retry, otherwise we fail
                if reconnect:
                    logger.warning('Connection failed, unable to resolve hostname %s, retrying in %s seconds' % (config.ENVISALINKHOST, self._retrydelay))
                    yield gen.sleep(self._retrydelay)
                    continue
                else:
                    logger.warning('Connection failed, unable to resolve hostname %s. Exiting due to incorrect hostname.' % config.ENVISALINKHOST)
                    sys.exit(0)

            try:
                line = yield self._connection.read_until(self._terminator)
            except StreamClosedError:
                # In this state, since the connection object isn't None, it will trigger
                # the handle_close callback, so we just bail out and let handle_close
                # deal with it.
                return

            logger.debug("Connected to %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
            self.handle_line(line)
Example #28
    def instantiate(self, stream=None, environment=None):
        """ Start a local worker process

        Blocks until the process is up and the scheduler is properly informed
        """
        if environment:
            if not os.path.isabs(environment):
                environment = os.path.join(self.local_dir, environment)
            self.environment = environment

        with log_errors():
            if self.process and isalive(self.process):
                raise ValueError("Existing process still alive. Please kill first")

            if self.environment != nanny_environment:
                with tmpfile() as fn:
                    self.process = run_worker_subprocess(self.environment, self.ip,
                            self.scheduler.ip, self.scheduler.port, self.ncores,
                            self.port, self._given_worker_port, self.name,
                            self.memory_limit, self.loop, fn, self.quiet)

                    while not os.path.exists(fn):
                        yield gen.sleep(0.01)

                    while True:
                        try:
                            with open(fn) as f:
                                msg = json.load(f)
                            self.worker_port = msg['port']
                            self.worker_dir = msg['local_directory']
                            break
                        except JSONDecodeError:
                            yield gen.sleep(0.01)
            else:
                q = Queue()
                self.process = Process(target=run_worker_fork,
                                       args=(q, self.ip, self.scheduler.ip,
                                             self.scheduler.port, self.ncores,
                                             self.port, self._given_worker_port,
                                             self.local_dir, self.services, self.name,
                                             self.memory_limit))
                self.process.daemon = True
                self.process.start()
                while True:
                    try:
                        msg = q.get_nowait()
                        if isinstance(msg, Exception):
                            raise msg
                        self.worker_port = msg['port']
                        self.worker_dir = msg['dir']
                        assert self.worker_port
                        break
                    except queues.Empty:
                        yield gen.sleep(0.1)



            logger.info("Nanny %s:%d starts worker process %s:%d",
                        self.ip, self.port, self.ip, self.worker_port)
            raise gen.Return('OK')
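
The tmpfile handshake above implies a worker-side counterpart that writes the JSON the nanny polls for. A hedged sketch follows; only the keys 'port' and 'local_directory' are taken from the reads above, while the function name and call site are assumptions:

import json

def write_connection_file(fn, port, local_directory):
    # The nanny retries on JSONDecodeError, so a partially written
    # file is tolerated and no atomic rename is strictly required.
    with open(fn, 'w') as f:
        json.dump({'port': port, 'local_directory': local_directory}, f)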
Example #29
    def f():
        nn = rpc(ip=n.ip, port=n.port)
        yield n._start()
        first_dir = n.worker_dir

        assert os.path.exists(first_dir)

        ww = rpc(ip=n.ip, port=n.worker_port)
        yield ww.update_data(data={'x': 1, 'y': 2})
        with ignoring(StreamClosedError):
            yield ww.compute(function=sys.exit, args=(0,), key='z')

        start = time()
        while n.process.is_alive():  # wait until the process dies
            yield gen.sleep(0.01)
            assert time() - start < 2

        start = time()
        while not n.process.is_alive():  # wait until the process comes back
            yield gen.sleep(0.01)
            assert time() - start < 2

        start = time()
        while n.worker_address not in c.ncores or n.worker_dir is None:
            yield gen.sleep(0.01)
            assert time() - start < 2

        second_dir = n.worker_dir

        yield n._close()
        assert not os.path.exists(second_dir)
        assert not os.path.exists(first_dir)
        assert first_dir != n.worker_dir
        c.stop()
Example #30
def test__read_text(e, s, a, b):
    with make_hdfs() as hdfs:
        with hdfs.open('/tmp/test/text.1.txt', 'wb') as f:
            f.write('Alice 100\nBob 200\nCharlie 300'.encode())

        with hdfs.open('/tmp/test/text.2.txt', 'wb') as f:
            f.write('Dan 400\nEdith 500\nFrank 600'.encode())

        with hdfs.open('/tmp/test/other.txt', 'wb') as f:
            f.write('a b\nc d'.encode())

        b = db.read_text('hdfs:///tmp/test/text.*.txt',
                         collection=True)
        yield gen.sleep(0.5)
        assert not s.tasks

        import dask
        b.compute(get=dask.get)

        future = e.compute(b.str.strip().str.split().map(len))
        yield gen.sleep(0.5)
        result = yield future._result()
        assert result == [2, 2, 2, 2, 2, 2]

        b = db.read_text('hdfs:///tmp/test/other.txt', collection=True)
        b = e.persist(b)
        future = e.compute(b.str.split().concat())
        result = yield future._result()
        assert result == ['a', 'b', 'c', 'd']

        L = db.read_text('hdfs:///tmp/test/text.*.txt', collection=False)
        assert all(isinstance(x, Delayed) for x in L)
Example #31
def test_learn_occupancy_2(c, s, a, b):
    future = c.map(slowinc, range(1000), delay=0.2)
    while not any(ts.who_has for ts in s.tasks.values()):
        yield gen.sleep(0.01)

    assert 100 < s.total_occupancy < 1000
Example #32
def test_retire_workers_close(c, s, a, b):
    yield s.retire_workers(close_workers=True)
    assert not s.workers
    while a.status != "closed" and b.status != "closed":
        yield gen.sleep(0.01)
Example #33
 def _stop_worker_gently(self, worker):
     yield self.scheduler.close_worker(worker=worker)
     yield gen.sleep(0.9)
     yield self.stop_worker(worker)
Example #34
 def do_test(self):
     yield gen.sleep(3)
     raise gen.Return('test')
Example #35
    def printresults():
        logger.log(
            STATS, '{} {} {} {} {}'.format(start.strftime("%Y-%m-%d %H:%M:%S"),
                                           end.strftime("%Y-%m-%d %H:%M:%S"),
                                           runtime, queryPerSec, queryratio))

        querypermin = queryPerSec * 60
        endtime = datetime.now(timezone('UTC')) + timedelta(minutes=runtime)
        line = list()
        popularitylist = list()
        newquerylist = list()

        if filename != "":
            newquerylist = QueryGenerator.generateQueriesFromFile(
                start, end, querypermin * runtime, timeAccessGenerator,
                periodAccessGenerator, querytype, queryratio, filename)
        elif isbatch:
            newquerylist = QueryGenerator.generateQueries(
                start, end, querypermin * runtime, timeAccessGenerator,
                periodAccessGenerator, popularitylist, querytype, queryratio,
                logger)
        else:
            #logger.info("Run.py start queryendtime "+str(start)+", "+str(endtime))
            queryStartInterval = start
            queryEndInterval = start + timedelta(minutes=1)
            for i in range(0, runtime):
                logger.info("Start generating queries for interval " +
                            str(queryStartInterval) + " - " +
                            str(queryEndInterval))
                newquerylist.extend(
                    QueryGenerator.generateQueries(
                        queryStartInterval, queryEndInterval, querypermin,
                        timeAccessGenerator, periodAccessGenerator,
                        popularitylist, querytype, queryratio, logger))
                queryEndInterval = queryEndInterval + timedelta(minutes=1)

            logger.info("Finished generating queries. num queries generated " +
                        str(len(newquerylist)))

        if filename != "" or isbatch == True:
            count = 0
            time = datetime.now(timezone('UTC'))
            logger.info("Time: {}".format(time.strftime("%Y-%m-%d %H:%M:%S")))
            nextminute = time + timedelta(minutes=1)
            for query in newquerylist:
                try:
                    line.append(
                        applyOperation(query, config, brokernameurl, logger))
                except Exception as inst:
                    logger.error(type(inst))  # the exception instance
                    logger.error(inst.args)   # arguments stored in .args
                    logger.error(inst)        # __str__ allows args to be printed directly
                    x, y = inst.args
                    logger.error('x = %s', x)
                    logger.error('y = %s', y)

                count = count + 1
                if count >= querypermin:
                    timediff = (nextminute -
                                datetime.now(timezone('UTC'))).total_seconds()
                    if timediff > 0:
                        yield gen.sleep(timediff)
                    count = 0
                    time = datetime.now(timezone('UTC'))
                    logger.info("Time: {}".format(
                        time.strftime("%Y-%m-%d %H:%M:%S")))
                    nextminute = time + timedelta(minutes=1)
        else:
            # frequency of queries per millisecond
            queryPerMilliSecond = float(queryPerSec) / 1000
            # number of samples spaced by 1 millisecond
            numSamples = runtime * 60 * 1000
            numQueries, querySchedule = genPoissonQuerySchedule(
                queryPerMilliSecond, numSamples)
            logger.info("Poisson numQueries = " + str(numQueries))

            queryScheduleIdx = 0
            count = 0
            while count < len(newquerylist):
                sample = querySchedule[queryScheduleIdx]
                #logger.info("Poisson sample is "+str(sample[0])+", "+str(sample[1]))
                if (sample[0] == 0):
                    #logger.info("Sleeping for "+str(sample[1]))
                    yield gen.sleep(
                        float(sample[1]) /
                        1000)  # divide by 1000 to convert it into seconds
                else:
                    for i in range(0, sample[0]):
                        try:
                            line.append(
                                applyOperation(newquerylist[count], config,
                                               brokernameurl, logger))
                            #applyOperation(newquerylist[count], config, brokernameurl, logger)
                            newquerylist[count].setTxTime(datetime.now())
                            #logger.info("Running query "+str(sample[0]))
                        except Exception as inst:
                            logger.error(type(inst))  # the exception instance
                            logger.error(
                                inst.args)  # arguments stored in .args
                            logger.error(
                                inst
                            )  # __str__ allows args to be printed directly
                        count = count + 1
                        if count >= len(newquerylist):
                            break
                queryScheduleIdx = queryScheduleIdx + 1

        wait_iterator = gen.WaitIterator(*line)
        while not wait_iterator.done():
            try:
                result = yield wait_iterator.next()
            except Exception as e:
                logger.error("Error {} from {}".format(
                    e, wait_iterator.current_future))
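
genPoissonQuerySchedule is not shown above; from the way the loop consumes it, it returns (numQueries, schedule) where each schedule entry is a (queries_this_tick, idle_ms) pair, one per millisecond sample. A sketch under those assumptions:

import numpy as np

def genPoissonQuerySchedule(rate_per_ms, num_samples):
    counts = np.random.poisson(rate_per_ms, num_samples)
    schedule = []
    for n in counts:
        if n == 0:
            schedule.append((0, 1))       # idle tick: sleep 1 millisecond
        else:
            schedule.append((int(n), 0))  # fire n queries immediately
    return int(counts.sum()), schedule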
Example #36
    def callback(self, back_result):

        master = self.master

        if back_result == '9':
            self.refund()
            self.update_upstream(refund=True)

        try:
            self.back_result = str(self.back_result or self.up_back_result
                                   or self.up_result or back_result)
            master.hset('order:%s' % self.order_id, 'back_result',
                        self.back_result)

        except Exception as e:
            root_log.error("SET", e)

        downstream = self.application.config['downstream'][self.user_id]

        url = self.back_url
        back_time = time.localtime()

        if self.product == 'data':
            body = json.dumps({
                'order_id': self.sp_order_id,
                'transactionid': self.order_id,
                'orderstatus': ((back_result == '1') and 'finish') or 'fail',
                'result_code': self.back_result,
                'plat_offer_id': self.plat_offer_id,
                'facevalue': self.price,
                'phone_id': self.mobile,
                'ordertime': time.strftime("%Y-%m-%d %H:%M:%S", back_time)
            })

        else:
            body = 'userid=%s&orderid=%s&sporderid=%s&merchantsubmittime=%s&resultno=%s' % (
                self.user_id, self.order_id, self.sp_order_id,
                time.strftime("%Y%m%d%H%M%S", back_time), back_result)

            sign = signature(body + '&key=' + downstream['key'])

            body += "&sign=" + sign

        request_log.info('CALLBACK %s - %s' % (url, body),
                         extra={'orderid': self.order_id})

        h = None
        if downstream.get('content'):
            h = {'Content-Type': 'application/json;charset=UTF-8'}

        for i in range(3):
            http_client = AsyncHTTPClient()
            try:
                response = yield http_client.fetch(url,
                                                   method='POST',
                                                   headers=h,
                                                   body=body)

                if response and response.code == 200:
                    break

            except Exception as e:
                request_log.warn('CALLBACK FAIL - %s',
                                 e,
                                 extra={'orderid': self.order_id})
            finally:
                http_client.close()
            # back off before retrying: 60, 120, then 180 seconds
            yield gen.sleep(60 * (i + 1))

        # finish order
        back_time = time.localtime()
        try:
            master.hset('order:%s' % self.order_id, 'back_time',
                        time.mktime(back_time))
            master.sadd('list:finish', self.order_id)
            master.srem('list:create', self.order_id)
        except Exception as e:
            root_log.error("MOVE ORDER %s", e)
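
The signature helper used when building the non-JSON callback body is not shown. A hedged sketch of one common convention for signing such query strings (an MD5 hex digest of the already-concatenated 'body&key=...' string) follows; the real algorithm may differ:

import hashlib

def signature(text):
    # Hypothetical implementation; only the call shape is taken from callback() above.
    return hashlib.md5(text.encode('utf-8')).hexdigest()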
Example #37
 def get(self):
     self.write("asdf")
     self.flush()
     # Wait a bit to ensure the chunks are sent and received separately.
     yield gen.sleep(0.01)
     self.write("qwer")
Example #38
def ping():
    """print dots to indicate idleness"""
    while True:
        yield gen.sleep(0.25)
        print('.')
Example #39
 def f(dask_worker=None):
     yield gen.sleep(0.001)
     raise gen.Return(dask_worker.id)
Example #40
def test_heartbeats(c, s, a, b):
    pytest.importorskip('psutil')
    start = time()
    while not all(s.worker_info[w].get('memory-rss') for w in s.workers):
        yield gen.sleep(0.01)
        assert time() < start + 2
Example #41
 def test_wait_timeout(self):
     c = locks.Condition()
     wait = c.wait(timedelta(seconds=0.01))
     self.io_loop.call_later(0.02, c.notify)  # Too late.
     yield gen.sleep(0.03)
     self.assertFalse((yield wait))
Example #42
 def f(index):
     with (yield sem.acquire()):
         history.append('acquired %d' % index)
         yield gen.sleep(0.01)
         history.append('release %d' % index)
Example #43
 async def start(self, rate: int = 32):  # Match client rate
     self.prev_time = time.time()
     while True:
         timer = gen.sleep(1 / rate)
         await self.tick()
         await timer
Example #44
 def f(x, y):
     yield gen.sleep(x)
     raise gen.Return(y + 1)
Example #45
def geninc(x, delay=0.02):
    yield gen.sleep(delay)
    raise gen.Return(x + 1)
Example #46
  def _start_instance(self, version, port):
    """ Starts a Google App Engine application on this machine. It
        will start it up and then proceed to fetch the main page.

    Args:
      version: A Version object.
      port: An integer specifying a port to use.
    """
    version_details = version.version_details
    runtime = version_details['runtime']
    env_vars = version_details.get('envVariables', {})
    runtime_params = self._deployment_config.get_config('runtime_parameters')
    max_memory = runtime_params.get('default_max_appserver_memory',
                                    DEFAULT_MAX_APPSERVER_MEMORY)
    if 'instanceClass' in version_details:
      max_memory = INSTANCE_CLASSES.get(version_details['instanceClass'],
                                        max_memory)

    source_archive = version_details['deployment']['zip']['sourceUrl']

    api_server_port = yield self._ensure_api_server(version.project_id)
    yield self._source_manager.ensure_source(
      version.revision_key, source_archive, runtime)

    logger.info('Starting {}:{}'.format(version, port))

    pidfile = PIDFILE_TEMPLATE.format(revision=version.revision_key, port=port)

    if runtime == GO:
      env_vars['GOPATH'] = os.path.join(UNPACK_ROOT, version.revision_key,
                                        'gopath')
      env_vars['GOROOT'] = os.path.join(GO_SDK, 'goroot')

    watch = ''.join([MONIT_INSTANCE_PREFIX, version.revision_key])
    if runtime in (PYTHON27, GO, PHP):
      start_cmd = create_python27_start_cmd(
        version.project_id,
        self._login_server,
        port,
        pidfile,
        version.revision_key,
        api_server_port)
      env_vars.update(create_python_app_env(self._login_server,
                                            version.project_id))
    elif runtime == JAVA:
      # Account for MaxPermSize (~170MB), the parent process (~50MB), and thread
      # stacks (~20MB).
      max_heap = max_memory - 250
      if max_heap <= 0:
        raise BadConfigurationException(
          'Memory for Java applications must be greater than 250MB')

      start_cmd = create_java_start_cmd(
        version.project_id,
        port,
        self._login_server,
        max_heap,
        pidfile,
        version.revision_key,
        api_server_port
      )

      env_vars.update(create_java_app_env(self._deployment_config))
    else:
      raise BadConfigurationException(
        'Unknown runtime {} for {}'.format(runtime, version.project_id))

    logging.info("Start command: " + str(start_cmd))
    logging.info("Environment variables: " + str(env_vars))

    monit_app_configuration.create_config_file(
      watch,
      start_cmd,
      pidfile,
      port,
      env_vars,
      max_memory,
      self._syslog_server,
      check_port=True,
      kill_exceeded_memory=True)

    full_watch = '{}-{}'.format(watch, port)

    yield self._monit_operator.reload(self._thread_pool)

    # The reload command does not block, and we don't have a good way to check
    # if Monit is ready with its new configuration yet. If the daemon begins
    # reloading while it is handling the 'start', it can end up in a state
    # where it never starts the process. As a temporary workaround, this
    # small period allows it to finish reloading. This can be removed if
    # instances are started inside a cgroup.
    yield gen.sleep(0.5)
    yield self._monit_operator.send_command_retry_process(full_watch, 'start')

    # Make sure the version registration node exists.
    self._zk_client.ensure_path(
      '/'.join([VERSION_REGISTRATION_NODE, version.version_key]))

    instance = Instance(version.revision_key, port)
    yield self._add_routing(instance)

    if version.project_id == DASHBOARD_PROJECT_ID:
      log_size = DASHBOARD_LOG_SIZE
    else:
      log_size = APP_LOG_SIZE

    if not setup_logrotate(version.project_id, log_size):
      logging.error("Error while setting up log rotation for application: {}".
                    format(version.project_id))
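
As the comment before the gen.sleep(0.5) notes, the fixed pause is only a workaround for Monit's non-blocking reload. A hedged sketch of a poll-based alternative, assuming the MonitOperator exposes some status query that succeeds once reloading is done (the status() call is hypothetical):

import time
from tornado import gen

@gen.coroutine
def wait_for_monit_reload(monit_operator, timeout=5.0):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            yield monit_operator.status()  # any clean answer means the daemon is ready
            return
        except Exception:
            yield gen.sleep(0.1)
    raise gen.TimeoutError('Monit did not finish reloading in time')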
Example #47
 def f():
     yield gen.sleep(1)
Example #48
def test_non_existent_worker(c, s):
    s.add_worker(address='127.0.0.1:5738', ncores=2, nbytes={}, host_info={})
    futures = c.map(inc, range(10))
    yield gen.sleep(4)
    assert not s.workers
    assert all(ts.state == 'no-worker' for ts in s.tasks.values())
Example #49
def test_basic(c, s, a, b):
    X, y = make_classification(n_samples=1000, n_features=5, chunks=100)
    model = SGDClassifier(tol=1e-3, penalty="elasticnet")

    params = {"alpha": np.logspace(-2, 1, num=50), "l1_ratio": [0.01, 1.0]}

    X_test, y_test = X[:100], y[:100]
    X_train = X[100:]
    y_train = y[100:]

    n_parameters = 5
    param_list = list(ParameterSampler(params, n_parameters))

    def additional_calls(info):
        pf_calls = {k: v[-1]["partial_fit_calls"] for k, v in info.items()}
        ret = {k: int(calls < 10) for k, calls in pf_calls.items()}
        if len(ret) == 1:
            return {list(ret)[0]: 0}

        # Don't train one model
        some_keys = set(ret.keys()) - {0}
        del ret[random.choice(list(some_keys))]
        return ret

    info, models, history, best = yield fit(
        model,
        param_list,
        X_train,
        y_train,
        X_test,
        y_test,
        additional_calls,
        fit_params={"classes": [0, 1]},
    )

    # Ensure that we touched all data
    keys = {t[0] for t in s.transition_log}
    L = [str(k) in keys for kk in X_train.__dask_keys__() for k in kk]
    assert all(L)

    for model in models.values():
        assert isinstance(model, Future)
        model2 = yield model
        assert isinstance(model2, SGDClassifier)
    XX_test, yy_test = yield c.compute([X_test, y_test])
    model = yield models[0]
    assert model.score(XX_test, yy_test) == info[0][-1]["score"]

    # `<` not `==` because we randomly dropped one model
    assert len(history) < n_parameters * 10
    for h in history:
        assert {
            "partial_fit_time",
            "score_time",
            "score",
            "model_id",
            "params",
            "partial_fit_calls",
        }.issubset(set(h.keys()))

    groups = toolz.groupby("partial_fit_calls", history)
    assert len(groups[1]) > len(groups[2]) > len(groups[3]) > len(
        groups[max(groups)])
    assert max(groups) == n_parameters

    keys = list(models.keys())
    for key in keys:
        del models[key]

    while c.futures or s.tasks:  # Cleans up cleanly after running
        yield gen.sleep(0.01)

    # smoke test for ndarray X_test and y_test
    X_test, y_test = yield c.compute([X_test, y_test])
    info, models, history, best = yield fit(
        model,
        param_list,
        X_train,
        y_train,
        X_test,
        y_test,
        additional_calls,
        fit_params={"classes": [0, 1]},
    )
Example #50
 def get(self):
     yield gen.sleep(10)
     print('NoBlockingHandler', datetime.datetime.now().strftime(
         "%Y-%m-%d %H:%M:%S"))
     self.write('NoBlockingHandler Request')
Example #51
def minute_loop(logic):
    while True:
        print("logged players:", logic.players)
        yield gen.sleep(20)
Example #52
 def sleep_some():
     self.io_loop.run_sync(lambda: gen.sleep(0.05))
Example #53
    def get(self):
        try:
            yield self.git_lock.acquire(1)
        except gen.TimeoutError:
            self.emit({
                'phase': 'error',
                'message': 'Another git operation is currently running, try again in a few minutes'
            })
            return

        try:
            repo = self.get_argument('repo')
            branch = self.get_argument('branch')
            repo_dir = repo.split('/')[-1]

            # We're going to send out event streams!
            self.set_header('content-type', 'text/event-stream')
            self.set_header('cache-control', 'no-cache')

            gp = GitPuller(repo, branch, repo_dir)

            q = Queue()

            def pull():
                try:
                    for line in gp.pull():
                        q.put_nowait(line)
                    # Sentinel when we're done
                    q.put_nowait(None)
                except Exception as e:
                    q.put_nowait(e)
                    raise e

            self.gp_thread = threading.Thread(target=pull)

            self.gp_thread.start()

            while True:
                try:
                    progress = q.get_nowait()
                except Empty:
                    yield gen.sleep(0.5)
                    continue
                if progress is None:
                    break
                if isinstance(progress, Exception):
                    self.emit({
                        'phase': 'error',
                        'message': str(progress),
                        'output': '\n'.join([
                            l.strip() for l in traceback.format_exception(
                                type(progress), progress,
                                progress.__traceback__)
                        ])
                    })
                    return

                self.emit({'output': progress, 'phase': 'syncing'})

            self.emit({'phase': 'finished'})
        except Exception as e:
            self.emit({
                'phase': 'error',
                'message': str(e),
                'output': '\n'.join([
                    l.strip() for l in traceback.format_exception(
                        type(e), e, e.__traceback__)
                ])
            })
        finally:
            self.git_lock.release()
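
The handler above relies on an emit helper that is not shown. A minimal sketch, assuming it writes one server-sent event per call and flushes immediately, to match the 'text/event-stream' header set in get():

import json
from tornado import gen

class EmitMixin(object):
    """Hypothetical mixin for the handler class above."""

    @gen.coroutine
    def emit(self, data):
        if not isinstance(data, str):
            data = json.dumps(data)
        self.write('data: {}\n\n'.format(data))
        yield self.flush()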
Example #54
def test_explicit(c, s, a, b):
    X, y = make_classification(n_samples=1000, n_features=10, chunks=(200, 10))
    model = SGDClassifier(tol=1e-3, penalty="elasticnet")
    params = [{"alpha": 0.1}, {"alpha": 0.2}]

    def additional_calls(scores):
        """ Progress through predefined updates, checking along the way """
        ts = scores[0][-1]["partial_fit_calls"]
        ts -= 1  # partial_fit_calls = time step + 1
        if ts == 0:
            assert len(scores) == len(params)
            assert len(scores[0]) == 1
            assert len(scores[1]) == 1
            return {k: 2 for k in scores}
        if ts == 2:
            assert len(scores) == len(params)
            assert len(scores[0]) == 2
            assert len(scores[1]) == 2
            return {0: 1, 1: 0}
        elif ts == 3:
            assert len(scores) == len(params)
            assert len(scores[0]) == 3
            assert len(scores[1]) == 2
            return {0: 3}
        elif ts == 6:
            assert len(scores) == 1
            assert len(scores[0]) == 4
            return {0: 0}
        else:
            raise Exception()

    info, models, history, best = yield fit(
        model,
        params,
        X,
        y,
        X.blocks[-1],
        y.blocks[-1],
        additional_calls,
        scorer=None,
        fit_params={"classes": [0, 1]},
    )
    assert all(model.done() for model in models.values())

    models = yield models
    model = models[0]
    meta = info[0][-1]

    assert meta["params"] == {"alpha": 0.1}
    assert meta["partial_fit_calls"] == 6 + 1
    assert len(info) > len(models) == 1
    assert set(models.keys()).issubset(set(info.keys()))
    assert meta["partial_fit_calls"] == history[-1]["partial_fit_calls"]
    calls = {
        k: [h["partial_fit_calls"] for h in hist]
        for k, hist in info.items()
    }
    for k, call in calls.items():
        assert (np.diff(call) >= 1).all()
    assert set(models.keys()) == {0}
    del models[0]

    while s.tasks or c.futures:  # all data clears out
        yield gen.sleep(0.01)
Example #55
class ProcessManager(object):
    '''
    A class which will manage processes that should be running
    '''
    def __init__(self, name=None, wait_for_kill=1):
        # pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
        self._process_map = {}

        self.name = name
        if self.name is None:
            self.name = self.__class__.__name__

        self.wait_for_kill = wait_for_kill

        # store some pointers for the SIGTERM handler
        self._pid = os.getpid()
        self._sigterm_handler = signal.getsignal(signal.SIGTERM)
        self._restart_processes = True

    def add_process(self, tgt, args=None, kwargs=None, name=None):
        '''
        Create a process with the given args + kwargs.
        This will determine whether tgt is a Process class; otherwise it
        assumes it is a function.
        '''
        if args is None:
            args = []

        if kwargs is None:
            kwargs = {}

        if salt.utils.platform.is_windows():
            # Need to ensure that 'log_queue' is correctly transferred to
            # processes that inherit from 'MultiprocessingProcess'.
            need_log_queue = (isinstance(tgt, type)
                              and issubclass(tgt, MultiprocessingProcess))

            if need_log_queue and 'log_queue' not in kwargs:
                if hasattr(self, 'log_queue'):
                    kwargs['log_queue'] = self.log_queue
                else:
                    kwargs['log_queue'] = (
                        salt.log.setup.get_multiprocessing_logging_queue())

        # create a nicer name for the debug log
        if name is None:
            if isinstance(tgt, types.FunctionType):
                name = '{0}.{1}'.format(
                    tgt.__module__,
                    tgt.__name__,
                )
            else:
                name = '{0}{1}.{2}'.format(
                    tgt.__module__,
                    '.{0}'.format(tgt.__class__)
                    if str(tgt.__class__) != "<type 'type'>" else '',
                    tgt.__name__,
                )

        if isinstance(tgt, type) and issubclass(tgt, multiprocessing.Process):
            process = tgt(*args, **kwargs)
        else:
            process = multiprocessing.Process(target=tgt,
                                              args=args,
                                              kwargs=kwargs,
                                              name=name)

        if isinstance(process, SignalHandlingMultiprocessingProcess):
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process.start()
        else:
            process.start()
        log.debug("Started '{0}' with pid {1}".format(name, process.pid))
        self._process_map[process.pid] = {
            'tgt': tgt,
            'args': args,
            'kwargs': kwargs,
            'Process': process
        }
        return process

    def restart_process(self, pid):
        '''
        Create new process (assuming this one is dead), then remove the old one
        '''
        if self._restart_processes is False:
            return
        log.info('Process {0} ({1}) died with exit status {2},'
                 ' restarting...'.format(
                     self._process_map[pid]['tgt'], pid,
                     self._process_map[pid]['Process'].exitcode))
        # don't block, the process is already dead
        self._process_map[pid]['Process'].join(1)

        self.add_process(self._process_map[pid]['tgt'],
                         self._process_map[pid]['args'],
                         self._process_map[pid]['kwargs'])

        del self._process_map[pid]

    def stop_restarting(self):
        self._restart_processes = False

    def send_signal_to_processes(self, signal_):
        if (salt.utils.platform.is_windows()
                and signal_ in (signal.SIGTERM, signal.SIGINT)):
            # On Windows, the subprocesses automatically have their signal
            # handlers invoked. If you send one of these signals while the
            # signal handler is running, it will kill the process where it
            # is currently running and the signal handler will not finish.
            # This will also break the process tree: children of killed
            # children will become parentless and not findable when trying
            # to kill the process tree (they don't inherit their parent's
            # parent). Hence the 'MWorker' processes would be left over if
            # the 'ReqServer' process is killed this way since 'taskkill'
            # with the tree option will not be able to find them.
            return

        for pid in six.iterkeys(self._process_map.copy()):
            try:
                os.kill(pid, signal_)
            except OSError as exc:
                if exc.errno not in (errno.ESRCH, errno.EACCES):
                    # If it's not a "No such process" error, raise it
                    raise
                # Otherwise, it's a dead process, remove it from the process map
                del self._process_map[pid]

    @gen.coroutine
    def run(self, asynchronous=False):
        '''
        Load and start all available api modules
        '''
        log.debug('Process Manager starting!')
        salt.utils.appendproctitle(self.name)

        # make sure to kill the subprocesses if the parent is killed
        if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # There are no SIGTERM handlers installed, install ours
            signal.signal(signal.SIGTERM, self.kill_children)
        if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # There are no SIGINT handlers installed, install ours
            signal.signal(signal.SIGINT, self.kill_children)

        while True:
            log.trace('Process manager iteration')
            try:
                # in case someone died while we were waiting...
                self.check_children()
                # The event-based subprocesses management code was removed from here
                # because os.wait() conflicts with the subprocesses management logic
                # implemented in `multiprocessing` package. See #35480 for details.
                if asynchronous:
                    yield gen.sleep(10)
                else:
                    time.sleep(10)
                if len(self._process_map) == 0:
                    break
            # OSError is raised if a signal handler is called (SIGTERM) during os.wait
            except OSError:
                break
            except IOError as exc:
                # IOError with errno of EINTR (4) may be raised
                # when using time.sleep() on Windows.
                if exc.errno != errno.EINTR:
                    raise
                break
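A hypothetical usage sketch for this class (the work function is ours; real Salt code passes its daemon classes or functions as tgt):

import time

def work():
    while True:
        time.sleep(1)

manager = ProcessManager(name='ExampleManager')
manager.add_process(work)
# with the default asynchronous=False this loops in the calling thread,
# waking every 10 seconds; dead children are presumably restarted by
# check_children(), which is defined elsewhere in the class
manager.run()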
Example #56
0
def generate_sequence():
    for number in fibonacci():
        store.submit(number)
        yield gen.sleep(1)
        if finish.is_set():
            break
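This generator relies on three names defined elsewhere in its module: a fibonacci generator, a store with a submit method, and a finish event. One plausible shape for the missing pieces (an assumption, not code from the source):

from threading import Event

def fibonacci():
    # unbounded Fibonacci sequence
    a, b = 0, 1
    while True:
        yield a
        a, b = b, a + b

finish = Event()  # calling finish.set() elsewhere stops generate_sequence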
Example #57
0
 def cb(self):
     while True:
         L, self.buffer = self.buffer, []
         self.last = self.emit(L)
         yield self.last
         yield gen.sleep(self.interval)
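This is the timed-window pattern used by streaming libraries such as streamz: atomically swap out the accumulated buffer, emit it as one batch, wait on the emit future (yield self.last) for backpressure, then sleep for the interval. A self-contained sketch of the same loop (the Batcher class and its names are ours):

from tornado import gen
from tornado.ioloop import IOLoop

class Batcher(object):
    def __init__(self, interval=0.1):
        self.buffer = []
        self.interval = interval

    @gen.coroutine
    def run(self, cycles=3):
        for _ in range(cycles):  # bounded so the sketch terminates
            batch, self.buffer = self.buffer, []
            print('emitting', batch)  # stand-in for yielding self.emit(batch)
            yield gen.sleep(self.interval)

b = Batcher()
b.buffer.extend([1, 2, 3])
IOLoop.current().run_sync(b.run)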
Example #58
0
    def get(self):
        try:
            id = self.get_argument('id')

            # We're going to send out event streams!
            self.set_header('content-type', 'text/event-stream')
            self.set_header('cache-control', 'no-cache')

            hs = HSPuller(id, self.settings['hydroshare'])

            q = Queue()

            def pull():
                try:
                    for line in hs.pull():
                        q.put_nowait(line)
                    # Sentinel when we're done
                    q.put_nowait(None)
                except Exception as e:
                    q.put_nowait(e)
                    raise e

            self.hs_thread = threading.Thread(target=pull)

            self.hs_thread.start()

            while True:
                try:
                    progress = q.get_nowait()
                except Empty:
                    yield gen.sleep(0.5)
                    continue
                if progress is None:
                    break
                if isinstance(progress, Exception):
                    self.emit({
                        'phase': 'error',
                        'message': str(progress),
                        'output': '\n'.join(
                            line.strip() for line in traceback.format_exception(
                                type(progress), progress,
                                progress.__traceback__)),
                    })
                    return

                self.emit({'output': progress, 'phase': 'syncing'})

            self.emit({'phase': 'finished'})
        except Exception as e:
            self.emit({
                'phase': 'error',
                'message': str(e),
                'output': '\n'.join(
                    line.strip() for line in traceback.format_exception(
                        type(e), e, e.__traceback__)),
            })
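The handler bridges a blocking generator running in a thread to the coroutine by polling a thread-safe queue.Queue with gen.sleep. On Tornado 4.2+ an alternative is to hand items to a tornado.queues.Queue through IOLoop.add_callback (the documented thread-safe entry point) so the coroutine can simply yield q.get(); a sketch of that variant (names ours):

from tornado.ioloop import IOLoop
from tornado.queues import Queue as TornadoQueue

q = TornadoQueue()
loop = IOLoop.current()

def pull_in_thread(lines):
    # runs in a worker thread; add_callback marshals onto the IOLoop thread
    for line in lines:
        loop.add_callback(q.put_nowait, line)
    loop.add_callback(q.put_nowait, None)  # sentinel: we're done

# coroutine side: progress = yield q.get()  -- no gen.sleep polling needed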
Example #59
0
 def run():
     yield [n._start(addr) for n in nannies]
     while all(n.status != 'closed' for n in nannies):
         yield gen.sleep(0.2)
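Yielding a list of futures inside a Tornado coroutine waits on all of them concurrently, which is what lets every nanny start in parallel above. A standalone illustration of the idiom:

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def delay(n):
    yield gen.sleep(0.01 * n)
    raise gen.Return(n)

@gen.coroutine
def main():
    # all three delay() coroutines run concurrently, not sequentially
    results = yield [delay(n) for n in range(3)]
    print(results)  # [0, 1, 2]

IOLoop.current().run_sync(main)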
Example #60
0
 def worker():
     while True:
         item = yield q.get()
         self.accumulator += item
         q.task_done()
         yield gen.sleep(random() * 0.01)
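The worker above is the consumer half of a Tornado queue pipeline: q is presumably a tornado.queues.Queue, and task_done() pairs with a q.join() elsewhere. A self-contained sketch of both halves (all names ours):

from random import random
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

@gen.coroutine
def main():
    q = Queue()
    total = []

    @gen.coroutine
    def worker():
        while True:
            item = yield q.get()
            total.append(item)  # stands in for self.accumulator += item
            q.task_done()
            yield gen.sleep(random() * 0.01)

    IOLoop.current().spawn_callback(worker)
    for i in range(5):
        yield q.put(i)
    yield q.join()  # resolves once task_done() has run for every item
    print(sum(total))  # 10

IOLoop.current().run_sync(main)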