Example #1
 def _orchestrator_rest_worker(self):
     error_count = 0
     got = None
     while True:
         try:
             get_url = self.orchestrator_rest_url + \
                 '/actions/' + self.entity_id
             LOG.debug('GET %s', get_url)
             got = requests.get(get_url)
             got_jsdict = got.json()
             action = ActionBase.dispatch_from_jsondict(got_jsdict)
             LOG.debug('got %s', action.uuid)
             delete_url = get_url + '/' + action.uuid
             LOG.debug('DELETE %s', delete_url)
             deleted = requests.delete(delete_url)
             assert deleted.status_code == 200
             self.on_recv_action_from_orchestrator(action)
             error_count = 0
         except Exception as e:
             LOG.error('cannot HTTP GET', exc_info=True)
             if got is not None:
                 LOG.error('Got: %s', got.text)
             error_count += 1
             eventlet.sleep(error_count * 1.0)
         got = None
Example #2
    def test_memcached_driver(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        hostkey = str("%s:%s" % (self._topic, self._host))
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=self.down_time)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)

        self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
        serv.stop()
        eventlet.sleep(self.down_time + 1)
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
Example #3
def get_socket(conf, default_port=8080):
    """Bind socket to bind ip:port in conf

    :param conf: Configuration dict to read settings from
    :param default_port: port to use if not specified in conf

    :returns: a socket object as returned from socket.listen or
              ssl.wrap_socket if conf specifies cert_file
    """
    bind_addr = (conf.get("bind_ip", "0.0.0.0"), int(conf.get("bind_port", default_port)))
    address_family = [
        addr[0]
        for addr in socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]
    sock = None
    retry_until = time.time() + 30
    while not sock and time.time() < retry_until:
        try:
            sock = listen(bind_addr, backlog=int(conf.get("backlog", 4096)), family=address_family)
            if "cert_file" in conf:
                sock = ssl.wrap_socket(sock, certfile=conf["cert_file"], keyfile=conf["key_file"])
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            sleep(0.1)
    if not sock:
        raise Exception('Could not bind to %s:%s after trying for 30 seconds'
                        % bind_addr)
    return sock
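A minimal usage sketch, assuming the function above is in scope together with eventlet's listen and sleep plus the standard socket, ssl, time, and errno modules; the config keys and values here are illustrative:

conf = {'bind_ip': '127.0.0.1', 'bind_port': '8080', 'backlog': '128'}
sock = get_socket(conf, default_port=8080)  # retries on EADDRINUSE for up to 30 seconds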
Example #4
    def test_waiters_get_woken(self):
        # verify that when there's someone waiting on an empty pool
        # and someone puts an immediately-closed connection back in
        # the pool that the waiter gets woken
        self.pool.put(self.connection)
        self.pool.clear()
        self.pool = self.create_pool(max_size=1, max_age=0)

        self.connection = self.pool.get()
        self.assertEqual(self.pool.free(), 0)
        self.assertEqual(self.pool.waiting(), 0)
        e = event.Event()
        def retrieve(pool, ev):
            c = pool.get()
            ev.send(c)
        eventlet.spawn(retrieve, self.pool, e)
        eventlet.sleep(0) # these two sleeps should advance the retrieve
        eventlet.sleep(0) # coroutine until it's waiting in get()
        self.assertEqual(self.pool.free(), 0)
        self.assertEqual(self.pool.waiting(), 1)
        self.pool.put(self.connection)
        timer = eventlet.Timeout(1)
        conn = e.wait()
        timer.cancel()
        self.assertEqual(self.pool.free(), 0)
        self.assertEqual(self.pool.waiting(), 0)
        self.pool.put(conn)
Example #5
            def publisher(waiter):
                LOG.info(_("Creating proxy for topic: %s"), topic)

                try:
                    # The topic is received over the network,
                    # don't trust this input.
                    if self.badchars.search(topic) is not None:
                        emsg = _("Topic contained dangerous characters.")
                        LOG.warn(emsg)
                        raise RPCException(emsg)

                    out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
                                         (ipc_dir, topic),
                                         sock_type, bind=True)
                except RPCException:
                    waiter.send_exception(*sys.exc_info())
                    return

                self.topic_proxy[topic] = eventlet.queue.LightQueue(
                    CONF.rpc_zmq_topic_backlog)
                self.sockets.append(out_sock)

                # It takes some time for a pub socket to open,
                # before we can have any faith in doing a send() to it.
                if sock_type == zmq.PUB:
                    eventlet.sleep(.5)

                waiter.send(True)

                while True:
                    data = self.topic_proxy[topic].get()
                    out_sock.send(data)
                    LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
                              {'data': data})
Example #6
 def _periodic_resync_helper(self):
     """Resync the dhcp state at the configured interval."""
     while True:
         eventlet.sleep(self.conf.resync_interval)
         if self.needs_resync:
             self.needs_resync = False
             self.sync_state()
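The same periodic-loop shape, reduced to a self-contained sketch: sleeping inside a spawned greenthread lets other greenthreads run between iterations (the names periodic and fn are illustrative):

import eventlet

def periodic(interval, fn):
    # run fn() forever, yielding to the hub for `interval` seconds between runs
    while True:
        eventlet.sleep(interval)
        fn()

gt = eventlet.spawn(periodic, 1, lambda: print('resync check'))
eventlet.sleep(3.5)  # let a few iterations happen, then stop the loop
gt.kill()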
Example #7
    def connect(*args, **ckwargs):
        if kwargs.get('slow_connect', False):
            sleep(0.1)
        if 'give_content_type' in kwargs:
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        if 'give_connect' in kwargs:
            kwargs['give_connect'](*args, **ckwargs)
        status = next(code_iter)
        if isinstance(status, tuple):
            status, expect_status = status
        else:
            expect_status = status
        etag = next(etag_iter)
        headers = next(headers_iter)
        timestamp = next(timestamps_iter)

        if status <= 0:
            raise HTTPException()
        if body_iter is None:
            body = static_body or ''
        else:
            body = next(body_iter)
        return FakeConn(status, etag, body=body, timestamp=timestamp,
                        expect_status=expect_status, headers=headers)
Example #8
def do_stat(path):
    serr = None
    for i in range(0, _STAT_ATTEMPTS):
        try:
            stats = os.stat(path)
        except OSError as err:
            if err.errno == errno.EIO:
                # Retry EIO assuming it is a transient error from FUSE after a
                # short random sleep
                serr = err
                sleep(random.uniform(0.001, 0.005))
                continue
            if err.errno == errno.ENOENT:
                stats = None
            else:
                raise GlusterFileSystemOSError(
                    err.errno,
                    '%s, os.stat("%s")[%d attempts]' % (err.strerror, path, i))
        if i > 0:
            logging.warn(
                "fs_utils.do_stat():" " os.stat('%s') retried %d times (%s)", path, i, "success" if stats else "failure"
            )
        return stats
    else:
        raise GlusterFileSystemOSError(
            serr.errno, '%s, os.stat("%s")[%d attempts]' % (serr.strerror, path, _STAT_ATTEMPTS)
        )
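The retry-on-transient-EIO idea above, condensed to a self-contained sketch using plain time.sleep (retry_stat is a hypothetical name):

import errno
import os
import random
import time

def retry_stat(path, attempts=3):
    # retry os.stat() on transient EIO, as do_stat() does for FUSE mounts
    for _ in range(attempts):
        try:
            return os.stat(path)
        except OSError as err:
            if err.errno != errno.EIO:
                raise
            time.sleep(random.uniform(0.001, 0.005))
    raise OSError(errno.EIO, 'os.stat(%r) failed after %d attempts'
                  % (path, attempts))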
Example #9
    def _cs_request(self, url, method, **kwargs):
        attempts = 0
        timeout = 1
        while True:
            attempts += 1
            try:
                resp, body = self.request(self.base_url + url, method,
                                          **kwargs)
                return resp, body
            except (exceptions.BadRequest,
                    requests.exceptions.RequestException,
                    exceptions.ClientException) as e:
                if attempts > self.retries:
                    raise

                self._logger.debug("Request error: %s" % six.text_type(e))

            self._logger.debug(
                "Failed attempt (%(current)s of %(total)s), "
                "retrying in %(sec)s seconds" % {
                    'current': attempts,
                    'total': self.retries,
                    'sec': timeout
                })
            sleep(timeout)
            timeout *= 2
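The doubling-timeout pattern from _cs_request in isolation; a minimal sketch (with_backoff is a hypothetical helper, and a real caller would catch narrower exception types):

import time

def with_backoff(fn, retries=3, timeout=1):
    # call fn() until it succeeds, doubling the wait after each failure
    for attempt in range(retries + 1):
        try:
            return fn()
        except Exception:
            if attempt == retries:
                raise
        time.sleep(timeout)
        timeout *= 2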
Example #10
    def _backup_chunk(self, backup, container, data, data_offset,
                      object_meta, extra_metadata):
        """Backup data chunk based on the object metadata and offset."""
        object_prefix = object_meta['prefix']
        object_list = object_meta['list']

        object_id = object_meta['id']
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = data_offset
        obj[object_name]['length'] = len(data)
        LOG.debug('Backing up chunk of data from volume.')
        algorithm, output_data = self._prepare_output_data(data)
        obj[object_name]['compression'] = algorithm
        LOG.debug('About to put_object')
        with self.get_object_writer(
                container, object_name, extra_metadata=extra_metadata
        ) as writer:
            writer.write(output_data)
        md5 = hashlib.md5(data).hexdigest()
        obj[object_name]['md5'] = md5
        LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
                  {'object_name': object_name, 'md5': md5})
        object_list.append(obj)
        object_id += 1
        object_meta['list'] = object_list
        object_meta['id'] = object_id

        LOG.debug('Calling eventlet.sleep(0)')
        eventlet.sleep(0)
Example #11
    def delete_backup(self, backup):
        """Delete the given backup."""
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
                  'prefix: %(pre)s.',
                  {'id': backup['id'],
                   'cont': container,
                   'pre': object_prefix})

        if container is not None and object_prefix is not None:
            object_names = []
            try:
                object_names = self._generate_object_names(backup)
            except Exception:
                LOG.warning('Error while listing objects, continuing'
                            ' with delete.')

            for object_name in object_names:
                self.delete_object(container, object_name)
                LOG.debug('deleted object: %(object_name)s'
                          ' in container: %(container)s.',
                          {
                              'object_name': object_name,
                              'container': container
                          })
                # Deleting a backup's objects can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)

        LOG.debug('delete %s finished.', backup['id'])
Example #12
File: utils.py  Project: codeoedoc/swift
def lock_file(filename, timeout=10, append=False, unlink=True):
    """
    Context manager that acquires a lock on a file.  This will block until
    the lock can be acquired, or the timeout time has expired (whichever occurs
    first).

    :param filename: file to be locked
    :param timeout: timeout (in seconds)
    :param append: True if file should be opened in append mode
    :param unlink: True if the file should be unlinked at the end
    """
    flags = os.O_CREAT | os.O_RDWR
    if append:
        flags |= os.O_APPEND
    fd = os.open(filename, flags)
    try:
        with LockTimeout(timeout, filename):
            while True:
                try:
                    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    break
                except IOError as err:
                    if err.errno != errno.EAGAIN:
                        raise
                sleep(0.01)
        mode = 'r+'
        if append:
            mode = 'a+'
        file_obj = os.fdopen(fd, mode)
        yield file_obj
    finally:
        try:
            file_obj.close()
        except UnboundLocalError:
            # never got as far as creating the file object
            os.close(fd)
        if unlink:
            os.unlink(filename)
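A usage sketch, assuming lock_file is wrapped with contextlib.contextmanager (the decorator is not shown in the excerpt):

with lock_file('/tmp/example.lock', timeout=5) as f:
    f.write('exclusive section\n')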
Example #13
File: utils.py  Project: 10171121/cinder
def _transfer_data(src, dest, length, chunk_size):
    """Transfer data between files (Python IO objects)."""

    chunks = int(math.ceil(length / chunk_size))
    remaining_length = length

    LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
              {'chunks': chunks, 'bytes': chunk_size})

    for chunk in range(0, chunks):
        before = time.time()
        data = tpool.execute(src.read, min(chunk_size, remaining_length))

        # If we have reached end of source, discard any extraneous bytes from
        # destination volume if trim is enabled and stop writing.
        if data == b'':
            break

        tpool.execute(dest.write, data)
        remaining_length -= len(data)
        delta = (time.time() - before)
        rate = (chunk_size / delta) / units.Ki
        LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
                  {'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})

        # yield to any other pending operations
        eventlet.sleep(0)

    tpool.execute(dest.flush)
Example #14
File: utils.py  Project: codeoedoc/swift
def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
    '''
    Will eventlet.sleep() for the appropriate time so that the max_rate
    is never exceeded.  If max_rate is 0, will not ratelimit.  The
    maximum recommended rate should not exceed (1000 * incr_by) a second
    as eventlet.sleep() does involve some overhead.  Returns running_time
    that should be used for subsequent calls.

    :param running_time: the running time of the next allowable request. Best
                         to start at zero.
    :param max_rate: The maximum rate per second allowed for the process.
    :param incr_by: How much to increment the counter.  Useful if you want
                    to ratelimit 1024 bytes/sec and have differing sizes
                    of requests. Must be >= 0.
    :param rate_buffer: Number of seconds the rate counter can drop and be
                        allowed to catch up (at a faster than listed rate).
                        A larger number will result in larger spikes in rate
                        but better average accuracy.
    '''
    if not max_rate or incr_by <= 0:
        return running_time
    clock_accuracy = 1000.0
    now = time.time() * clock_accuracy
    time_per_request = clock_accuracy * (float(incr_by) / max_rate)
    if now - running_time > rate_buffer * clock_accuracy:
        running_time = now
    elif running_time - now > time_per_request:
        eventlet.sleep((running_time - now) / clock_accuracy)
    return running_time + time_per_request
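A usage sketch: cap a loop at roughly 100 iterations per second by threading the returned running_time back into each call (work_items and process() are hypothetical):

running_time = 0
for item in work_items:
    running_time = ratelimit_sleep(running_time, max_rate=100)
    process(item)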
Example #15
File: utils.py  Project: codeoedoc/swift
def lock_path(directory, timeout=10):
    """
    Context manager that acquires a lock on a directory.  This will block until
    the lock can be acquired, or the timeout time has expired (whichever occurs
    first).

    For locking exclusively, file or directory has to be opened in Write mode.
    Python doesn't allow directories to be opened in Write Mode. So we
    workaround by locking a hidden file in the directory.

    :param directory: directory to be locked
    :param timeout: timeout (in seconds)
    """
    mkdirs(directory)
    lockpath = '%s/.lock' % directory
    fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT)
    try:
        with LockTimeout(timeout, lockpath):
            while True:
                try:
                    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                    break
                except IOError as err:
                    if err.errno != errno.EAGAIN:
                        raise
                sleep(0.01)
        yield True
    finally:
        os.close(fd)
Example #16
    def _get_socket(self, host, port, backlog):
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]

        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr, backlog=backlog, family=family)
                if sslutils.is_enabled():
                    sock = sslutils.wrap(sock)

            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s " "after trying for 30 seconds") % {"host": host, "port": port}
            )
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, "TCP_KEEPIDLE"):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle)

        return sock
Example #17
    def _get_api_registrations(self, params):
        data = urllib_parse.urlencode(params)
        headers = {}
        headers['Content-Type'] = 'application/x-www-form-urlencoded'

        response = None
        attempts = 0
        while attempts < self.retries:
            try:
                response = requests.request(
                    method='GET',
                    url=self.url,
                    headers=headers,
                    timeout=self.timeout,
                    params=data)
                self.logger.debug('Got response: %s.', response.json())
                break
            except Exception:
                msg = 'Unable to connect to registrations API.'
                self.logger.exception(msg)
                attempts += 1
                eventlet.sleep(self.retry_delay)

        if not response:
            raise Exception('Failed to connect to TypeForm API.')

        if response.status_code != httplib.OK:
            failure_reason = ('Failed to retrieve registrations: %s \
                (status code: %s)' % (response.text, response.status_code))
            self.logger.error(failure_reason)
            raise Exception(failure_reason)

        return response.json()
Example #18
def receive_message(user, timeout=240):
    queue = RPC.create_queue(user, user)
    conn = RPC.create_connection()
    consumer = RPC.create_consumer(user, conn, queue)
    if not consumer.callbacks[0].empty:  # if the callback's message queue is non-empty, fetch the message directly
        state = user_states.get(user)
        if state is None or state == "offline":
            msg = None
        else:
            msg = consumer.callbacks[0].get_msg()
        RPC.release_connection(conn)
        RPC.release_consumer(user)
        return msg
    SLEEP_INTERVAL = 5
    waited = 0
    msg = None  # stays None if the wait loop never yields a message
    while waited < timeout:
        sleep(SLEEP_INTERVAL)
        waited += SLEEP_INTERVAL
        state = user_states.get(user)
        if state is None or state == "offline":
            break
        # conn.drain_events(timeout=timeout)
        msg = consumer.queues[0].get()
        if msg:
            break
    if msg:
        consumer.receive(msg.payload, msg)
        msg = consumer.callbacks[0].get_msg()
    else:
        msg = None
    RPC.release_connection(conn)
    RPC.release_consumer(user)
    return msg
Example #19
    def run(self):
        while True:
            eventlet.sleep(0)
            try:
                for data in self.pub_sub.listen():
                    if 'subscribe' == data['type']:
                        continue
                    if 'unsubscribe' == data['type']:
                        continue
                    if 'message' == data['type']:
                        entry = pub_sub_api.unpack_message(data['data'])
                        entry_json = jsonutils.loads(entry)
                        self.db_changes_callback(
                            entry_json['table'],
                            entry_json['key'],
                            entry_json['action'],
                            entry_json['value'],
                            entry_json['topic'])

            except Exception as e:
                LOG.warning(e)
                try:
                    connection = self.pub_sub.connection
                    connection.connect()
                    self.db_changes_callback(None, None, 'sync', None, None)
                except Exception:
                    LOG.exception(_LE("reconnect error %(ip)s:%(port)s")
                                  % {'ip': self.ip,
                                     'port': self.plugin_updates_port})
Example #20
    def test_recv_during_send(self):
        sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
        sleep()

        done = event.Event()

        try:
            SNDHWM = zmq.SNDHWM
        except AttributeError:
            # ZeroMQ <3.0
            SNDHWM = zmq.HWM

        sender.setsockopt(SNDHWM, 10)
        sender.setsockopt(zmq.SNDBUF, 10)

        receiver.setsockopt(zmq.RCVBUF, 10)

        def tx():
            tx_i = 0
            while tx_i <= 1000:
                sender.send(str(tx_i))
                tx_i += 1
            done.send(0)

        spawn(tx)
        final_i = done.wait()
        self.assertEqual(final_i, 0)
Example #21
    def test_nested_acquire(self):
        q = zmq._QueueLock()
        self.assertFalse(q)
        q.acquire()
        q.acquire()

        s = semaphore.Semaphore(0)
        results = []

        def lock(x):
            with q:
                results.append(x)
            s.release()

        spawn(lock, 1)
        sleep()
        self.assertEqual(results, [])
        q.release()
        sleep()
        self.assertEqual(results, [])
        self.assertTrue(q)
        q.release()

        s.acquire()
        self.assertEqual(results, [1])
Example #22
    def test_send_during_recv(self):
        sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
        sleep()

        num_recvs = 30
        done_evts = [event.Event() for _ in range(num_recvs)]

        def slow_rx(done, msg):
            self.assertEqual(sender.recv(), msg)
            done.send(0)

        def tx():
            tx_i = 0
            while tx_i <= 1000:
                sender.send(str(tx_i))
                tx_i += 1

        def rx():
            while True:
                rx_i = receiver.recv()
                if rx_i == "1000":
                    for i in range(num_recvs):
                        receiver.send('done%d' % i)
                    sleep()
                    return

        for i in range(num_recvs):
            spawn(slow_rx, done_evts[i], "done%d" % i)

        spawn(tx)
        spawn(rx)
        for evt in done_evts:
            self.assertEqual(evt.wait(), 0)
Example #23
    def test_message_ack_regression(
        self, container, publish, toxiproxy, lock, tracker
    ):
        """ Regression for https://github.com/nameko/nameko/issues/511
        """
        # prevent workers from completing
        lock.acquire()

        # fire entrypoint and block the worker;
        # break connection while the worker is active, then release worker
        with entrypoint_waiter(container, 'echo') as result:
            publish('msg1')
            while not lock._waiters:
                eventlet.sleep()  # pragma: no cover
            toxiproxy.disable()
            # allow connection to close before releasing worker
            eventlet.sleep(.1)
            lock.release()

        # entrypoint will return and attempt to ack initiating message
        assert result.get() == "msg1"

        # enabling connection will re-deliver the initiating message
        # and it will be processed again
        with entrypoint_waiter(container, 'echo') as result:
            toxiproxy.enable()
        assert result.get() == "msg1"

        # connection re-established, container should work again
        with entrypoint_waiter(container, 'echo', timeout=1) as result:
            publish('msg2')
        assert result.get() == 'msg2'
Example #24
File: client.py  Project: edwardt/swift
 def _retry(self, reset_func, func, *args, **kwargs):
     self.attempts = 0
     backoff = self.starting_backoff
     while self.attempts <= self.retries:
         self.attempts += 1
         try:
             if not self.url or not self.token:
                 self.url, self.token = self.get_auth()
                 self.http_conn = None
             if not self.http_conn:
                 self.http_conn = self.http_connection()
             kwargs["http_conn"] = self.http_conn
             rv = func(self.url, self.token, *args, **kwargs)
             return rv
         except (socket.error, HTTPException):
             if self.attempts > self.retries:
                 raise
             self.http_conn = None
         except ClientException as err:
             if self.attempts > self.retries:
                 raise
             if err.http_status == 401:
                 self.url = self.token = None
                 if self.attempts > 1:
                     raise
             elif 500 <= err.http_status <= 599:
                 pass
             else:
                 raise
         sleep(backoff)
         backoff *= 2
         if reset_func:
             reset_func(func, *args, **kwargs)
Example #25
 def _update(self):
     last = time.time()
     current = 0
     while not self._finished:
         eventlet.sleep(self._delay)
         target = self._target
         if current == target:
             last = time.time()
             continue
         now = time.time()
         diff = target - current
         sign = self._sign(diff)
         step = int(min(abs(diff), (now - last) * self._step))
         step = sign * max(1, step)
         current = current + step
         if sign != self._sign(current):
             pigpio.write(self._pin_forward, 0)
             pigpio.write(self._pin_reverse, 0)
         pigpio.set_PWM_dutycycle(self._pin_pwm, abs(current))
         if current == 0:
             pigpio.write(self._pin_forward, 0)
             pigpio.write(self._pin_reverse, 0)
         elif current < 0:
             pigpio.write(self._pin_forward, 0)
             pigpio.write(self._pin_reverse, 1)
         else:
             pigpio.write(self._pin_forward, 1)
             pigpio.write(self._pin_reverse, 0)
         last = now
         self._current = current
Example #26
    def _job_store_checker(self):
        while not self._stopped:
            LOG.debug(
                "Starting Scheduler Job Store checker [scheduler=%s]...", self
            )

            try:
                self._process_store_jobs()
            except Exception:
                LOG.exception(
                    "Scheduler failed to process delayed calls"
                    " due to unexpected exception."
                )

                # For some mysterious reason (probably eventlet related)
                # the exception is not cleared from the context automatically.
                # This results in subsequent log.warning calls to show invalid
                # info.
                if sys.version_info < (3,):
                    sys.exc_clear()

            eventlet.sleep(
                self._fixed_delay +
                random.Random().randint(0, self._random_delay * 1000) * 0.001
            )
Example #27
    def test_blocks_on_pool(self):
        waiter = Queue(0)
        def greedy():
            self.pool.get()
            self.pool.get()
            self.pool.get()
            self.pool.get()
            # No one should be waiting yet.
            self.assertEqual(self.pool.waiting(), 0)
            # The call to the next get will unschedule this routine.
            self.pool.get()
            # So this put should never be called.
            waiter.put('Failed!')

        killable = eventlet.spawn(greedy)

        # no one should be waiting yet.
        self.assertEqual(self.pool.waiting(), 0)

        ## Wait for greedy
        eventlet.sleep(0)

        ## Greedy should be blocking on the last get
        self.assertEqual(self.pool.waiting(), 1)

        ## Send will never be called, so balance should be 0.
        self.assertTrue(waiter.full())

        eventlet.kill(killable)
Example #28
 def tx(sock):
     for i in range(1, 1001):
         msg = "sub%s %s" % ([2, 1][i % 2], i)
         sock.send(msg)
         sleep()
     sock.send('sub1 LAST')
     sock.send('sub2 LAST')
Example #29
    def test_concurrent_green_lock_succeeds(self):
        """Verify spawn_n greenthreads with two locks run concurrently."""
        tmpdir = tempfile.mkdtemp()
        try:
            self.completed = False

            def locka(wait):
                a = lockutils.InterProcessLock(os.path.join(tmpdir, 'a'))
                with a:
                    wait.wait()
                self.completed = True

            def lockb(wait):
                b = lockutils.InterProcessLock(os.path.join(tmpdir, 'b'))
                with b:
                    wait.wait()

            wait1 = eventlet.event.Event()
            wait2 = eventlet.event.Event()
            pool = greenpool.GreenPool()
            pool.spawn_n(locka, wait1)
            pool.spawn_n(lockb, wait2)
            wait2.send()
            eventlet.sleep(0)
            wait1.send()
            pool.waitall()

            self.assertTrue(self.completed)

        finally:
            if os.path.exists(tmpdir):
                shutil.rmtree(tmpdir)
Example #30
    def test_send_during_recv_multipart(self):
        sender, receiver, port = self.create_bound_pair(zmq.XREQ, zmq.XREQ)
        sleep()

        num_recvs = 30
        done_evts = [event.Event() for _ in range(num_recvs)]

        def slow_rx(done, msg):
            self.assertEqual(sender.recv_multipart(), msg)
            done.send(0)

        def tx():
            tx_i = 0
            while tx_i <= 1000:
                sender.send_multipart([str(tx_i), '1', '2', '3'])
                tx_i += 1

        def rx():
            while True:
                rx_i = receiver.recv_multipart()
                if rx_i == ["1000", '1', '2', '3']:
                    for i in range(num_recvs):
                        receiver.send_multipart(['done%d' % i, 'a', 'b', 'c'])
                    sleep()
                    return

        for i in range(num_recvs):
            spawn(slow_rx, done_evts[i], ["done%d" % i, 'a', 'b', 'c'])

        spawn(tx)
        spawn(rx)
        for i in range(num_recvs):
            final_i = done_evts[i].wait()
            self.assertEqual(final_i, 0)
Example #31
File: ircd.py  Project: raylu/outlauth
def ping_all():
	while True:
		for user in list(users.values()):
			user.check_timeout()
		eventlet.sleep(60)
Example #32
 def server(sock):
     client, addr = sock.accept()
     eventlet.sleep(0.1)
Example #33
 def do_heartbeat():
     while True:
         self.send_heartbeats()
         eventlet.sleep(CONF.matchmaker_heartbeat_freq)
Example #34
 def __call__(self, policy, rule, port_thread, buf):
     eventlet.sleep(self.wait_time)
Example #35
File: manager.py  Project: bbc/cinder
 def _wait_for_scheduler(self):
     # NOTE(dulek): We're waiting for scheduler to announce that it's ready
     # or CONF.scheduler_driver_init_wait_time seconds from service startup
     # has passed.
     while self._startup_delay and not self.driver.is_ready():
         eventlet.sleep(1)
Example #36
    def _get_socket(self, host, port, backlog):
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                      socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            family = socket.AF_INET

        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        use_ssl = cert_file or key_file

        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)

        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)

        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s") % key_file)

        if use_ssl and (not cert_file or not key_file):
            raise RuntimeError(
                _("When running server in SSL mode, you must "
                  "specify both a cert_file and key_file "
                  "option value in your configuration file"))

        def wrap_ssl(sock):
            ssl_kwargs = {
                'server_side': True,
                'certfile': cert_file,
                'keyfile': key_file,
                'cert_reqs': ssl.CERT_NONE,
            }

            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            return ssl.wrap_socket(sock, **ssl_kwargs)

        sock = None
        retry_until = time.time() + 30
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
                if use_ssl:
                    sock = wrap_ssl(sock)

            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s "
                  "after trying for 30 seconds") % {
                      'host': host,
                      'port': port
                  })
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock
Example #37
 def last_out_sends():
     eventlet.sleep(cfg.CONF.send_events_interval)
     self._waiting_to_send = False
     self.send_events()
Example #38
crap_events = [
    '.', ' ', ':', ' : |c', 'baditem:1|k', 'baditem:1|c|@', 'baditem:1|c|@wtf',
    'baditem:1|c|@05f.6', 'badtimer:5.0f|ms', 'badgauge:K|g'
]


def bench(payload, limit):
    for i in range(1, limit):
        send_event(payload)


print("--> %s" % datetime.now())
for event in combined_events:
    print("Sending [%s]" % event)
    send_event(event)
    sleep(.5)
sleep(2)
print("--> %s" % datetime.now())
for event in good_events:
    print("Sending [%s]" % event)
    send_event(event)
    sleep(.5)
for event in crap_events:
    print("Sending crap [%s]" % event)
    send_event(event)
    sleep(.5)
print("Sending transform [%s]" % transform_test)
send_event(transform_test)

print("--> starting benchmark in 5 seconds")
sleep(5)
Example #39
    def test_expire(self, tracker, redis, action, ttl, wait_time):
        redis.set('foo', 'bar')
        method = getattr(redis, action)
        method('foo', ttl)

        sleep(TIME_SLEEP)

        call_args_list = [
            call({
                'type': 'psubscribe',
                'pattern': None,
                'channel': '__keyevent@*__:*',
                'data': 1,
            }),
            call({
                'type': 'psubscribe',
                'pattern': None,
                'channel': '__keyspace@*__:*',
                'data': 2,
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyspace@*__:*',
                'channel': '__keyspace@0__:foo',
                'data': 'set',
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyevent@*__:*',
                'channel': '__keyevent@0__:set',
                'data': 'foo',
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyspace@*__:*',
                'channel': '__keyspace@0__:foo',
                'data': 'expire',
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyevent@*__:*',
                'channel': '__keyevent@0__:expire',
                'data': 'foo',
            }),
        ]
        assert_items_equal(tracker.call_args_list, call_args_list)

        sleep(wait_time)

        call_args_list.extend([
            call({
                'type': 'pmessage',
                'pattern': '__keyspace@*__:*',
                'channel': '__keyspace@0__:foo',
                'data': 'expired',
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyevent@*__:*',
                'channel': '__keyevent@0__:expired',
                'data': 'foo',
            }),
        ])
        assert_items_equal(tracker.call_args_list, call_args_list)
Example #40
def foo(evt, b):
    for i in range(1, 10):
        evt.send(b + i)
        evt.reset()
        eventlet.sleep(1)
Example #41
 def _sleep(self, wait_time):
     """Sleep for the specified number of seconds."""
     if ENABLE_SLEEP and wait_time is not None:
         logger.debug(_('%s sleeping') % str(self))
         eventlet.sleep(wait_time)
Example #42
    def test_listen_multiple_dbs(self, create_service, tracker, redis,
                                 redis_db_1):
        create_service(uri_config_key=URI_CONFIG_KEY,
                       keys='*',
                       events='*',
                       dbs=[0, 1])

        redis.set('foo', '1')
        sleep(TIME_SLEEP)

        call_args_list = [
            call({
                'type': 'psubscribe',
                'pattern': None,
                'channel': '__keyevent@0__:*',
                'data': 1,
            }),
            call({
                'type': 'psubscribe',
                'pattern': None,
                'channel': '__keyevent@1__:*',
                'data': 2,
            }),
            call({
                'type': 'psubscribe',
                'pattern': None,
                'channel': '__keyspace@0__:*',
                'data': 3,
            }),
            call({
                'type': 'psubscribe',
                'pattern': None,
                'channel': '__keyspace@1__:*',
                'data': 4,
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyspace@0__:*',
                'channel': '__keyspace@0__:foo',
                'data': 'set',
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyevent@0__:*',
                'channel': '__keyevent@0__:set',
                'data': 'foo',
            }),
        ]
        assert_items_equal(tracker.call_args_list, call_args_list)

        redis_db_1.set('bar', '2')
        sleep(TIME_SLEEP)

        call_args_list.extend([
            call({
                'type': 'pmessage',
                'pattern': '__keyspace@1__:*',
                'channel': '__keyspace@1__:bar',
                'data': 'set',
            }),
            call({
                'type': 'pmessage',
                'pattern': '__keyevent@1__:*',
                'channel': '__keyevent@1__:set',
                'data': 'bar',
            }),
        ])
        assert_items_equal(tracker.call_args_list, call_args_list)
Example #43
 def _run(self):
     logger.debug("will spawn worker soon")
     sleep(1)
     self.container.spawn_worker(self, self.args, self.kwargs)
Example #44
    def init_host_with_rpc(self):
        ctxt = context.get_admin_context()
        self.request_service_capabilities(ctxt)

        eventlet.sleep(CONF.periodic_interval)
        self._startup_delay = False
Example #45
 def _sleep(self, wait_time):
     """Sleep for the specified number of seconds."""
     if ENABLE_SLEEP and wait_time is not None:
         LOG.debug('%s sleeping', six.text_type(self))
         eventlet.sleep(wait_time)
Example #46
 def fake_send_wait(ctxt, topic, meters):
     fake_send.side_effect = mock.Mock()
     # Sleep to simulate concurrency and allow other threads to work
     eventlet.sleep(0)
Example #47
        self.remote_as = ev.remote_as
        self.prefix = ev.prefix
        self.nexthop = ev.nexthop


if __name__=="__main__":
    speaker = BGPSpeaker(as_number=20,router_id='1.1.1.2',
                         best_path_change_handler=dump_remote_best_path_change,
                         peer_down_handler=detect_peer_down
                         )
    #speaker.neighbor_add("10.108.90.1",10)
    #speaker.neighbor_add("10.108.91.1",30)
    speaker.neighbor_add("10.108.92.1",10)

    # uncomment the line below if the speaker needs to talk to a BMP server
    # speaker.bmp_server_add('192.168.177.2', 11019)

    count = 1
    while True:
        eventlet.sleep(10)
        prefix = ['10.108.93.0/24', '10.108.94.0/24']
        for i in prefix:
            print("add a new prefix", i)
            speaker.prefix_add(i)
        count += 1
        if count == 10:
            speaker.shutdown()
            break


Example #48
    def run(self, *args, **kwargs):
        try:
            self.logger.info('event agent: starting')

            pool = GreenPool(len(self.workers))

            for worker in self.workers:
                pool.spawn(worker.start)

            def front(server, backend):
                while True:
                    msg = server.recv_multipart()
                    if validate_msg(msg):
                        try:
                            event_id = sqlite3.Binary(msg[2])
                            data = msg[3]
                            self.queue.put(event_id, data)
                            event = ['', msg[2], msg[3]]
                            backend.send_multipart(event)
                        except Exception:
                            pass
                        finally:
                            ack = msg[0:3]
                            server.send_multipart(ack)

            def back(backend):
                while True:
                    msg = backend.recv_multipart()
                    event_id = msg[1]
                    success = msg[2]
                    event_id = sqlite3.Binary(event_id)
                    if not success:
                        self.queue.failed(event_id)
                    else:
                        self.queue.delete(event_id)

            boss_pool = GreenPool(2)
            boss_pool.spawn_n(front, self.server, self.backend)
            boss_pool.spawn_n(back, self.backend)
            while True:
                sleep(1)

                now = time.time()
                if now - self.last_retry > self.retry_interval:
                    self.retry()
                    self.last_retry = now

                for w in self.workers:
                    if w.failed:
                        self.workers.remove(w)
                        self.logger.warn('restart worker "%s"', w.name)
                        new_w = EventWorker(self.conf, w.name, self.context)
                        self.workers.append(new_w)
                        pool.spawn(new_w.start)

        except Exception as e:
            self.logger.error('ERROR in main loop %s', e)
            raise e
        finally:
            self.logger.warn('event agent: stopping')
            self.stop_workers()

            self.logger.warn('ZMQ context being destroyed')
            self.context.destroy(linger=True)
            self.context = None
Example #49
def send_pickup():
    print("Picking up")
    pickup = {}
    sio.emit("pickup", pickup, skip_sid=True)
    eventlet.sleep(0)
Example #50
    def lock(self, check_ts=True):
        """Lock based on the below condition:

        while -
            if no hash present
              - insert lock and move on

            if hash present
              - check if TOPO_SYNC is present, i.e. TopoSync ongoing or expired
                - (y) check if the previous TopoSync has expired
                  - (y) evict it, put self ts and move on
                - if locked by self, return
                - else if check_ts is false, sleep_and_retry

              - if no TOPO_SYNC
                - grab lock, check if TS has expired
                  - if expired, return true
                  - if not, put back old TS and return false

        :param check_ts:
        :return: boolean sync_required
                            True if all conditions met and lock acquired
                            False if locking not required
        """
        retry_sleep_time = random.randint(MIN_LOCK_RETRY_SLEEP_TIME_SECS,
                                          MAX_LOCK_RETRY_SLEEP_TIME_SECS)
        new_hash = self.lock_marker
        while True:
            res = self._get_current_record()
            # set prev_lock_ts based on record
            if res:
                if 'TOPO_SYNC' in res.hash:
                    self.prev_lock_ts = get_lock_owner(res.hash)
                else:
                    self.prev_lock_ts = str(res.hash)

            # try lock acquisition based on hash record
            if not res:
                # no hash present, try optimistically locking it
                if not self._insert_hash_with_lock():
                    # someone else beat us to it, sleep and retry
                    log_lock_acquisition_failure('0', self.lock_ts)
                    eventlet.sleep(retry_sleep_time)
                    continue
                # got the lock, execute update since nothing existed earlier
                LOG.debug(
                    "TOPO_SYNC: LockTS %(lockts)s, datetime %(dt_string)s has "
                    "grabbed the lock.", {
                        'lockts': self.lock_ts,
                        'dt_string': convert_ts_to_datetime(self.lock_ts)
                    })
                return True

            if 'TOPO_SYNC' in res.hash:
                # TOPO_SYNC already in progress. Check if timestamp is over
                # the limit
                prev_ts = get_lock_owner(res.hash)
                if self._is_timestamp_expired(
                        expire_secs=TOPO_SYNC_EXPIRED_SECS, prev_ts=prev_ts):
                    # optimistically update timestamp
                    if not self._optimistic_update_hash_record(res, new_hash):
                        # someone else updated it before us, sleep and retry
                        log_lock_acquisition_failure(prev_ts, self.lock_ts)
                        eventlet.sleep(retry_sleep_time)
                        continue
                    # grabbed the lock
                    LOG.debug(
                        "TOPO_SYNC: LockTS %(lock_ts)s, datetime "
                        "%(lock_dt_string)s has forcefully grabbed the lock. "
                        "PreviousTS %(prev_ts)s, datetime %(prev_dt_string)s "
                        "was over %(expired_secs)s old.", {
                            'lock_ts': self.lock_ts,
                            'lock_dt_string': convert_ts_to_datetime(
                                self.lock_ts),
                            'prev_ts': prev_ts,
                            'prev_dt_string': convert_ts_to_datetime(prev_ts)
                        })
                    return True
                else:
                    if prev_ts == self.lock_ts:
                        LOG.debug(
                            "TOPO_SYNC: LockTS %(lockts)s, datetime "
                            "%(dt_string)s has grabbed the lock.", {
                                'lockts': self.lock_ts,
                                'dt_string': convert_ts_to_datetime(
                                    self.lock_ts)
                            })
                        return True

                    if check_ts:
                        LOG.debug(
                            "TOPO_SYNC: LockTS %(lock_ts)s, datetime "
                            "%(lock_dt_string)s giving up since previous lock "
                            "not expired.", {
                                'lock_ts':
                                self.lock_ts,
                                'lock_dt_string':
                                convert_ts_to_datetime(self.lock_ts)
                            })
                        return False
                    LOG.debug(
                        "TOPO_SYNC: LockTS %(lock_ts)s, datetime "
                        "%(lock_dt_string)s waiting for in progress topo_sync "
                        "to complete.", {
                            'lock_ts': self.lock_ts,
                            'lock_dt_string': convert_ts_to_datetime(
                                self.lock_ts)
                        })
                    # this is the only place where we retry while waiting for
                    # timeout. don't wait indefinitely
                    self._increment_lock_retry()
                    eventlet.sleep(retry_sleep_time)
                    continue
            else:
                # nobody has the lock, grab it!
                if not self._optimistic_update_hash_record(res, new_hash):
                    # someone else updated it before us, sleep and retry
                    log_lock_acquisition_failure(res.hash, self.lock_ts)
                    eventlet.sleep(retry_sleep_time)
                    continue

                # finally grabbed the lock
                LOG.debug(
                    "TOPO_SYNC: LockTS %(lockts)s, datetime %(dt_string)s has "
                    "grabbed the lock.", {
                        'lockts': self.lock_ts,
                        'dt_string': convert_ts_to_datetime(self.lock_ts)
                    })
                if check_ts and not self._is_timestamp_expired(
                        expire_secs=TOPO_SYNC_EXPIRED_SECS, prev_ts=res.hash):
                    # replace with old hash, since we already grabbed the lock
                    LOG.debug(
                        "TOPO_SYNC: Giving up lock since check_ts is True and "
                        "previous timestamp not expired.")
                    self.put_hash(res.hash)
                    return False
                # lock grabbed and not returned. return True
                return True
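The core of the method above is optimistic locking: read the current record, install our marker only if the record is unchanged, and back off and retry on a lost race. A minimal sketch, where read() and cas() are hypothetical stand-ins for _get_current_record() and _optimistic_update_hash_record():

import random
import eventlet

def acquire(read, cas, marker, min_sleep=1, max_sleep=3):
    while True:
        current = read()
        if cas(expected=current, new=marker):
            return True  # compare-and-swap succeeded; we own the lock
        # lost the race; someone updated the record first
        eventlet.sleep(random.randint(min_sleep, max_sleep))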
Example #51
 def readfn(*args):
     result = fd.read(*args)
     sleep(0)
     return result
Example #52
 def _write_debug_stats(self):
     polling_interval = max(DEBUG_STATS_MIN_WRITE_INTERVAL,
                            self.conf.cisco_pnr.dns_stats_interval)
     while True:
         eventlet.sleep(polling_interval)
         self.debug_stats.write_stats_to_file()
Example #53
def handle_upload():
    global upload_key
    if request.form["auth"] != upload_key:
        return "nope"
    socketio.emit("line", {"content": "Data received.\nSaving file..."}, broadcast=True)
    eventlet.sleep()
    save_sketch(request.form["content"])
    socketio.emit("line", {"content": "Compiling and uploading..."}, broadcast=True)
    eventlet.sleep()
    proc = subprocess.Popen(["platformio", "run", "-d", "./platformio"], universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
    error = False
    while True:
        line = proc.stdout.readline()
        if line and (not (line.startswith("Reading") or line.startswith("Writing"))):
            if "ERROR:" in line:
                error = True
            socketio.emit("line", {"content": line.rstrip()}, broadcast=True)
        else:
            break
        eventlet.sleep()
    if not error:
        socketio.emit("line", {"content": "Successfully uploaded source to Arduino!"}, broadcast=True)
        eventlet.sleep(1)
        socketio.emit("reafy", broadcast=True)
    else:
        socketio.emit("line", {"content": "Failed to upload to Arduino. Press Ctrl-I to continue."}, broadcast=True)
        eventlet.sleep()
        socketio.emit("uploaderror", broadcast=True)
    eventlet.sleep()
    return "done"
Example #54
def unlock():
    eventlet.sleep(2)
    pprint(kv_client1.r_unlock('test', 'test-1', 'test-token'))
Example #55
def temp_1_task():
    global room_temperature
    room_temperature = 60.0
    while True:
        eventlet.sleep(2)
Example #56
 def _periodic_checking_thread(self):
     while self._monitor_processes:
         eventlet.sleep(self._config.AGENT.check_child_processes_interval)
         eventlet.spawn(self._check_child_processes)
Example #57
def running_backdoor():
    runner = object()
    green_socket, gt = setup_backdoor(runner, 0)
    eventlet.sleep(0)  # give backdoor a chance to spawn
    socket_name = green_socket.fd.getsockname()
    return socket_name
Example #58
def temp_2_task():
    global condenser_temperature
    condenser_temperature = 55.0
    while True:
        eventlet.sleep(2)
Example #59
def eventlet_yield():
    """Swith to another eventlet coroutine."""
    sleep(0)
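A self-contained demonstration that sleep(0) is a cooperative yield: each greenthread gives the other a turn every iteration, so the output interleaves instead of running each worker to completion:

import eventlet

def worker(name, n):
    for i in range(n):
        print(name, i)
        eventlet.sleep(0)  # yield so the other greenthread can run

a = eventlet.spawn(worker, 'a', 3)
b = eventlet.spawn(worker, 'b', 3)
a.wait()
b.wait()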
Example #60
def flow_factory(session, book,
                 applications,
                 upgradefile=None,
                 backupfile=None,
                 store=None,
                 db_flow_factory=database.mysql_flow_factory,
                 **kwargs):
    """
    @param session:                 class: sqlalchemy:session
    @param middlewares:             class:list EntityMiddleware
    @param upgradefile:             class:AppUpgradeFile    app upgrade file
    @param backupfile:              class:basestring of path/AppRemoteBackupFile  app backup file
    @param store:                   class:dict
    @param db_flow_factory:         class:function   默认database.mysql_flow_factory
    @param create_cls:              class:class      数据库创建任务类 参考database.MysqlCreate
    @param backup_cls:              class:class      数据库备份任务类 参考database.MysqlDump
    @param update_cls:              class:class      数据库更新任务类  参考database.MysqlUpdate
    """
    if not applications:
        raise RuntimeError('No application found')
    if upgradefile and not isinstance(upgradefile, TaskPublicFile):
        raise TypeError('upgradefile not TaskPublicFile')
    if backupfile and not isinstance(backupfile, TaskPublicFile):
        raise TypeError('backupfile not TaskPublicFile')
    store = store or {}
    if store.get('backupfile') or store.get('upgradefile'):
        raise RuntimeError('Backupfile or Upgradefile in store')

    # choose one application at random
    app = applications[random.randint(0, len(applications) - 1)]
    # get the endpoint name
    endpoint_name = app.middleware.endpoint
    main_flow = lf.Flow('%s_taskflow' % endpoint_name)

    # prepare files for the app update and the database
    prepare_uflow = uf.Flow('%s_prepare' % endpoint_name)
    # download the application upgrade file
    if upgradefile:
        rebind = ['download_timeout']
        format_store_rebind(store, rebind)
        #  get app update file, all middlewares use same app upload file
        prepare_uflow.add(application.AppUpgradeFileGet(app.middleware, upgradefile, rebind=rebind))
    # 备份程序文件
    if backupfile:
        rebind = ['download_timeout']
        format_store_rebind(store, rebind)
        prepare_uflow.add(application.AppBackUp(app.middleware, backupfile, rebind=rebind))
    # download database update files
    if app.databases and any(d.update for d in app.databases):
        rebind = ['download_timeout']
        format_store_rebind(store, rebind)
        # get database upload file, all middlewares use same database upload file
        prepare_uflow.add(database.DbUpdateSqlGet(app.middleware, app.databases, rebind=rebind))
    if len(prepare_uflow):
        main_flow.add(prepare_uflow)
    else:
        del prepare_uflow

    entitys_taskflow = uf.Flow('%s_entitys_task' % endpoint_name)
    # batch update operations
    for app in applications:
        # all entity task
        entitys_taskflow.add(entity_factory(session, book, app, store,
                                            upgradefile, backupfile,
                                            db_flow_factory, **kwargs))
        eventlet.sleep(0)
    main_flow.add(entitys_taskflow)

    return main_flow