Esempio n. 1
0
def execute_with_timeout(*args, **kwargs):
    """Run ``execute`` with the given arguments, aborting after a timeout.

    The timeout (seconds) is taken from the ``timeout`` keyword argument
    (default 30) and removed from ``kwargs`` so it is not forwarded to
    ``execute`` itself (the original used ``get`` and passed it through,
    and defined an unused ``cb_timeout`` helper, both removed here).

    Raises:
        exception.ProcessExecutionError: if the call does not finish
            within the timeout.
    """
    time = kwargs.pop('timeout', 30)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except Timeout as t:
        if t is not timeout:
            # Some other (outer) timeout fired; let it propagate.
            LOG.error("Timeout reached but not from our timeout. This is bad!")
            raise
        msg = (_("Time out after waiting "
                 "%(time)s seconds when running proc: %(args)s"
                 " %(kwargs)s") % {'time': time, 'args': args,
                                   'kwargs': kwargs})
        LOG.error(msg)
        raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
 def readRegister(self, address, cache=False, timeout=2):
     """Return the value of the register at [address].

     If *cache* is true and a previously read value is available, it is
     returned without querying the node.  ``None`` is returned when
     *timeout* (seconds) elapses before a response arrives.

     Raises:
         TypeError: if *address* is not an integer.
         ValueError: if *address* is outside [0, 255].
     """
     if not isinstance(address, int):
         raise TypeError("Address must be an integer")
     if address < 0 or address > 255:
         raise ValueError("Address is out of range [0, 255]")
     # Serve from cache when allowed and a value is actually present.
     if cache and self.regs.get(address) is not None:
         return self.regs[address]
     self.regs[address] = None
     self.vscp.sendSimpleEvent(
         vscp_class=constant.VSCP_CLASS1_PROTOCOL,
         vscp_type=constant.VSCP_TYPE_PROTOCOL_READ_REGISTER,
         vscp_data=[self.id, address],
     )
     timer = Timeout(timeout, self.__timeout)
     try:
         # Poll until the response handler stores the register value.
         while self.regs[address] is None:
             time.sleep(0.02)
         return self.regs[address]
     except VSCPException:
         # Raised via the timeout handler: no answer within *timeout* s.
         return None
     finally:
         # Always disarm the timer (the original left it armed on the
         # timeout path).
         timer.cancel()
Esempio n. 3
0
    def rengine_side(self, appid, token, uri):
        """Handle rengine (client) GET requests.

        Forwards the request to the app identified by *appid* and blocks
        until the matching response event fires or TIMEOUT elapses.
        Aborts with 401 on bad credentials, 500 on send failure and 504
        on timeout.
        """
        if not self.rengine_authorization_ok(appid, token):
            LOGGER.info('Rengine content request authorization fails')
            abort(401, 'Authorization failed')

        evt = Event()
        request_id = str(uuid4())
        self.request_id_events[request_id] = evt

        headers = ["%s: %s" % (header, val) for (header, val) in request.headers.items()]
        packet = ScpPacket.make_sfkcontent(uri, request_id, headers)
        try:
            self._send(packet, appid)
        except Exception as e:
            # Drop the pending-event entry before bailing out (the
            # original leaked it on this path).
            self.request_id_events.pop(request_id, None)
            abort(500, str(e))

        LOGGER.debug("uri %s expected" % uri)
        timeout = Timeout(TIMEOUT)
        try:
            resp = evt.wait()
        except Timeout:
            abort(504, 'Gateway Timeout')
        finally:
            timeout.cancel()
            # Always remove the mapping; the original only removed it on
            # timeout, leaking one entry per successful request.
            self.request_id_events.pop(request_id, None)

        LOGGER.debug("uri %s got" % uri)

        return resp
 def writeRegister(self, address, value, timeout=2):
     """Write *value* to the register at [address].

     Returns ``True`` if the node echoes the written value back before
     *timeout* (seconds) elapses, ``False`` otherwise.

     Raises:
         TypeError: if *address* or *value* is not an integer.
         ValueError: if *address* or *value* is outside [0, 255].
     """
     if not isinstance(address, int):
         raise TypeError("Address must be an integer")
     if address < 0 or address > 255:
         raise ValueError("Address is out of range [0, 255]")
     if not isinstance(value, int):
         raise TypeError("Value must be an integer")
     if value < 0 or value > 255:
         raise ValueError("Value is out of range [0, 255]")
     self.regs[address] = None
     self.vscp.sendSimpleEvent(
         vscp_class=constant.VSCP_CLASS1_PROTOCOL,
         vscp_type=constant.VSCP_TYPE_PROTOCOL_WRITE_REGISTER,
         vscp_data=[self.id, address, value],
     )
     timer = Timeout(timeout, self.__timeout)
     try:
         # Poll until the response handler echoes the register back
         # (the original redundantly reset self.regs[address] twice).
         while self.regs[address] is None:
             time.sleep(0.02)
         return self.regs[address] == value
     except VSCPException:
         # Timeout handler fired: no confirmation received in time.
         return False
     finally:
         timer.cancel()
Esempio n. 5
0
    def shrink_cluster(self, context, cluster_id, instance_ids):
        """Remove the given instances from a MongoDB cluster.

        Polls (bounded by CONF.cluster_delete_time_out) until every id in
        *instance_ids* is marked deleted, then resets the task.  Hitting
        the cluster-usage timeout flags the cluster as failed instead.
        """
        LOG.debug("begin shrink_cluster for MongoDB cluster %s" % cluster_id)

        def _shrink_cluster():
            def all_instances_marked_deleted():
                # True once none of the ids being removed is still live.
                live = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
                live_ids = {db_inst.id for db_inst in live}
                return not (set(instance_ids) & live_ids)

            try:
                utils.poll_until(all_instances_marked_deleted,
                                 sleep_time=2,
                                 time_out=CONF.cluster_delete_time_out)
            except PollTimeOut:
                LOG.error(_("timeout for instances to be marked as deleted."))
                return

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _shrink_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for shrinking cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("end shrink_cluster for MongoDB cluster %s" % self.id)
Esempio n. 6
0
    def grow_cluster(self, context, cluster_id, new_instance_ids):
        """Add *new_instance_ids* to an existing Vertica cluster.

        Waits for the new members to become cluster-ready, exchanges SSH
        public keys among all members, asks the master node to grow the
        cluster, and finally marks the new members complete.  The whole
        operation is bounded by CONF.cluster_usage_timeout; failures and
        timeouts flag the cluster as failed.
        """

        def _grow_cluster():
            LOG.debug("begin grow_cluster for Vertica cluster %s" % cluster_id)

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for new cluster members to get to cluster-ready status.
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            new_insts = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]

            # Members that were already part of the cluster.
            existing_instances = [Instance.load(context, instance_id)
                                  for instance_id
                                  in instance_ids
                                  if instance_id not in new_instance_ids]

            existing_guests = [self.get_guest(i) for i in existing_instances]
            new_guests = [self.get_guest(i) for i in new_insts]
            all_guests = new_guests + existing_guests

            # Users that get password-less SSH across the cluster.
            authorized_users_without_password = ['root', 'dbadmin']
            new_ips = [self.get_ip(instance) for instance in new_insts]

            # Collect every member's public keys and push the combined
            # list back to every member.
            # NOTE(review): pub_key is a list of per-guest key blobs --
            # presumably authorize_public_keys() accepts a list; confirm.
            for user in authorized_users_without_password:
                pub_key = [guest.get_public_keys(user) for guest in all_guests]
                for guest in all_guests:
                    guest.authorize_public_keys(user, pub_key)

            # Only the master node coordinates the actual grow operation.
            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    LOG.debug("Found 'master' instance, calling grow on guest")
                    master_instance = Instance.load(context,
                                                    db_instance.id)
                    self.get_guest(master_instance).grow_cluster(new_ips)
                    break

            for guest in new_guests:
                guest.cluster_complete()

        timeout = Timeout(CONF.cluster_usage_timeout)

        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        except Exception:
            LOG.exception(_("Error growing cluster %s.") % cluster_id)
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()
Esempio n. 7
0
    def piper(self, in_sock, out_sock, out_addr, onkill):
        """Worker thread for data reading.

        Pumps bytes from *in_sock* to *out_sock* until EOF, a
        transmission timeout, or greenlet death.  *onkill* names the
        companion pump in ``self.threads`` to kill when half-closing the
        output side fails.
        """
        try:
            timeout = Timeout(self.transmission_timeout_seconds)
            try:
                while True:
                    written = in_sock.recv(32768)
                    if not written:
                        # EOF on the input side: half-close the output.
                        try:
                            out_sock.shutdown(socket.SHUT_WR)
                        except socket.error:
                            # Peer already gone; stop the partner pump.
                            self.threads[onkill].kill()
                        break
                    try:
                        out_sock.sendall(written)
                    except socket.error:
                        # Output side dropped; keep draining the input.
                        pass
                    self.data_handled += len(written)
            finally:
                timeout.cancel()
        except greenlet.GreenletExit:
            return
        except Timeout:
            # This only prevents closing the connection without any data or
            # status code returned from mantrid when no data was received
            # from the backend.  When that happens, nginx reports 'upstream
            # prematurely closed connection' and returns 500; we want our
            # custom error page so we know when it happens.
            if onkill == "stoc" and self.data_handled == 0:
                out_sock.sendall("HTTP/1.0 594 Backend timeout\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
            logging.warn("Timeout serving request to backend %s of %s", self.backend, self.host)
            return
Esempio n. 8
0
def execute_with_timeout(*args, **kwargs):
    """Invoke ``execute`` but give up after ``timeout`` seconds.

    ``timeout`` (default 30) and ``log_output_on_error`` (default False)
    are consumed from *kwargs* and not forwarded.  A process failure is
    optionally logged and re-raised; our own timeout becomes a
    ProcessExecutionError, while a foreign Timeout is logged and
    propagated untouched.
    """
    time = kwargs.pop('timeout', 30)
    log_output_on_error = kwargs.pop('log_output_on_error', False)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except exception.ProcessExecutionError as e:
        if log_output_on_error:
            details = {'cmd': e.cmd, 'description': e.description or '',
                       'exit_code': e.exit_code, 'stderr': e.stderr,
                       'stdout': e.stdout}
            LOG.error(
                _("Command '%(cmd)s' failed. %(description)s "
                  "Exit code: %(exit_code)s\nstderr: %(stderr)s\n"
                  "stdout: %(stdout)s") % details)
        raise
    except Timeout as t:
        if t is not timeout:
            LOG.error(_("Got a timeout but not the one expected."))
            raise
        msg = (_("Time out after waiting "
                 "%(time)s seconds when running proc: %(args)s"
                 " %(kwargs)s.") % {'time': time, 'args': args,
                                    'kwargs': kwargs})
        LOG.error(msg)
        raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
Esempio n. 9
0
    def create_cluster(self, context, cluster_id):
        """Configure a freshly provisioned Redis cluster.

        Waits for all members to become ready, joins them via the first
        node, distributes the 16384 hash slots as evenly as possible
        (earlier nodes absorb the remainder) and marks the members
        complete.  Failures inside the setup flag the cluster as failed;
        the whole operation is bounded by CONF.cluster_usage_timeout.
        """
        LOG.debug("Begin create_cluster for id: %s." % cluster_id)

        def _create_cluster():

            # Fetch instances by cluster_id against instances table.
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            # Connect nodes to the first node
            guests = [self.get_guest(instance) for instance in instances]
            try:
                cluster_head = instances[0]
                cluster_head_port = '6379'
                cluster_head_ip = self.get_ip(cluster_head)
                for guest in guests[1:]:
                    guest.cluster_meet(cluster_head_ip, cluster_head_port)

                num_nodes = len(instances)
                total_slots = 16384
                # divmod (floor division): plain '/' yields a float under
                # Python 3 and would corrupt the slot boundaries.
                slots_per_node, leftover_slots = divmod(total_slots,
                                                        num_nodes)
                first_slot = 0
                for guest in guests:
                    last_slot = first_slot + slots_per_node
                    if leftover_slots > 0:
                        # This node absorbs one of the remainder slots.
                        leftover_slots -= 1
                    else:
                        last_slot -= 1
                    guest.cluster_addslots(first_slot, last_slot)
                    first_slot = last_slot + 1

                for guest in guests:
                    guest.cluster_complete()
            except Exception:
                LOG.exception(_("Error creating cluster."))
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End create_cluster for id: %s." % cluster_id)
Esempio n. 10
0
 def request(self, address, path, method='GET', params=None, headers=None):
     """Issue an HTTP request to *address* (a ``(host, port)`` tuple).

     The connection attempt and the sending of the request are bounded
     by a two-second timeout; reading the response is not.

     Raises:
         RuntimeError: if sending the request takes longer than 2s.
     """
     # The original default 'headers={}' was a shared mutable default
     # argument; use None and create a fresh dict per call instead.
     if headers is None:
         headers = {}
     t = Timeout(2, RuntimeError("Timeout trying to send request."))
     try:
         conn = httplib.HTTPConnection("%s:%s" % address)
         conn.request(method, path, params, headers)
     finally:
         t.cancel()
     return conn.getresponse()
Esempio n. 11
0
    def create_cluster(self, context, cluster_id):
        """Set up a newly provisioned Vertica cluster.

        Waits for all members to become cluster-ready, configures
        password-less SSH between them, installs the cluster from the
        first member, and marks every member complete.  Failures and the
        usage timeout flag the cluster as failed.
        """
        LOG.debug("Begin create_cluster for id: %s." % cluster_id)

        def _create_cluster():

            # Fetch instances by cluster_id against instances table.
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]

            member_ips = [self.get_ip(instance) for instance in instances]
            guests = [self.get_guest(instance) for instance in instances]

            # Users to be configured for password-less SSH.
            authorized_users_without_password = ['root', 'dbadmin']

            # Configuring password-less SSH for cluster members.
            # Strategy for setting up SSH:
            # get public keys for user from member-instances in cluster,
            # combine them, finally push it back to all instances,
            # and member instances add them to authorized keys.
            LOG.debug("Configuring password-less SSH on cluster members.")
            try:
                for user in authorized_users_without_password:
                    pub_key = [guest.get_public_keys(user) for guest in guests]
                    for guest in guests:
                        guest.authorize_public_keys(user, pub_key)

                LOG.debug("Installing cluster with members: %s." % member_ips)
                # The first member drives the actual cluster install.
                guests[0].install_cluster(member_ips)

                LOG.debug("Finalizing cluster configuration.")
                for guest in guests:
                    guest.cluster_complete()
            except Exception:
                LOG.exception(_("Error creating cluster."))
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End create_cluster for id: %s." % cluster_id)
Esempio n. 12
0
 def wrapper(*args, **kwargs):
     """Run *func* under *time* seconds, raising *exception* on expiry.

     The captured *exception* is swallowed unless *reraise* is set.
     Returns whatever *func* returns (``None`` when the exception was
     swallowed).
     """
     timeout = Timeout(time, exception)
     try:
         return func(*args, **kwargs)
     except exception as e:
         if reraise:
             raise e
     finally:
         # Always disarm the timer: the original only cancelled on the
         # success path, leaving a live timer armed after a caught
         # exception -- it could fire later in unrelated code.
         timeout.cancel()
Esempio n. 13
0
 def update_data(self):
     """Refresh bikeway data from the KML feed, bounded by a 10s timeout.

     On any failure (including timeout) the previous data is kept and a
     message is printed; the database is only updated after a full,
     successful parse.
     """
     print("updating data")
     timeout = Timeout(10)
     try:
         self.parser = KMLParser()
         temp = self.parse_data()
         # Disarm before the (untimed) commit steps, as in the original.
         timeout.cancel()
         self.bikeways = temp
         self.update_database()
     except (Timeout, Exception):
         # Timeout is listed explicitly: eventlet/gevent Timeout derives
         # from BaseException, so 'except Exception' alone would miss it
         # (the original used a bare except for the same reason).
         print("couldn't get data")
     finally:
         # Idempotent; guarantees the timer is disarmed on failure paths
         # too (the original leaked it when parse_data() raised).
         timeout.cancel()
Esempio n. 14
0
 def _write_with_timeout(self, writer, chunk):
     """Write *chunk* to *writer*, closing the writer if the write times out.

     Raises:
         Timeout: when our timer (or a foreign one) expires during the
             write; any other error from the write also propagates.
     """
     timeout = Timeout(self.timeout)
     try:
         writer.write(chunk)
     except Timeout as t:
         if t is timeout:
             # Our timer fired: the writer is in an unknown state.
             writer.close()
         # Re-raise in both cases; the original silently swallowed a
         # foreign Timeout here (and its 'except Exception: raise e'
         # clause was a no-op).
         raise
     finally:
         timeout.cancel()
Esempio n. 15
0
    def add_shard_cluster(self, context, cluster_id, shard_id,
                          replica_set_name):
        """Add a new shard (a replica set of members) to a MongoDB cluster.

        Waits for the shard members to become ready, forms them into a
        replica set, registers the shard with the query routers, and
        marks the members complete.  Bounded by
        CONF.cluster_usage_timeout; hitting it flags the shard as failed.
        """

        LOG.debug("begin add_shard_cluster for cluster %s shard %s"
                  % (cluster_id, shard_id))

        def _add_shard_cluster():

            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               shard_id=shard_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in shard %s: %s" % (shard_id,
                                                     instance_ids))
            if not self._all_instances_ready(instance_ids, cluster_id,
                                             shard_id):
                return

            members = [Instance.load(context, instance_id)
                       for instance_id in instance_ids]

            # The _create_* helpers return falsy on failure; bail out
            # quietly (presumably they report their own status -- confirm).
            if not self._create_replica_set(members, cluster_id, shard_id):
                return

            db_query_routers = DBInstance.find_all(cluster_id=cluster_id,
                                                   type='query_router',
                                                   deleted=False).all()
            query_routers = [Instance.load(context, db_query_router.id)
                             for db_query_router in db_query_routers]

            if not self._create_shard(query_routers, replica_set_name,
                                      members, cluster_id, shard_id):
                return

            for member in members:
                self.get_guest(member).cluster_complete()

        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _add_shard_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for building shard."))
            self.update_statuses_on_failure(cluster_id, shard_id)
        finally:
            timeout.cancel()

        LOG.debug("end add_shard_cluster for cluster %s shard %s"
                  % (cluster_id, shard_id))
Esempio n. 16
0
    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional arg *block* is true and *timeout* is ``None`` (the default),
        block if necessary until a free slot is available. If *timeout* is
        a positive number, it blocks at most *timeout* seconds and raises
        the :class:`Full` exception if no free slot was available within that time.
        Otherwise (*block* is false), put an item on the queue if a free slot
        is immediately available, else raise the :class:`Full` exception (*timeout*
        is ignored in that case).
        """
        if self.maxsize is None or self.qsize() < self.maxsize:
            # there's a free slot, put an item right away
            self._put(item)
            if self.getters:
                self._schedule_unlock()
        elif not block and get_hub().greenlet is getcurrent():
            # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
            # find a getter and deliver an item to it
            while self.getters:
                getter = self.getters.pop()
                if getter:
                    self._put(item)
                    item = self._get()
                    getter.switch(item)
                    return
            raise Full
        elif block:
            # Queue is full: park this greenlet as a putter until a
            # getter frees a slot or the timeout expires.
            waiter = ItemWaiter(item, block)
            self.putters.add(waiter)
            # A None timeout never fires; otherwise Full is raised on expiry.
            timeout = Timeout(timeout, Full)
            try:
                if self.getters:
                    self._schedule_unlock()
                result = waiter.wait()
                assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
                # Waiter still holds the item: no getter took it directly,
                # so store it in the queue ourselves.
                if waiter.item is not _NONE:
                    self._put(item)
            finally:
                timeout.cancel()
                self.putters.discard(waiter)
        elif self.getters:
            # Non-blocking, queue full, but a getter is waiting: try to
            # hand the item over via the unlock cycle.
            waiter = ItemWaiter(item, block)
            self.putters.add(waiter)
            self._schedule_unlock()
            result = waiter.wait()
            assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
            # Item was not consumed by any getter -- the put failed.
            if waiter.item is not _NONE:
                raise Full
        else:
            raise Full
Esempio n. 17
0
    def _check_health(self):
        """Probe the backend with a bounded TCP connect; un-blacklist on success.

        Any failure (including the connect timeout) leaves the current
        state alone -- the next periodic check will retry.
        """
        logging.debug("Checking health of %s", self)
        try:
            timeout = Timeout(self.healthcheck_timeout_seconds)
            try:
                # Renamed from 'socket': the original shadowed the stdlib
                # socket module within this scope.
                sock = eventlet.connect((self.host, self.port))
            finally:
                timeout.cancel()

            logging.debug("%s is alive, making sure it is not blacklisted", self)
            self.blacklisted = False
            sock.close()
        except:  # noqa: E722 -- deliberately bare: eventlet's Timeout
            # derives from BaseException, so 'except Exception' alone
            # would let the health-check timeout escape.
            logging.debug("%s seems dead, will check again later", self)
Esempio n. 18
0
 def recv_events(self, timeout_msecs):
     """Drain events from the internal queue for *timeout_msecs* milliseconds.

     Blocks collecting events until the timer fires, then returns the
     list gathered so far (possibly empty).
     """
     events = []
     timeout = Timeout(timeout_msecs / 1000.0)
     try:
         # Loop forever; the Timeout raised inside get() ends it.
         while True:
             events.append(self._event_q.get())
     except Timeout:
         pass
     finally:
         # Defensive: get() could raise something other than Timeout.
         # (The original also had 'except Exception as e: raise e', a
         # no-op that only mangled the traceback -- removed.)
         timeout.cancel()
     return events
Esempio n. 19
0
    def create_cluster(self, context, cluster_id):
        """Build a new cluster from its already-provisioned nodes.

        Waits for every node to report cluster-ready, initializes the
        cluster on the coordinator node, then joins the remaining nodes.
        Timeouts and errors mark the cluster as failed; non-timeout
        errors are re-raised.  Bounded by CONF.cluster_usage_timeout.
        """
        LOG.debug("Begin create_cluster for id: %s." % cluster_id)

        def _create_cluster():
            cluster_node_ids = self.find_cluster_node_ids(cluster_id)

            # Wait for cluster nodes to get to cluster-ready status.
            LOG.debug("Waiting for all nodes to become ready.")
            if not self._all_instances_ready(cluster_node_ids, cluster_id):
                return

            cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids)
            coordinator = self._get_coordinator_node(cluster_nodes)

            LOG.debug("Initializing the cluster on node '%s'."
                      % coordinator['ip'])

            # start with the coordinator as it will have all the required
            # services.
            guest_node_info = self.build_guest_node_info([coordinator])
            # now add all the other nodes so we can get a list of all services
            # needed to calculate the memory allocation properly.
            add_node_info = [node for node in cluster_nodes
                             if node != coordinator]
            guest_node_info.extend(self.build_guest_node_info(add_node_info))
            coordinator['guest'].initialize_cluster(guest_node_info)

            self._add_nodes(coordinator, add_node_info)
            coordinator['guest'].cluster_complete()
            LOG.debug("Cluster create finished successfully.")

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            # NOTE(review): EndNotification presumably reports action
            # completion to the notification system -- confirm semantics.
            with EndNotification(context, cluster_id=cluster_id):
                _create_cluster()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        except Exception:
            LOG.exception(_("Error creating cluster."))
            self.update_statuses_on_failure(cluster_id)
            raise
        finally:
            self.reset_task()
            timeout.cancel()

        LOG.debug("End create_cluster for id: %s." % cluster_id)
Esempio n. 20
0
    def shrink_cluster(self, context, cluster_id, instance_ids):
        """Remove *instance_ids* from a Vertica cluster.

        Refuses to remove the master, re-marks k-safety for the reduced
        size, tells the master to drop the removed members, then deletes
        the instances.  Bounded by CONF.cluster_usage_timeout.
        """
        def _shrink_cluster():
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               deleted=False).all()

            all_instance_ids = [db_instance.id for db_instance in db_instances]

            remove_instances = [Instance.load(context, instance_id)
                                for instance_id in instance_ids]

            # Members that remain after the shrink.
            left_instances = [Instance.load(context, instance_id)
                              for instance_id
                              in all_instance_ids
                              if instance_id not in instance_ids]

            remove_member_ips = [self.get_ip(instance)
                                 for instance in remove_instances]

            # k-safety level appropriate for the remaining cluster size.
            k = VerticaCluster.k_safety(len(left_instances))

            # Only the master drives the shrink; removing it is an error.
            for db_instance in db_instances:
                if db_instance['type'] == 'master':
                    master_instance = Instance.load(context,
                                                    db_instance.id)
                    if self.get_ip(master_instance) in remove_member_ips:
                        raise RuntimeError(_("Cannot remove master instance!"))
                    LOG.debug(_("Marking cluster k-safety: %s") % k)
                    self.get_guest(master_instance).mark_design_ksafe(k)
                    self.get_guest(master_instance).shrink_cluster(
                        remove_member_ips)
                    break

            for r in remove_instances:
                Instance.delete(r)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _shrink_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise
            LOG.exception(_("Timeout for shrinking cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        # NOTE(review): logs self.id while nothing else here uses it --
        # confirm whether cluster_id was intended.
        LOG.debug("end shrink_cluster for Vertica cluster id %s" % self.id)
Esempio n. 21
0
    def grow_cluster(self, context, cluster_id, new_instance_ids):
        """Join newly provisioned instances into an existing Redis cluster.

        Locates a pre-existing member to act as the cluster head, waits
        for the new members to become cluster-ready, meets them with the
        head and marks them complete.  Bounded by
        CONF.cluster_usage_timeout; failures flag the cluster as failed.
        """
        LOG.debug("Begin grow_cluster for id: %s." % cluster_id)

        def _grow_cluster():

            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            # next(..., None): with no pre-existing member the original's
            # bare next() raised StopIteration, making the 'if not
            # cluster_head' check below unreachable.
            cluster_head = next((Instance.load(context, db_inst.id)
                                 for db_inst in db_instances
                                 if db_inst.id not in new_instance_ids), None)
            if not cluster_head:
                raise TroveError("Unable to determine existing Redis cluster "
                                 "member")

            (cluster_head_ip, cluster_head_port) = (
                self.get_guest(cluster_head).get_node_ip())

            # Wait for cluster members to get to cluster-ready status.
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            LOG.debug("All members ready, proceeding for cluster setup.")
            new_insts = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]
            # A list, not map(): under Python 3 a map iterator would be
            # exhausted by the first loop and cluster_complete() below
            # would never be called.
            new_guests = [self.get_guest(inst) for inst in new_insts]

            # Connect nodes to the cluster head
            for guest in new_guests:
                guest.cluster_meet(cluster_head_ip, cluster_head_port)

            for guest in new_guests:
                guest.cluster_complete()

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        except Exception:
            LOG.exception(_("Error growing cluster %s.") % cluster_id)
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End grow_cluster for id: %s." % cluster_id)
Esempio n. 22
0
    def downloadDM(self, timeout=2, cached=False):
        """Download the node's decision matrix (DM) into ``self.DM``.

        Requests matrix info, waits up to *timeout* seconds for the
        response handler to populate ``self.DM``, then fetches each row.
        Returns ``constant.VSCP_ERROR_ERROR`` on any failure.

        NOTE(review): the *cached* parameter is currently unused --
        confirm whether caching was ever implemented.
        """
        self.DM = None
        self.vscp.sendSimpleEvent(
            vscp_class=constant.VSCP_CLASS1_PROTOCOL,
            vscp_type=constant.VSCP_TYPE_PROTOCOL_GET_MATRIX_INFO,
            vscp_data=[self.nodeId],
        )
        timer = Timeout(timeout, self.__timeout)
        try:
            # Poll until the event handler fills in the matrix header.
            while self.DM is None:
                time.sleep(0.02)
            # Response arrived -- disarm before the (potentially slow)
            # per-row downloads below, as in the original.
            timer.cancel()
            for i in range(self.DM["size"]):
                self.DM["rows"][i] = self.getRowOfDM(i)
        except:  # deliberately broad: also catches the timeout exception
            return constant.VSCP_ERROR_ERROR
        finally:
            # Idempotent; guarantees the timer is disarmed even when the
            # row downloads fail (the original left it armed there).
            timer.cancel()
Esempio n. 23
0
    def grow_cluster(self, context, cluster_id, new_instance_ids):
        """Add new nodes to an existing cluster and rebalance them in.

        Waits for the new instances to become cluster-ready, then lets
        one of the pre-existing nodes absorb them.  Bounded by
        CONF.cluster_usage_timeout; non-timeout errors are re-raised.
        """
        LOG.debug("Begin grow_cluster for id: %s." % cluster_id)

        def _grow_cluster():
            # Wait for new nodes to get to cluster-ready status.
            LOG.debug("Waiting for new nodes to become ready.")
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            new_instances = [Instance.load(context, instance_id)
                             for instance_id in new_instance_ids]
            add_node_info = [self.build_node_info(instance)
                             for instance in new_instances]

            LOG.debug("All nodes ready, proceeding with cluster setup.")

            cluster_node_ids = self.find_cluster_node_ids(cluster_id)
            cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids)

            # Nodes that existed before this grow operation.
            old_node_info = [node for node in cluster_nodes
                             if node['id'] not in new_instance_ids]

            # Rebalance the cluster via one of the existing nodes.
            # Clients can continue to store and retrieve information and
            # do not need to be aware that a rebalance operation is taking
            # place.
            coordinator = old_node_info[0]
            self._add_nodes(coordinator, add_node_info)
            LOG.debug("Cluster grow finished successfully.")

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            with EndNotification(context, cluster_id=cluster_id):
                _grow_cluster()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            # NOTE(review): unlike the sibling variants in this file, no
            # update_statuses_on_failure() on the timeout path -- confirm.
            LOG.exception(_("Timeout for growing cluster."))
        except Exception:
            LOG.exception(_("Error growing cluster."))
            raise
        finally:
            self.reset_task()
            timeout.cancel()

        LOG.debug("End grow_cluster for id: %s." % cluster_id)
Esempio n. 24
0
    def grow_cluster(self, context, cluster_id, new_instance_ids):
        """Add new nodes to an existing cluster and rebalance it.

        Waits for the new instances to become cluster-ready, then asks
        one of the existing nodes to absorb them.  Setup errors and the
        usage timeout both mark the cluster as failed.
        """
        LOG.debug("Begin grow_cluster for id: %s." % cluster_id)

        def _grow_cluster():
            # Wait for new nodes to get to cluster-ready status.
            LOG.debug("Waiting for new nodes to become ready.")
            if not self._all_instances_ready(new_instance_ids, cluster_id):
                return

            fresh_instances = [Instance.load(context, inst_id)
                               for inst_id in new_instance_ids]
            joining_nodes = [self.build_node_info(inst)
                             for inst in fresh_instances]

            LOG.debug("All nodes ready, proceeding with cluster setup.")

            node_ids = self.find_cluster_node_ids(cluster_id)
            current_nodes = self.load_cluster_nodes(context, node_ids)

            # Rebalance through one of the current members; clients keep
            # working during the rebalance, and the new nodes only go
            # active once it completes.
            try:
                self._add_nodes(current_nodes[0], joining_nodes)
                LOG.debug("Cluster configuration finished successfully.")
            except Exception:
                LOG.exception(_("Error growing cluster."))
                self.update_statuses_on_failure(cluster_id)

        timeout = Timeout(CONF.cluster_usage_timeout)
        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("Timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()

        LOG.debug("End grow_cluster for id: %s." % cluster_id)
Esempio n. 25
0
 def recv_events(self, timeout_msecs):
     """Collect events from the internal queue for up to *timeout_msecs*.

     Blocks draining ``self._event_q`` until the time budget elapses, then
     returns whatever was received in that window (possibly an empty list).

     :param timeout_msecs: time budget in milliseconds (int)
     :return: list of events received before the timeout fired
     """
     events = []
     # The Timeout starts ticking on construction; it interrupts the
     # blocking get() below by raising itself in this greenthread.
     timeout = Timeout(timeout_msecs / 1000.0)
     try:
         while True:
             events.append(self._event_q.get())
     except Timeout:
         # Budget exhausted -- return what we gathered so far.
         pass
     finally:
         timeout.cancel()
     return events
Esempio n. 26
0
    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args *block* is true and *timeout* is ``None`` (the default),
        block if necessary until an item is available. If *timeout* is a positive number,
        it blocks at most *timeout* seconds and raises the :class:`Empty` exception
        if no item was available within that time. Otherwise (*block* is false), return
        an item if one is immediately available, else raise the :class:`Empty` exception
        (*timeout* is ignored in that case).
        """
        if self.qsize():
            # Item already available: wake blocked putters later (unlock is
            # scheduled, not run inline) and return immediately.
            if self.putters:
                self._schedule_unlock()
            return self._get()
        elif not block and get_hub().greenlet is getcurrent():
            # special case to make get_nowait() runnable in the mainloop greenlet
            # there are no items in the queue; try to fix the situation by unlocking putters
            while self.putters:
                putter = self.putters.pop()
                if putter:
                    # Switch directly into the putter so it can deposit its
                    # item; if that produced one, consume it.
                    putter.switch(putter)
                    if self.qsize():
                        return self._get()
            raise Empty
        elif block:
            waiter = Waiter()
            # Timeout(timeout, Empty) raises Empty (not Timeout) in this
            # greenlet when the deadline passes; Timeout(None, ...) never fires.
            timeout = Timeout(timeout, Empty)
            try:
                self.getters.add(waiter)
                if self.putters:
                    self._schedule_unlock()
                try:
                    # Parked here until a putter hands an item to this waiter.
                    return waiter.wait()
                except:
                    # Woken by timeout/exception: let remaining putters make
                    # progress before propagating.
                    self._schedule_unlock()
                    raise
            finally:
                # Always deregister the waiter and disarm the timer.
                self.getters.discard(waiter)
                timeout.cancel()
        else:
            # Non-blocking call from an ordinary greenlet with an empty queue.
            raise Empty
Esempio n. 27
0
    def wait(self):
        """Block until the sandbox answers ping() or the wait timeout fires.

        Polls ``self.ping()`` every ``sandbox_ping_interval`` seconds for up
        to ``sandbox_wait_timeout`` seconds.

        :return: 1 if the sandbox came up, 0 on timeout
        """
        up = 0
        to = Timeout(self.sandbox_wait_timeout)
        try:
            # Keep polling until the sandbox reports healthy (ping() == 1);
            # the surrounding Timeout interrupts the sleep if this takes
            # too long.
            while self.ping() != 1:
                time.sleep(self.sandbox_ping_interval)
            up = 1
        except Timeout:
            self.logger.info("wait for sandbox %s timedout" % self.account)
        finally:
            # cancel() is safe to call even after the timeout already fired.
            to.cancel()

        return up
Esempio n. 28
0
def execute_with_timeout(*args, **kwargs):
    """Run execute(*args, **kwargs), aborting after kwargs['timeout'] seconds.

    Raises ProcessExecutionError when the call does not finish in time.
    A Timeout raised by some other timer is re-raised unchanged.
    """
    time = kwargs.get('timeout', 30)

    def cb_timeout():
        # NOTE(review): defined but never wired to the Timeout below; kept
        # for parity with other variants of this helper -- confirm intent.
        msg = (_("Time out after waiting"
                 " %(time)s seconds when running proc: %(args)s"
                 " %(kwargs)s") % {'time': time, 'args': args,
                                   'kwargs': kwargs})
        raise exception.ProcessExecutionError(msg)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except Timeout as t:
        if t is not timeout:
            # Some other timer fired; not ours to translate.
            raise
        # Format with an explicit mapping instead of locals(): locals()-based
        # interpolation silently breaks when a variable is renamed.
        msg = (_("Time out after waiting "
                 "%(time)s seconds when running proc: %(args)s"
                 " %(kwargs)s") % {'time': time, 'args': args,
                                   'kwargs': kwargs})
        raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
Esempio n. 29
0
    def handle(self, sock, read_data, path, headers):
        """Proxy one client request: connect to a backend (with retries and
        blacklisting) and shuttle bytes between the client and server sockets.

        :param sock: accepted client socket
        :param read_data: bytes already read from the client, forwarded first
        :param path: request path (unused here; part of the handler signature)
        :param headers: request headers; X-Request-Id is used for log correlation
        """
        request_id = headers.get("X-Request-Id", "-")
        for attempt in range(self.attempts):
            if attempt > 0:
                logging.warn("[%s] Retrying connection for host %s", request_id, self.host)

            backend = self.select_backend()
            try:
                # Bound only the connect() call; cancel immediately afterwards
                # so the timer cannot fire later in the request lifetime.
                timeout = Timeout(self.connection_timeout_seconds)
                try:
                    server_sock = eventlet.connect((backend.host, backend.port))
                finally:
                    timeout.cancel()

                backend.add_connection()
                break
            except socket.error:
                # Hard connect failure: blacklist this backend, pause, retry.
                logging.error("[%s] Proxy socket error on connect() to %s of %s", request_id, backend, self.host)
                self.blacklist(backend)
                eventlet.sleep(self.delay)
                continue
            except:
                # Presumably the eventlet Timeout firing mid-connect; treated
                # the same as a connect failure -- confirm no other exception
                # type is expected here.
                logging.warn("[%s] Proxy timeout on connect() to %s of %s", request_id, backend, self.host)
                self.blacklist(backend)
                eventlet.sleep(self.delay)
                continue

        # Function to help track data usage
        # NOTE(review): if every attempt fails, the loop exhausts without
        # `break` and `server_sock`/`backend` are unbound below -- verify
        # callers guarantee at least one reachable backend.
        def send_onwards(data):
            server_sock.sendall(data)
            return len(data)

        try:
            size = send_onwards(read_data)
            size += SocketMelder(sock, server_sock, backend, self.host).run()
        except socket.error, e:
            # EPIPE just means the client disconnected mid-transfer; ignore it.
            if e.errno != errno.EPIPE:
                raise
Esempio n. 30
0
def execute_with_timeout(*args, **kwargs):
    """Run execute() under a watchdog timer.

    Keyword options (popped before delegating to execute()):
      timeout: seconds to allow the call (default 30).
      log_output_on_error: when True, log command, exit code, stdout and
        stderr on ProcessExecutionError before re-raising.

    Raises ProcessExecutionError on failure or timeout; a Timeout raised
    by a different timer is propagated untouched.
    """
    time = kwargs.pop("timeout", 30)
    log_output_on_error = kwargs.pop("log_output_on_error", False)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except exception.ProcessExecutionError as e:
        if log_output_on_error:
            details = {
                "cmd": e.cmd,
                "description": e.description or "",
                "exit_code": e.exit_code,
                "stderr": e.stderr,
                "stdout": e.stdout,
            }
            LOG.error(
                _(
                    "Command '%(cmd)s' failed. %(description)s "
                    "Exit code: %(exit_code)s\nstderr: %(stderr)s\n"
                    "stdout: %(stdout)s"
                )
                % details
            )
        raise
    except Timeout as t:
        if t is not timeout:
            # A different timer fired; leave it alone.
            LOG.error(_("Got a timeout but not the one expected."))
            raise
        fields = {"time": time, "args": args, "kwargs": kwargs}
        msg = _("Time out after waiting " "%(time)s seconds when running proc: %(args)s" " %(kwargs)s.") % fields
        LOG.error(msg)
        raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()