def piper(self, in_sock, out_sock, out_addr, onkill):
    "Worker thread for data reading"
    try:
        timeout = Timeout(self.transmission_timeout_seconds)
        try:
            while True:
                written = in_sock.recv(32768)
                if not written:
                    try:
                        out_sock.shutdown(socket.SHUT_WR)
                    except socket.error:
                        self.threads[onkill].kill()
                    break
                try:
                    out_sock.sendall(written)
                except socket.error:
                    pass
                self.data_handled += len(written)
        finally:
            timeout.cancel()
    except greenlet.GreenletExit:
        return
    except Timeout:
        # Guard against closing the connection with no data and no status
        # code when the backend returned nothing. Without this, nginx
        # reports 'upstream prematurely closed connection' and returns a
        # 500; the 594 below gives us a custom error page so we can tell
        # when it happens.
        if onkill == "stoc" and self.data_handled == 0:
            out_sock.sendall("HTTP/1.0 594 Backend timeout\r\n"
                             "Connection: close\r\n"
                             "Content-length: 0\r\n\r\n")
        logging.warn("Timeout serving request to backend %s of %s",
                     self.backend, self.host)
        return
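The piper above, like most entries here, wraps a blocking section in a Timeout and cancels it in a finally block so it cannot fire later in the greenlet. A distilled sketch of that guard pattern, assuming eventlet (the function and address are illustrative; gevent's Timeout behaves the same way):

import eventlet
from eventlet.timeout import Timeout

def fetch_with_deadline(address, seconds=5):
    timeout = Timeout(seconds)            # starts ticking immediately
    try:
        sock = eventlet.connect(address)  # blocking call under the deadline
        return sock
    except Timeout as t:
        if t is not timeout:
            raise        # someone else's timeout; not ours to handle
        return None      # our deadline expired
    finally:
        timeout.cancel()  # always cancel so it can't fire later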
def rengine_side(self, appid, token, uri):
    """Handle rengine (client) GET requests."""
    if not self.rengine_authorization_ok(appid, token):
        LOGGER.info('Rengine content request authorization fails')
        abort(401, 'Authorization failed')
    evt = Event()
    request_id = str(uuid4())
    self.request_id_events[request_id] = evt
    headers = ["%s: %s" % (header, val)
               for (header, val) in request.headers.items()]
    packet = ScpPacket.make_sfkcontent(uri, request_id, headers)
    try:
        self._send(packet, appid)
    except Exception as e:
        abort(500, str(e))
    LOGGER.debug("uri %s expected" % uri)
    timeout = Timeout(TIMEOUT)
    try:
        resp = evt.wait()
    except Timeout:
        del self.request_id_events[request_id]
        abort(504, 'Gateway Timeout')
    finally:
        timeout.cancel()
    LOGGER.debug("uri %s got" % uri)
    return resp
def shrink_cluster(self, context, cluster_id, instance_ids):
    LOG.debug("begin shrink_cluster for MongoDB cluster %s", cluster_id)

    def _shrink_cluster():
        def all_instances_marked_deleted():
            non_deleted_instances = DBInstance.find_all(
                cluster_id=cluster_id, deleted=False).all()
            non_deleted_ids = [db_instance.id for db_instance
                               in non_deleted_instances]
            return not bool(
                set(instance_ids).intersection(set(non_deleted_ids)))

        try:
            utils.poll_until(all_instances_marked_deleted,
                             sleep_time=2,
                             time_out=CONF.cluster_delete_time_out)
        except PollTimeOut:
            LOG.error(_("timeout for instances to be marked as deleted."))
            return

    cluster_usage_timeout = CONF.cluster_usage_timeout
    timeout = Timeout(cluster_usage_timeout)
    try:
        _shrink_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("timeout for shrinking cluster."))
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("end shrink_cluster for MongoDB cluster %s", self.id)
def writeRegister(self, address, value, timeout=2):
    """Return True if the value was written correctly;
    return False if the timeout elapses first.
    """
    if not isinstance(address, int):
        raise TypeError("Address must be an integer")
    if int(address) < 0 or int(address) > 255:
        raise ValueError("Address is out of range [0, 255]")
    if not isinstance(value, int):
        raise TypeError("Value must be an integer")
    if int(value) < 0 or int(value) > 255:
        raise ValueError("Value is out of range [0, 255]")
    self.regs[address] = None
    self.vscp.sendSimpleEvent(
        vscp_class=constant.VSCP_CLASS1_PROTOCOL,
        vscp_type=constant.VSCP_TYPE_PROTOCOL_WRITE_REGISTER,
        vscp_data=[self.id, address, value],
    )
    try:
        timer = Timeout(timeout, self.__timeout)
        # Poll until the node echoes the register back (or the timer fires).
        while self.regs[address] is None:
            time.sleep(0.02)
        timer.cancel()
        return self.regs[address] == value
    except VSCPException:
        return False
def readRegister(self, address, cache=False, timeout=2):
    """Return the value of the register at *address*;
    return None if the timeout elapses first.
    """
    if not isinstance(address, int):
        raise TypeError("Address must be an integer")
    if int(address) < 0 or int(address) > 255:
        raise ValueError("Address is out of range [0, 255]")
    if cache and address in self.regs:
        if self.regs[address] is not None:
            return self.regs[address]
    self.regs[address] = None
    self.vscp.sendSimpleEvent(
        vscp_class=constant.VSCP_CLASS1_PROTOCOL,
        vscp_type=constant.VSCP_TYPE_PROTOCOL_READ_REGISTER,
        vscp_data=[self.id, address],
    )
    try:
        timer = Timeout(timeout, self.__timeout)
        # Poll until the node answers (or the timer fires).
        while self.regs[address] is None:
            time.sleep(0.02)
        timer.cancel()
        return self.regs[address]
    except VSCPException:
        return None
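The two register methods above share a poll-until-set idiom: arm a Timeout with a fallback exception, then spin until another greenlet fills in the response. A generic sketch of that idiom, assuming eventlet; the names here are illustrative stand-ins, not the VSCP API:

import eventlet
eventlet.monkey_patch()  # so time.sleep yields to the hub and the timer can fire
import time
from eventlet.timeout import Timeout

class ResponseTimeout(Exception):
    """Hypothetical stand-in for VSCPException."""

def wait_for_value(mailbox, key, seconds=2.0):
    # Timeout(seconds, exc) raises `exc` in this greenlet when the deadline
    # passes, which is what unblocks the polling loop below.
    timer = Timeout(seconds, ResponseTimeout)
    try:
        while mailbox.get(key) is None:  # another greenlet fills this in
            time.sleep(0.02)
        timer.cancel()
        return mailbox[key]
    except ResponseTimeout:
        return None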
def execute_with_timeout(*args, **kwargs):
    time = kwargs.get('timeout', 30)

    def cb_timeout():
        msg = (_("Time out after waiting"
                 " %(time)s seconds when running proc: %(args)s"
                 " %(kwargs)s") % {'time': time, 'args': args,
                                   'kwargs': kwargs})
        LOG.error(msg)
        raise exception.ProcessExecutionError(msg)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except Timeout as t:
        if t is not timeout:
            LOG.error("Timeout reached but not from our timeout. "
                      "This is bad!")
            raise
        else:
            msg = (_("Time out after waiting "
                     "%(time)s seconds when running proc: %(args)s"
                     " %(kwargs)s") % {'time': time, 'args': args,
                                       'kwargs': kwargs})
            LOG.error(msg)
            raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
def execute_with_timeout(*args, **kwargs):
    time = kwargs.pop('timeout', 30)
    log_output_on_error = kwargs.pop('log_output_on_error', False)
    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except exception.ProcessExecutionError as e:
        if log_output_on_error:
            LOG.error(
                _("Command '%(cmd)s' failed. %(description)s "
                  "Exit code: %(exit_code)s\nstderr: %(stderr)s\n"
                  "stdout: %(stdout)s") %
                {'cmd': e.cmd, 'description': e.description or '',
                 'exit_code': e.exit_code, 'stderr': e.stderr,
                 'stdout': e.stdout})
        raise
    except Timeout as t:
        if t is not timeout:
            LOG.error(_("Got a timeout but not the one expected."))
            raise
        else:
            msg = (_("Time out after waiting "
                     "%(time)s seconds when running proc: %(args)s"
                     " %(kwargs)s.") % {'time': time, 'args': args,
                                        'kwargs': kwargs})
            LOG.error(msg)
            raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
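A possible call site for the helper above; the command, flags, and 60-second budget are illustrative examples, not values from the source:

# Hypothetical call site for execute_with_timeout() defined above; the
# underlying execute() returns a (stdout, stderr) pair.
stdout, stderr = execute_with_timeout(
    'pg_dump', 'mydb', '--file=/tmp/mydb.sql',
    timeout=60, log_output_on_error=True)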
def shrink_cluster(self, context, cluster_id, instance_ids):
    LOG.debug("begin shrink_cluster for MongoDB cluster %s" % cluster_id)

    def _shrink_cluster():
        def all_instances_marked_deleted():
            non_deleted_instances = DBInstance.find_all(
                cluster_id=cluster_id, deleted=False).all()
            non_deleted_ids = [db_instance.id for db_instance
                               in non_deleted_instances]
            return not bool(
                set(instance_ids).intersection(set(non_deleted_ids)))

        try:
            utils.poll_until(all_instances_marked_deleted,
                             sleep_time=2,
                             time_out=CONF.cluster_delete_time_out)
        except PollTimeOut:
            LOG.error(_("timeout for instances to be marked as deleted."))
            return

    cluster_usage_timeout = CONF.cluster_usage_timeout
    timeout = Timeout(cluster_usage_timeout)
    try:
        _shrink_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("timeout for shrinking cluster."))
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("end shrink_cluster for MongoDB cluster %s" % self.id)
def grow_cluster(self, context, cluster_id, new_instance_ids):

    def _grow_cluster():
        LOG.debug("begin grow_cluster for Vertica cluster %s" % cluster_id)
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        instance_ids = [db_instance.id for db_instance in db_instances]
        # Wait for new cluster members to get to cluster-ready status.
        if not self._all_instances_ready(new_instance_ids, cluster_id):
            return
        new_insts = [Instance.load(context, instance_id)
                     for instance_id in new_instance_ids]
        existing_instances = [Instance.load(context, instance_id)
                              for instance_id in instance_ids
                              if instance_id not in new_instance_ids]
        existing_guests = [self.get_guest(i) for i in existing_instances]
        new_guests = [self.get_guest(i) for i in new_insts]
        all_guests = new_guests + existing_guests
        authorized_users_without_password = ['root', 'dbadmin']
        new_ips = [self.get_ip(instance) for instance in new_insts]
        for user in authorized_users_without_password:
            pub_key = [guest.get_public_keys(user) for guest in all_guests]
            for guest in all_guests:
                guest.authorize_public_keys(user, pub_key)
        for db_instance in db_instances:
            if db_instance['type'] == 'master':
                LOG.debug("Found 'master' instance, calling grow on guest")
                master_instance = Instance.load(context, db_instance.id)
                self.get_guest(master_instance).grow_cluster(new_ips)
                break
        for guest in new_guests:
            guest.cluster_complete()

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _grow_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for growing cluster."))
        self.update_statuses_on_failure(cluster_id)
    except Exception:
        LOG.exception(_("Error growing cluster %s.") % cluster_id)
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
def create_cluster(self, context, cluster_id):
    LOG.debug("Begin create_cluster for id: %s." % cluster_id)

    def _create_cluster():
        # Fetch instances by cluster_id against instances table.
        db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
        instance_ids = [db_instance.id for db_instance in db_instances]
        # Wait for cluster members to get to cluster-ready status.
        if not self._all_instances_ready(instance_ids, cluster_id):
            return
        LOG.debug("All members ready, proceeding for cluster setup.")
        instances = [Instance.load(context, instance_id)
                     for instance_id in instance_ids]
        member_ips = [self.get_ip(instance) for instance in instances]
        guests = [self.get_guest(instance) for instance in instances]
        # Users to be configured for password-less SSH.
        authorized_users_without_password = ['root', 'dbadmin']
        # Configuring password-less SSH for cluster members.
        # Strategy for setting up SSH:
        # get public keys for user from member-instances in cluster,
        # combine them, finally push it back to all instances,
        # and member instances add them to authorized keys.
        LOG.debug("Configuring password-less SSH on cluster members.")
        try:
            for user in authorized_users_without_password:
                pub_key = [guest.get_public_keys(user) for guest in guests]
                for guest in guests:
                    guest.authorize_public_keys(user, pub_key)
            LOG.debug("Installing cluster with members: %s." % member_ips)
            guests[0].install_cluster(member_ips)
            LOG.debug("Finalizing cluster configuration.")
            for guest in guests:
                guest.cluster_complete()
        except Exception:
            LOG.exception(_("Error creating cluster."))
            self.update_statuses_on_failure(cluster_id)

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _create_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for building cluster."))
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("End create_cluster for id: %s." % cluster_id)
def grow_cluster(self, context, cluster_id, new_instance_ids):

    def _grow_cluster():
        LOG.debug("begin grow_cluster for Vertica cluster %s", cluster_id)
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        instance_ids = [db_instance.id for db_instance in db_instances]
        # Wait for new cluster members to get to cluster-ready status.
        if not self._all_instances_ready(new_instance_ids, cluster_id):
            return
        new_insts = [Instance.load(context, instance_id)
                     for instance_id in new_instance_ids]
        existing_instances = [Instance.load(context, instance_id)
                              for instance_id in instance_ids
                              if instance_id not in new_instance_ids]
        existing_guests = [self.get_guest(i) for i in existing_instances]
        new_guests = [self.get_guest(i) for i in new_insts]
        all_guests = new_guests + existing_guests
        authorized_users_without_password = ['root', 'dbadmin']
        new_ips = [self.get_ip(instance) for instance in new_insts]
        for user in authorized_users_without_password:
            pub_key = [guest.get_public_keys(user) for guest in all_guests]
            for guest in all_guests:
                guest.authorize_public_keys(user, pub_key)
        for db_instance in db_instances:
            if db_instance['type'] == 'master':
                LOG.debug("Found 'master' instance, calling grow on guest")
                master_instance = Instance.load(context, db_instance.id)
                self.get_guest(master_instance).grow_cluster(new_ips)
                break
        for guest in new_guests:
            guest.cluster_complete()

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _grow_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for growing cluster."))
        self.update_statuses_on_failure(cluster_id)
    except Exception:
        LOG.exception(_("Error growing cluster %s."), cluster_id)
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
def __call__(self, *args, **kwargs):
    args_hash = json.dumps((args, kwargs), sort_keys=True)
    timeout = None
    incache, cached_response = self.get_from_cache(args_hash)
    if self.use_cache_first and incache:
        return cached_response
    if self.failover_timeout and incache:
        # we have a value, let's set a timeout and use our cache if the
        # service isn't responsive
        timeout = Timeout(self.failover_timeout)
    try:
        reply = super(CachedMethodProxy, self).__call__(*args, **kwargs)
    except (Exception, Timeout) as e:
        error = 'timeout' if isinstance(e, Timeout) else 'error'
        _log.warn('%s when getting value for %s. using cache', error, self)
        if incache:
            _log.warn('response found in cache. using cache')
            return cached_response
        else:
            _log.error('request not in cache, re-raising %s', self.cache)
            raise e
    finally:
        if timeout:
            timeout.cancel()
    self.cache[args_hash] = (reply, )
    return reply
def create_cluster(self, context, cluster_id):
    LOG.debug("Begin create_cluster for id: %s." % cluster_id)

    def _create_cluster():
        # Fetch instances by cluster_id against instances table.
        db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
        instance_ids = [db_instance.id for db_instance in db_instances]
        # Wait for cluster members to get to cluster-ready status.
        if not self._all_instances_ready(instance_ids, cluster_id):
            return
        LOG.debug("All members ready, proceeding for cluster setup.")
        instances = [Instance.load(context, instance_id)
                     for instance_id in instance_ids]
        # Connect nodes to the first node.
        guests = [self.get_guest(instance) for instance in instances]
        try:
            cluster_head = instances[0]
            cluster_head_port = '6379'
            cluster_head_ip = self.get_ip(cluster_head)
            for guest in guests[1:]:
                guest.cluster_meet(cluster_head_ip, cluster_head_port)
            # Distribute the 16384 hash slots across the nodes, handing the
            # remainder out one extra slot at a time.
            num_nodes = len(instances)
            total_slots = 16384
            slots_per_node = total_slots / num_nodes
            leftover_slots = total_slots % num_nodes
            first_slot = 0
            for guest in guests:
                last_slot = first_slot + slots_per_node
                if leftover_slots > 0:
                    leftover_slots -= 1
                else:
                    last_slot -= 1
                guest.cluster_addslots(first_slot, last_slot)
                first_slot = last_slot + 1
            for guest in guests:
                guest.cluster_complete()
        except Exception:
            LOG.exception(_("Error creating cluster."))
            self.update_statuses_on_failure(cluster_id)

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _create_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for building cluster."))
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("End create_cluster for id: %s." % cluster_id)
def request(self, address, path, method='GET', params=None, headers=None):
    # Avoid a mutable default argument for headers.
    headers = headers or {}
    t = Timeout(2, RuntimeError("Timeout trying to send request."))
    try:
        conn = httplib.HTTPConnection("%s:%s" % address)
        conn.request(method, path, params, headers)
    finally:
        t.cancel()
    return conn.getresponse()
def __exit__(self, type, value, traceback):
    if self.expected == 0:
        with Timeout(.5, None):
            self.event.wait()
        assert len(self.received) == 0
        return
    with Timeout(1):
        self.event.wait()
    self.event = Event()
def wrapper(*args, **kwargs):
    timeout = Timeout(time, exception)
    try:
        func(*args, **kwargs)
    except exception as e:
        if reraise:
            raise e
    else:
        # Only reached if func returned before the timeout fired.
        timeout.cancel()
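Only the inner wrapper appears above; a sketch of the enclosing decorator factory it implies, assuming gevent-style Timeout (the factory name and defaults are assumptions, not from the source):

# Possible enclosing factory for the `wrapper` above (an assumption; only
# the inner function appears in the source). `exception` must be a Timeout
# subclass or instance so the except clause catches the firing timer.
import functools
from gevent import Timeout

def with_timeout(time, exception=Timeout, reraise=False):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            timeout = Timeout(time, exception)
            try:
                func(*args, **kwargs)
            except exception as e:
                if reraise:
                    raise e
            else:
                timeout.cancel()  # func finished in time
        return wrapper
    return decorator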
def grow_cluster(self, context, cluster_id, new_instance_ids):
    """Grow a K2hdkc Cluster."""
    LOG.debug("Begins grow_cluster for %s. new_instance_ids: %s",
              cluster_id, new_instance_ids)
    # 1. validates args
    if context is None:
        LOG.error("no context")
        return
    if cluster_id is None:
        LOG.error("no cluster_id")
        return
    if new_instance_ids is None:
        LOG.error("no new_instance_ids")
        return
    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        # 2. Retrieves db_instances from the database
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        LOG.debug("len(db_instances) %s", len(db_instances))
        # 3. Checks if new instances are ready
        if not self._all_instances_running(new_instance_ids, cluster_id):
            LOG.error("instances are not ready yet")
            return
        # 4. Loads instances
        instances = [Instance.load(context, instance_id)
                     for instance_id in new_instance_ids]
        LOG.debug("len(instances) %s", len(instances))
        # 5. Instantiates the GuestAgent class and
        # 6. calls the cluster_complete endpoint of K2hdkcGuestAgent
        LOG.debug("Calling cluster_complete as a final hook to each "
                  "node in the cluster")
        for instance in instances:
            self.get_guest(instance).cluster_complete()
        # 7. reset the current cluster task status to None
        LOG.debug("reset cluster task to None")
        self.reset_task()
    except Timeout as t:
        # Note: administrators should reset the task via CLI in this case.
        if t is not timeout:
            raise  # not my timeout
        LOG.exception("Timeout for growing cluster.")
        self.update_statuses_on_failure(
            cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
    finally:
        timeout.cancel()
    LOG.debug("Completed grow_cluster for %s.", cluster_id)
def update_data(self):
    print "updating data"
    timeout = Timeout(10)
    try:
        self.parser = KMLParser()
        temp = self.parse_data()
        timeout.cancel()
        self.bikeways = temp
        self.update_database()
    except:  # bare except: the Timeout is a BaseException
        timeout.cancel()  # make sure a pending timeout can't fire later
        print "couldn't get data"
def grow_cluster(self, context, cluster_id, new_instance_ids):
    LOG.debug("Begin grow_cluster for id: %s.", cluster_id)

    def _grow_cluster():
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        cluster_head = next(Instance.load(context, db_inst.id)
                            for db_inst in db_instances
                            if db_inst.id not in new_instance_ids)
        if not cluster_head:
            raise TroveError(_("Unable to determine existing Redis cluster"
                               " member"))
        (cluster_head_ip, cluster_head_port) = (
            self.get_guest(cluster_head).get_node_ip())
        # Wait for cluster members to get to cluster-ready status.
        if not self._all_instances_ready(new_instance_ids, cluster_id):
            return
        LOG.debug("All members ready, proceeding for cluster setup.")
        new_insts = [Instance.load(context, instance_id)
                     for instance_id in new_instance_ids]
        new_guests = map(self.get_guest, new_insts)
        # Connect nodes to the cluster head
        for guest in new_guests:
            guest.cluster_meet(cluster_head_ip, cluster_head_port)
        for guest in new_guests:
            guest.cluster_complete()

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _grow_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception("Timeout for growing cluster.")
        self.update_statuses_on_failure(
            cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
    except Exception:
        LOG.exception("Error growing cluster %s.", cluster_id)
        self.update_statuses_on_failure(
            cluster_id, status=inst_tasks.InstanceTasks.GROWING_ERROR)
    finally:
        timeout.cancel()
    LOG.debug("End grow_cluster for id: %s.", cluster_id)
def add_shard_cluster(self, context, cluster_id, shard_id,
                      replica_set_name):
    LOG.debug("begin add_shard_cluster for cluster %s shard %s"
              % (cluster_id, shard_id))

    def _add_shard_cluster():
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           shard_id=shard_id).all()
        instance_ids = [db_instance.id for db_instance in db_instances]
        LOG.debug("instances in shard %s: %s" % (shard_id, instance_ids))
        if not self._all_instances_ready(instance_ids, cluster_id,
                                         shard_id):
            return
        members = [Instance.load(context, instance_id)
                   for instance_id in instance_ids]
        if not self._create_replica_set(members, cluster_id, shard_id):
            return
        db_query_routers = DBInstance.find_all(cluster_id=cluster_id,
                                               type='query_router',
                                               deleted=False).all()
        query_routers = [Instance.load(context, db_query_router.id)
                         for db_query_router in db_query_routers]
        if not self._create_shard(query_routers, replica_set_name,
                                  members, cluster_id, shard_id):
            return
        for member in members:
            self.get_guest(member).cluster_complete()

    cluster_usage_timeout = CONF.cluster_usage_timeout
    timeout = Timeout(cluster_usage_timeout)
    try:
        _add_shard_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("timeout for building shard."))
        self.update_statuses_on_failure(cluster_id, shard_id)
    finally:
        timeout.cancel()
    LOG.debug("end add_shard_cluster for cluster %s shard %s"
              % (cluster_id, shard_id))
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.

    If optional arg *block* is true and *timeout* is ``None`` (the
    default), block if necessary until a free slot is available. If
    *timeout* is a positive number, it blocks at most *timeout* seconds
    and raises the :class:`Full` exception if no free slot was available
    within that time. Otherwise (*block* is false), put an item on the
    queue if a free slot is immediately available, else raise the
    :class:`Full` exception (*timeout* is ignored in that case).
    """
    if self.maxsize is None or self.qsize() < self.maxsize:
        # there's a free slot, put an item right away
        self._put(item)
        if self.getters:
            self._schedule_unlock()
    elif not block and get_hub().greenlet is getcurrent():
        # we're in the mainloop, so we cannot wait; we can switch() to
        # other greenlets though. find a getter and deliver an item to it
        while self.getters:
            getter = self.getters.pop()
            if getter:
                self._put(item)
                item = self._get()
                getter.switch(item)
                return
        raise Full
    elif block:
        waiter = ItemWaiter(item, block)
        self.putters.add(waiter)
        timeout = Timeout(timeout, Full)
        try:
            if self.getters:
                self._schedule_unlock()
            result = waiter.wait()
            assert result is waiter, \
                "Invalid switch into Queue.put: %r" % (result, )
            if waiter.item is not _NONE:
                self._put(item)
        finally:
            timeout.cancel()
            self.putters.discard(waiter)
    elif self.getters:
        waiter = ItemWaiter(item, block)
        self.putters.add(waiter)
        self._schedule_unlock()
        result = waiter.wait()
        assert result is waiter, \
            "Invalid switch into Queue.put: %r" % (result, )
        if waiter.item is not _NONE:
            raise Full
    else:
        raise Full
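A quick demo of the blocking semantics the docstring above describes, using eventlet's public Queue rather than these internals:

from eventlet.queue import Queue, Full

q = Queue(maxsize=1)
q.put('first')                    # a slot is free, returns immediately
try:
    q.put('second', timeout=0.1)  # no slot frees up, so Full after ~0.1s
except Full:
    print('queue stayed full for 0.1 seconds')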
def _write_with_timeout(self, writer, chunk):
    timeout = Timeout(self.timeout)
    try:
        writer.write(chunk)
    except Timeout as t:
        # Close the writer only if it was our own timer that fired,
        # then re-raise either way.
        if t is timeout:
            writer.close()
        raise t
    finally:
        timeout.cancel()
def check_for_mult_pages(driver):
    # selenium gets stuck when an element is not found, so set a timer
    timeout = Timeout(1, NoSuchElementException)
    next_ = None
    try:
        # check if there is a Next button on the page
        next_ = driver.find_element_by_xpath("//*[text()='Next >']")
    except NoSuchElementException:
        next_ = None
    finally:
        timeout.cancel()
    return next_
def shrink_cluster(self, context, cluster_id, instance_ids):

    def _shrink_cluster():
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        all_instance_ids = [db_instance.id for db_instance in db_instances]
        remove_instances = [Instance.load(context, instance_id)
                            for instance_id in instance_ids]
        left_instances = [Instance.load(context, instance_id)
                          for instance_id in all_instance_ids
                          if instance_id not in instance_ids]
        remove_member_ips = [self.get_ip(instance)
                             for instance in remove_instances]
        k = VerticaCluster.k_safety(len(left_instances))
        for db_instance in db_instances:
            if db_instance['type'] == 'master':
                master_instance = Instance.load(context, db_instance.id)
                if self.get_ip(master_instance) in remove_member_ips:
                    raise RuntimeError(_("Cannot remove master instance!"))
                LOG.debug("Marking cluster k-safety: %s", k)
                self.get_guest(master_instance).mark_design_ksafe(k)
                self.get_guest(master_instance).shrink_cluster(
                    remove_member_ips)
                break
        for r in remove_instances:
            Instance.delete(r)

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _shrink_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise
        LOG.exception("Timeout for shrinking cluster.")
        self.update_statuses_on_failure(
            cluster_id, status=inst_tasks.InstanceTasks.SHRINKING_ERROR)
    finally:
        timeout.cancel()
    LOG.debug("end shrink_cluster for Vertica cluster id %s", self.id)
def _check_health(self):
    logging.debug("Checking health of %s", self)
    try:
        timeout = Timeout(self.healthcheck_timeout_seconds)
        try:
            # Keep the connection local; don't shadow the socket module.
            sock = eventlet.connect((self.host, self.port))
        finally:
            timeout.cancel()
        logging.debug("%s is alive, making sure it is not blacklisted",
                      self)
        self.blacklisted = False
        sock.close()
    except:  # bare except: any failure (including Timeout) means "dead"
        logging.debug("%s seems dead, will check again later", self)
def create_cluster(self, context, cluster_id):
    """Create K2hdkcClusterTasks.

    This function is called in trove.taskmanager.Manager.create_cluster.
    """
    LOG.debug("Begins create_cluster for %s.", cluster_id)
    # 1. validates args
    if context is None:
        LOG.error("no context")
        return
    if cluster_id is None:
        LOG.error("no cluster_id")
        return
    timeout = Timeout(CONF.cluster_usage_timeout)
    LOG.debug("CONF.cluster_usage_timeout %s.", timeout)
    try:
        # 2. Retrieves db_instances from the database
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        # 3. Retrieves instance ids from the db_instances
        instance_ids = [db_instance.id for db_instance in db_instances]
        # 4. Checks if instances are ready
        if not self._all_instances_running(instance_ids, cluster_id):
            LOG.error("instances are not ready yet")
            return
        # 5. Loads instances
        instances = [Instance.load(context, instance_id)
                     for instance_id in instance_ids]
        # 6. Instantiates GuestAgent for each guest instance
        # 7. Calls cluster_complete endpoint of K2hdkcGuestAgent
        for instance in instances:
            self.get_guest(instance).cluster_complete()
        # 8. reset the current cluster task status to None
        LOG.debug("reset cluster task to None")
        self.reset_task()
    except Timeout as t:
        # Note: administrators should reset the task via CLI in this case.
        if t is not timeout:
            raise  # not my timeout
        LOG.exception("Timeout for building cluster.")
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("Completed create_cluster for %s.", cluster_id)
def create_cluster(self, context, cluster_id):
    LOG.debug("begin create_cluster for id: %s", cluster_id)

    def _create_cluster():
        # fetch instances by cluster_id against instances table
        db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
        instance_ids = [db_instance.id for db_instance in db_instances]
        LOG.debug("instances in cluster %(cluster_id)s: %(instance_ids)s",
                  {'cluster_id': cluster_id, 'instance_ids': instance_ids})
        if not self._all_instances_ready(instance_ids, cluster_id):
            return
        LOG.debug("all instances in cluster %s ready.", cluster_id)
        instances = [Instance.load(context, instance_id)
                     for instance_id in instance_ids]
        # filter the tidb_server instances into a new list
        tidb_server = [instance for instance in instances
                       if instance.type == 'tidb_server']
        LOG.debug("tidb_server: %s",
                  [instance.id for instance in tidb_server])
        # filter the pd_server instances into a new list
        pd_server = [instance for instance in instances
                     if instance.type == 'pd_server']
        LOG.debug("pd_server: %s",
                  [instance.id for instance in pd_server])
        # filter the tikv instances into a new list
        tikv = [instance for instance in instances
                if instance.type == 'tikv']
        LOG.debug("tikv: %s", [instance.id for instance in tikv])

    cluster_usage_timeout = CONF.cluster_usage_timeout
    timeout = Timeout(cluster_usage_timeout)
    try:
        _create_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception("timeout for building cluster.")
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("end create_cluster for id: %s", cluster_id)
def get_schema(name, revision, submod_name, submod_revision, priv):
    global log
    # ask frontend/user for missing schema
    params = {'id': priv['session_id'], 'name': name, 'revision': revision,
              'submod_name': submod_name,
              'submod_revision': submod_revision}
    socketio.emit('getschema', params, callback=sio_send)
    result = (None, None)
    timeout = Timeout(300)
    data = None
    try:
        # wait for response from the frontend
        data = sio_wait(priv['session_id'])
        if data['filename'].lower().endswith('.yang'):
            format = yang.LYS_IN_YANG
        elif data['filename'].lower().endswith('.yin'):
            format = yang.LYS_IN_YIN
        else:
            return result
        result = (format, data['data'])
    except Timeout:
        # no response received within the timeout
        print("socketio: getschema timeout.")
    except (KeyError, AttributeError) as e:
        # invalid response
        print(e)
        print("socketio: invalid getschema_result received.")
    finally:
        # we have the response
        sio_clean(priv['session_id'])
        timeout.cancel()

    # store the received file
    try:
        site_root = os.path.realpath(os.path.dirname(__file__))
        path = os.path.join(site_root, 'userfiles',
                            priv['user'].username, data['filename'])
        if not os.path.exists(os.path.dirname(path)):
            try:
                os.makedirs(os.path.dirname(path))
            except OSError as exc:
                # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise
        with open(path, 'w+') as schema_file:
            schema_file.write(data['data'])
    except Exception as e:
        log.error(e)
        print(e)
    return result
def create_cluster(self, context, cluster_id):
    LOG.debug("Begin create_cluster for id: %s." % cluster_id)

    def _create_cluster():
        cluster_node_ids = self.find_cluster_node_ids(cluster_id)
        # Wait for cluster nodes to get to cluster-ready status.
        LOG.debug("Waiting for all nodes to become ready.")
        if not self._all_instances_ready(cluster_node_ids, cluster_id):
            return
        cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids)
        coordinator = self._get_coordinator_node(cluster_nodes)
        LOG.debug("Initializing the cluster on node '%s'."
                  % coordinator['ip'])
        # start with the coordinator as it will have all the required
        # services.
        guest_node_info = self.build_guest_node_info([coordinator])
        # now add all the other nodes so we can get a list of all services
        # needed to calculate the memory allocation properly.
        add_node_info = [node for node in cluster_nodes
                         if node != coordinator]
        guest_node_info.extend(self.build_guest_node_info(add_node_info))
        coordinator['guest'].initialize_cluster(guest_node_info)
        self._add_nodes(coordinator, add_node_info)
        coordinator['guest'].cluster_complete()
        LOG.debug("Cluster create finished successfully.")

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        with EndNotification(context, cluster_id=cluster_id):
            _create_cluster()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for building cluster."))
        self.update_statuses_on_failure(cluster_id)
    except Exception:
        LOG.exception(_("Error creating cluster."))
        self.update_statuses_on_failure(cluster_id)
        raise
    finally:
        self.reset_task()
        timeout.cancel()
    LOG.debug("End create_cluster for id: %s." % cluster_id)
def grow_cluster(self, context, cluster_id, new_instance_ids):
    LOG.debug("Begin grow_cluster for id: %s." % cluster_id)

    def _grow_cluster():
        # Wait for new nodes to get to cluster-ready status.
        LOG.debug("Waiting for new nodes to become ready.")
        if not self._all_instances_ready(new_instance_ids, cluster_id):
            return
        new_instances = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]
        added_nodes = [self.build_node_info(instance)
                       for instance in new_instances]
        LOG.debug("All nodes ready, proceeding with cluster setup.")
        cluster_node_ids = self.find_cluster_node_ids(cluster_id)
        cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids)
        # Rebalance the cluster via one of the existing nodes.
        # Clients can continue to store and retrieve information and
        # do not need to be aware that a rebalance operation is taking
        # place. The new nodes are marked active only if the rebalancing
        # completes.
        try:
            coordinator = cluster_nodes[0]
            self._add_nodes(coordinator, added_nodes)
            LOG.debug("Cluster configuration finished successfully.")
        except Exception:
            LOG.exception(_("Error growing cluster."))
            self.update_statuses_on_failure(cluster_id)

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _grow_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for growing cluster."))
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("End grow_cluster for id: %s." % cluster_id)
def __enter__(self):
    """Begin the context handling.

    Clears out any captured data and initializes any timeouts defined
    for the test.
    """
    # Clear the captured values for this thread
    capture.retrieve()
    # If test should be timed, set up the timeout
    if self.result._test._timeout:
        self.timeout = Timeout(
            self.result._test._timeout,
            AssertionError("Timed out after %s seconds"
                           % self.result._test._timeout))
def test_nested_timeout(self):
    with Timeout(DELAY, False):
        with Timeout(DELAY * 2, False):
            sleep(DELAY * 3)
        raise AssertionError('should not get there')

    with Timeout(DELAY) as t1:
        with Timeout(DELAY * 2) as t2:
            try:
                sleep(DELAY * 3)
            except Timeout as ex:
                assert ex is t1, (ex, t1)
            assert not t1.pending, t1
            assert t2.pending, t2
        assert not t2.pending, t2
def shrink_cluster(self, context, cluster_id, instance_ids):

    def _shrink_cluster():
        db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                           deleted=False).all()
        all_instance_ids = [db_instance.id for db_instance in db_instances]
        remove_instances = [Instance.load(context, instance_id)
                            for instance_id in instance_ids]
        left_instances = [Instance.load(context, instance_id)
                          for instance_id in all_instance_ids
                          if instance_id not in instance_ids]
        remove_member_ips = [self.get_ip(instance)
                             for instance in remove_instances]
        k = VerticaCluster.k_safety(len(left_instances))
        for db_instance in db_instances:
            if db_instance['type'] == 'master':
                master_instance = Instance.load(context, db_instance.id)
                if self.get_ip(master_instance) in remove_member_ips:
                    raise RuntimeError(_("Cannot remove master instance!"))
                LOG.debug(_("Marking cluster k-safety: %s") % k)
                self.get_guest(master_instance).mark_design_ksafe(k)
                self.get_guest(master_instance).shrink_cluster(
                    remove_member_ips)
                break
        for r in remove_instances:
            Instance.delete(r)

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _shrink_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise
        LOG.exception(_("Timeout for shrinking cluster."))
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("end shrink_cluster for Vertica cluster id %s" % self.id)
def grow_cluster(self, context, cluster_id, new_instance_ids):
    LOG.debug("Begin grow_cluster for id: %s." % cluster_id)

    def _grow_cluster():
        db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
        cluster_head = next(Instance.load(context, db_inst.id)
                            for db_inst in db_instances
                            if db_inst.id not in new_instance_ids)
        if not cluster_head:
            raise TroveError("Unable to determine existing Redis cluster "
                             "member")
        (cluster_head_ip, cluster_head_port) = (
            self.get_guest(cluster_head).get_node_ip())
        # Wait for cluster members to get to cluster-ready status.
        if not self._all_instances_ready(new_instance_ids, cluster_id):
            return
        LOG.debug("All members ready, proceeding for cluster setup.")
        new_insts = [Instance.load(context, instance_id)
                     for instance_id in new_instance_ids]
        new_guests = map(self.get_guest, new_insts)
        # Connect nodes to the cluster head
        for guest in new_guests:
            guest.cluster_meet(cluster_head_ip, cluster_head_port)
        for guest in new_guests:
            guest.cluster_complete()

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        _grow_cluster()
        self.reset_task()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for growing cluster."))
        self.update_statuses_on_failure(cluster_id)
    except Exception:
        LOG.exception(_("Error growing cluster %s.") % cluster_id)
        self.update_statuses_on_failure(cluster_id)
    finally:
        timeout.cancel()
    LOG.debug("End grow_cluster for id: %s." % cluster_id)
def _connect_put_node(self, nodes, part, path, headers,
                      logger_thread_locals):
    """Method for a file PUT connect"""
    self.app.logger.thread_locals = logger_thread_locals
    for node in nodes:
        try:
            start_time = time.time()
            with ConnectionTimeout(self.app.conn_timeout):
                conn = http_connect(
                    node['ip'], node['port'], node['device'], part,
                    'PUT', path, headers)
            self.app.set_node_timing(node, time.time() - start_time)
            with Timeout(self.app.node_timeout):
                resp = conn.getexpect()
            if resp.status == HTTP_CONTINUE:
                conn.resp = None
                conn.node = node
                return conn
            elif is_success(resp.status):
                conn.resp = resp
                conn.node = node
                return conn
            elif headers.get('If-None-Match') is not None and \
                    resp.status == HTTP_PRECONDITION_FAILED:
                conn.resp = resp
                conn.node = node
                return conn
            elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                self.app.error_limit(node,
                                     _('ERROR Insufficient Storage'))
        except (Exception, Timeout):
            self.app.exception_occurred(
                node, _('Object'),
                _('Expect: 100-continue on %s') % path)
def _make_request(self, nodes, part, method, path, headers, query,
                  logger_thread_locals):
    self.app.logger.thread_locals = logger_thread_locals
    for node in nodes:
        try:
            start_node_timing = time.time()
            with ConnectionTimeout(self.app.conn_timeout):
                conn = http_connect(node['ip'], node['port'],
                                    node['device'], part, method, path,
                                    headers=headers, query_string=query)
                conn.node = node
            self.app.set_node_timing(node,
                                     time.time() - start_node_timing)
            with Timeout(self.app.node_timeout):
                resp = conn.getresponse()
                if not is_informational(resp.status) and \
                        not is_server_error(resp.status):
                    return resp.status, resp.reason, resp.read()
                elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                    self.error_limit(node,
                                     _('ERROR Insufficient Storage'))
        except (Exception, Timeout):
            self.exception_occurred(
                node, self.server_type,
                _('Trying to %(method)s %(path)s') %
                {'method': method, 'path': path})
def portal_types(self):
    """ Mapping between history id and portal_type """
    if self._portal_types:
        return self._portal_types

    shadow = self.storage._getShadowStorage()
    histIds = shadow._storage
    self._portal_types = dict(PORTAL_TYPES.items())
    for hid in histIds.keys():
        if hid in self._portal_types:
            continue
        with Timeout(10):
            try:
                ob = self.storage.retrieve(hid).object.object
            except (BrokenModified, POSKeyError):
                logger.warn("BrokenModified raised for historyid: %s", hid)
                continue
            except Exception as err:
                logger.exception(err)
                continue
        if not ob:
            logger.warn("Timeout raised for history id: %s", hid)
            continue
        if isinstance(ob, Removed):
            continue
        ptype = ob.getPortalTypeName()
        logger.warn("Adding hid - portal_type mapping: %s = %s",
                    hid, ptype)
        self._portal_types[hid] = ptype
    return self._portal_types
def spawn_accepts():
    events = []
    for _junk in xrange(2):
        with Timeout(3):
            sock, addr = bindsock.accept()
            events.append(spawn(accept, sock, addr))
    return events
def test_raising_exception_class(self):
    # Providing classes instead of values should be possible too:
    try:
        with Timeout(DELAY, ValueError):
            sleep(DELAY * 2)
    except ValueError:
        pass
def _connect_put_node(self, host, port, method, path, headers,
                      query_string, ssl=False):
    try:
        with ConnectionTimeout(self.conn_timeout):
            conn = http_connect_raw(host, port, method, path,
                                    headers=headers,
                                    query_string=query_string, ssl=ssl)
        if 'content-length' in headers and \
                int(headers['content-length']) == 0:
            return conn
        with Timeout(self.node_timeout):
            resp = conn.getexpect()
        if resp.status == 100:
            return conn
        elif resp.status == 507:
            self.logger.error('507 Insufficient Storage in %s:%s%s'
                              % (host, port, path))
            raise Exception
    except:  # bare except so any failure is logged and None is returned
        self.logger.error('Expect: 100-continue on %s:%s%s'
                          % (host, port, path))
    return None
def grow_cluster(self, context, cluster_id, new_instance_ids):
    LOG.debug("Begin grow_cluster for id: %s." % cluster_id)

    def _grow_cluster():
        # Wait for new nodes to get to cluster-ready status.
        LOG.debug("Waiting for new nodes to become ready.")
        if not self._all_instances_ready(new_instance_ids, cluster_id):
            return
        new_instances = [Instance.load(context, instance_id)
                         for instance_id in new_instance_ids]
        add_node_info = [self.build_node_info(instance)
                         for instance in new_instances]
        LOG.debug("All nodes ready, proceeding with cluster setup.")
        cluster_node_ids = self.find_cluster_node_ids(cluster_id)
        cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids)
        old_node_info = [node for node in cluster_nodes
                         if node['id'] not in new_instance_ids]
        # Rebalance the cluster via one of the existing nodes.
        # Clients can continue to store and retrieve information and
        # do not need to be aware that a rebalance operation is taking
        # place.
        coordinator = old_node_info[0]
        self._add_nodes(coordinator, add_node_info)
        LOG.debug("Cluster grow finished successfully.")

    timeout = Timeout(CONF.cluster_usage_timeout)
    try:
        with EndNotification(context, cluster_id=cluster_id):
            _grow_cluster()
    except Timeout as t:
        if t is not timeout:
            raise  # not my timeout
        LOG.exception(_("Timeout for growing cluster."))
    except Exception:
        LOG.exception(_("Error growing cluster."))
        raise
    finally:
        self.reset_task()
        timeout.cancel()
    LOG.debug("End grow_cluster for id: %s." % cluster_id)
def downloadDM(self, timeout=2, cached=False):
    self.DM = None
    self.vscp.sendSimpleEvent(
        vscp_class=constant.VSCP_CLASS1_PROTOCOL,
        vscp_type=constant.VSCP_TYPE_PROTOCOL_GET_MATRIX_INFO,
        vscp_data=[self.nodeId],
    )
    timer = Timeout(timeout, self.__timeout)
    try:
        # Poll until the matrix info arrives (or the timer fires).
        while self.DM is None:
            time.sleep(0.02)
        timer.cancel()
        for i in range(self.DM["size"]):
            self.DM["rows"][i] = self.getRowOfDM(i)
    except:  # bare except: report any failure as a generic VSCP error
        return constant.VSCP_ERROR_ERROR
def recv_events(self, timeout_msecs):
    """Let explorer receive events.

    :param timeout_msecs: int
    :return: list of events gathered before the timeout fired
    """
    events = []
    timeout = Timeout(timeout_msecs / 1000.0)
    try:
        # Drain the queue until our own timeout interrupts the get().
        while True:
            event = self._event_q.get()
            events.append(event)
    except Timeout:
        pass
    finally:
        timeout.cancel()
    return events
def get(self, block=True, timeout=None):
    """Remove and return an item from the queue.

    If optional args *block* is true and *timeout* is ``None`` (the
    default), block if necessary until an item is available. If *timeout*
    is a positive number, it blocks at most *timeout* seconds and raises
    the :class:`Empty` exception if no item was available within that
    time. Otherwise (*block* is false), return an item if one is
    immediately available, else raise the :class:`Empty` exception
    (*timeout* is ignored in that case).
    """
    if self.qsize():
        if self.putters:
            self._schedule_unlock()
        return self._get()
    elif not block and get_hub().greenlet is getcurrent():
        # special case to make get_nowait() runnable in the mainloop
        # greenlet. there are no items in the queue; try to fix the
        # situation by unlocking putters
        while self.putters:
            putter = self.putters.pop()
            if putter:
                putter.switch(putter)
                if self.qsize():
                    return self._get()
        raise Empty
    elif block:
        waiter = Waiter()
        timeout = Timeout(timeout, Empty)
        try:
            self.getters.add(waiter)
            if self.putters:
                self._schedule_unlock()
            try:
                return waiter.wait()
            except:
                self._schedule_unlock()
                raise
        finally:
            self.getters.discard(waiter)
            timeout.cancel()
    else:
        raise Empty
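The corresponding demo for get(): an empty queue plus a short timeout raises Empty instead of blocking forever (again via eventlet's public Queue):

from eventlet.queue import Queue, Empty

q = Queue()
try:
    q.get(timeout=0.1)  # nothing is ever put, so Empty after ~0.1s
except Empty:
    print('nothing arrived within 0.1 seconds')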
def wait(self):
    do_wait = True
    up = 0
    to = Timeout(self.sandbox_wait_timeout)
    try:
        while do_wait:
            rc = self.ping()
            if rc != 1:
                time.sleep(self.sandbox_ping_interval)
                continue
            else:
                to.cancel()
                do_wait = False
                up = 1
    except Timeout:
        self.logger.info("wait for sandbox %s timedout" % self.account)
        do_wait = False
    finally:
        to.cancel()
    return up
def execute_with_timeout(*args, **kwargs):
    time = kwargs.get('timeout', 30)

    def cb_timeout():
        msg = _("Time out after waiting"
                " %(time)s seconds when running proc: %(args)s"
                " %(kwargs)s") % locals()
        raise exception.ProcessExecutionError(msg)

    timeout = Timeout(time)
    try:
        return execute(*args, **kwargs)
    except Timeout as t:
        if t is not timeout:
            raise
        else:
            msg = _("Time out after waiting "
                    "%(time)s seconds when running proc: %(args)s"
                    " %(kwargs)s") % locals()
            raise exception.ProcessExecutionError(msg)
    finally:
        timeout.cancel()
def handle(self, sock, read_data, path, headers):
    request_id = headers.get("X-Request-Id", "-")
    for attempt in range(self.attempts):
        if attempt > 0:
            logging.warn("[%s] Retrying connection for host %s",
                         request_id, self.host)
        backend = self.select_backend()
        try:
            timeout = Timeout(self.connection_timeout_seconds)
            try:
                server_sock = eventlet.connect((backend.host,
                                                backend.port))
            finally:
                timeout.cancel()
            backend.add_connection()
            break
        except socket.error:
            logging.error("[%s] Proxy socket error on connect() "
                          "to %s of %s", request_id, backend, self.host)
            self.blacklist(backend)
            eventlet.sleep(self.delay)
            continue
        except:  # bare except: the Timeout raised here is a BaseException
            logging.warn("[%s] Proxy timeout on connect() to %s of %s",
                         request_id, backend, self.host)
            self.blacklist(backend)
            eventlet.sleep(self.delay)
            continue

    # Function to help track data usage
    def send_onwards(data):
        server_sock.sendall(data)
        return len(data)

    try:
        size = send_onwards(read_data)
        size += SocketMelder(sock, server_sock, backend, self.host).run()
    except socket.error, e:
        if e.errno != errno.EPIPE:
            raise