def wait_for_score(self, types, timeout=20.0, score_threshold=35):
    """Wait for all services of the given types to reach a score.

    Polls the conscience once per second until every registered service
    of each type has a score of at least *score_threshold*, or until
    *timeout* seconds have elapsed.

    :param types: iterable of service types (e.g. 'rawx') to check.
    :param timeout: maximum number of seconds to wait.
    :param score_threshold: minimum acceptable score for each service.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        wait = False
        for type_ in types:
            try:
                all_svcs = self.conscience.all_services(type_)
                for service in all_svcs:
                    if int(service['score']) < score_threshold:
                        wait = True
                        break
                else:
                    # No service registered yet, must wait.
                    if not all_svcs:
                        wait = True
            except Exception as err:
                # logging.warn() is deprecated, logging.warning() is the
                # documented spelling.
                logging.warning('Could not check service score: %s', err)
                wait = True
            if wait:
                # No need to check other types, we have to wait anyway
                break
        if not wait:
            return
        time.sleep(1)
    logging.info('Service(s) fails to reach %d score (timeout %d)',
                 score_threshold, timeout)
def run(self): """ Main worker loop """ self.start_time = time.time() while not self._stop: try: self.crawl_volume() self.last_scan_time = time.time() time.sleep(self.scans_interval) except exc.OioException as exception: self.logger.exception("ERROR during indexing meta2: %s", exception)
def wait_for_ready_job(self, tube, timeout=float('inf'), poll_interval=0.2):
    """Poll *tube* until it holds a ready job, or *timeout* expires.

    :returns: a ``(job_id, data)`` tuple; ``job_id`` is None when the
        timeout expired before any job became ready.
    """
    self.use(tube)
    deadline = time.time() + timeout
    job_id, data = self.peek_ready()
    while job_id is None and time.time() < deadline:
        time.sleep(poll_interval)
        job_id, data = self.peek_ready()
    return job_id, data
def _lock_services(self, type_, services, wait=1.0, score=0): """ Lock specified services, wait for the score to be propagated. """ for svc in services: self.locked_svc.append({'type': type_, 'addr': svc['addr'], 'score': score}) self.conscience.lock_score(self.locked_svc) # In a perfect world™️ we do not need the time.sleep(). # For mysterious reason, all services are not reloaded immediately. self._reload_proxy() time.sleep(wait) self._reload_meta() time.sleep(wait)
def wait_until_empty(self, tube, timeout=float('inf'), poll_interval=0.2,
                     initial_delay=0.0):
    """Poll *tube* until it has no more ready job, or *timeout* expires."""
    # TODO(FVE): check tube stats to ensure some jobs have passed through
    # and then get rid of the initial_delay
    # peek-ready requires "use", not "watch"
    self.use(tube)
    if initial_delay > 0.0:
        time.sleep(initial_delay)
    deadline = time.time() + timeout
    job_id = self.peek_ready()[0]
    while job_id is not None and time.time() < deadline:
        time.sleep(poll_interval)
        job_id = self.peek_ready()[0]
def _service(self, name, action, wait=0, socket=None):
    """Run a gridinit *action* on a service, optionally sleeping after.

    :param name: the service upon which the command should be executed
        (it gets prefixed with the namespace).
    :param action: the command to send (e.g. 'start' or 'stop').
    :param wait: number of seconds to sleep after the command.
    :param socket: the unix socket gridinit is listening on,
        defaults to ~/.oio/sds/run/gridinit.sock.
    """
    sock = socket or os.path.expanduser('~/.oio/sds/run/gridinit.sock')
    svc = "%s-%s" % (self.ns, name)
    check_call(['gridinit_cmd', '-S', sock, action, svc])
    if wait > 0:
        time.sleep(wait)
def run(self, *args, **kwargs):
    """Run indexing passes forever, roughly once per ``self.interval``.

    Starts with a random delay so that several workers launched
    simultaneously do not all scan at the same moment.
    """
    time.sleep(random() * self.interval)
    while True:
        start = time.time()
        try:
            self.index_pass()
        except exc.VolumeException as err:
            self.logger.error(
                'Cannot index chunks, will retry later: %s', err)
        except Exception as err:
            self.logger.exception('ERROR during indexing: %s', err)
        else:
            self.passes += 1
        # Guard against a zero elapsed time before computing the pause.
        remaining = self.interval - ((time.time() - start) or 0.000001)
        if remaining > 0:
            time.sleep(remaining)
def fetch_job(self, on_job, timeout=None, **kwargs):
    """Reserve one beanstalkd job and hand it to the *on_job* callable.

    Generator: yields whatever iterating ``on_job(job_id, data)`` yields.
    On success the job is deleted; if *on_job* raises, the job is buried
    and the original exception is re-raised. Connection errors mark the
    client as disconnected so the next call reconnects.

    :param on_job: callable taking ``(job_id, data, **kwargs)`` and
        returning an iterable of job information.
    :param timeout: reserve timeout; on expiry an
        ``exceptions.OioTimeout`` is raised.
    """
    job_id = None
    try:
        if not self.connected:
            self.logger.debug('Connecting to %s using tube %s',
                              self.addr, self.tube)
            self._connect(**kwargs)
        job_id, data = self.beanstalkd.reserve(timeout=timeout)
        try:
            for job_info in on_job(job_id, data, **kwargs):
                yield job_info
        except GeneratorExit:
            # If the reader finishes to handle the job, but does not want
            # any new job, it will break the generator. This does not mean
            # the current job has failed, thus we must delete it.
            self.beanstalkd.delete(job_id)
            raise
        except Exception as err:
            # The handler failed: bury the job for later inspection, then
            # re-raise the original error (with its class preserved).
            try:
                self.beanstalkd.bury(job_id)
            except BeanstalkError as exc:
                self.logger.error("Could not bury job %s: %s", job_id, exc)
            exceptions.reraise(err.__class__, err)
        else:
            # Handler consumed the job without error: acknowledge it.
            self.beanstalkd.delete(job_id)
            return
    except ConnectionError as exc:
        self.connected = False
        self.logger.warn('Disconnected from %s using tube %s (job=%s): %s',
                         self.addr, self.tube, job_id, exc)
        if 'Invalid URL' in str(exc):
            raise
        # Give the server a chance to come back before the caller retries.
        time.sleep(1.0)
    except exceptions.ExplicitBury as exc:
        self.logger.warn("Job bury on %s using tube %s (job=%s): %s",
                         self.addr, self.tube, job_id, exc)
    except BeanstalkError as exc:
        # Translate a reserve timeout into the generic OioTimeout;
        # any other beanstalkd error is only logged.
        if isinstance(exc, ResponseError) and 'TIMED_OUT' in str(exc):
            raise exceptions.OioTimeout()
        self.logger.exception("ERROR on %s using tube %s (job=%s)",
                              self.addr, self.tube, job_id)
    except Exception:
        self.logger.exception("ERROR on %s using tube %s (job=%s)",
                              self.addr, self.tube, job_id)
def _sleep(self):
    """Pause for the module-wide SLEEP_TIME between iterations."""
    pause = SLEEP_TIME
    time.sleep(pause)