def rebuilder_pass(self, num, queue, retry_queue=None, **kwargs):
    """Consume items from *queue* forever and rebuild each one.

    Items whose rebuild raises ``exceptions.RetryLater`` are pushed to
    *retry_queue* when one is provided; otherwise the error is recorded
    like any other failure. This method never returns.
    """
    while True:
        task = queue.get()
        result, failure = None, None
        try:
            result = self._rebuild_one(task, **kwargs)
        except exceptions.RetryLater as exc:
            if retry_queue:
                # exc.args is (item, message) — requeue the item for later
                self.logger.warn(
                    "Putting an item in the retry queue: %s", exc.args[1])
                retry_queue.put(exc.args[0])
            else:
                failure = str(exc)
        except Exception as exc:
            failure = str(exc)
        queue.task_done()
        self.update_processed(task, result, error=failure, **kwargs)
        self.log_report(**kwargs)
        # Throttle to the configured item rate.
        self.items_run_time = ratelimit(
            self.items_run_time, self.max_items_per_second)
        if self.random_wait:
            # Small random jitter (microseconds) to desynchronize workers.
            eventlet.sleep(random.randint(0, self.random_wait) / 1.0e6)
def run(self):
    """Event-agent main loop.

    Spawns ``concurrency`` handler greenthreads per beanstalkd endpoint,
    then idles until stopped, and finally shuts the handlers down
    gracefully (hard-killing them after ``graceful_timeout``).
    """
    coros = []
    # Several beanstalkd endpoints may be configured, separated by ';'.
    queue_url = self.conf.get('queue_url', 'beanstalk://127.0.0.1:11300')
    concurrency = int_value(self.conf.get('concurrency'), 10)
    server_gt = greenthread.getcurrent()
    for url in queue_url.split(';'):
        for i in range(concurrency):
            # One dedicated beanstalk connection per handler greenthread.
            beanstalk = Beanstalk.from_url(url)
            gt = eventlet.spawn(self.handle, beanstalk)
            # _eventlet_stop runs when the handler exits
            # (presumably to tear down the connection — confirm in its def).
            gt.link(_eventlet_stop, server_gt, beanstalk)
            coros.append(gt)
            # Drop local refs: the greenthread owns them now.
            beanstalk, gt = None, None
    while self.alive:
        self.notify()
        try:
            eventlet.sleep(1.0)
        except AssertionError:
            # NOTE(review): looks like this is raised when this
            # greenthread gets killed — treat it as a stop request.
            self.alive = False
            break
    self.notify()
    # Graceful shutdown: ask every handler to stop, then wait for them.
    try:
        with Timeout(self.graceful_timeout) as t:
            [c.kill(StopServe()) for c in coros]
            [c.wait() for c in coros]
    except Timeout as te:
        if te != t:
            # Not our graceful-shutdown timeout: propagate it.
            raise
        # Grace period exceeded: kill the stragglers hard.
        [c.kill() for c in coros]
def stop(self, graceful=True):
    """Stop all workers.

    Sends SIGTERM (or SIGQUIT when *graceful* is False), waits up to
    5 seconds for the workers to go away, then sends SIGKILL to any
    that remain.
    """
    first_sig = signal.SIGTERM if graceful else signal.SIGQUIT
    deadline = time.time() + 5
    self.kill_workers(first_sig)
    # Poll until every worker is gone or the grace period expires.
    while self.workers and time.time() < deadline:
        eventlet.sleep(0.1)
    self.kill_workers(signal.SIGKILL)
def _delete(self, conn):
    """Delete an object from Backblaze, retrying with exponential backoff.

    Gives up after TRY_REQUEST_NUMBER attempts and raises ``OioException``
    wrapping the last Backblaze error.
    """
    sysmeta = conn['sysmeta']
    attempts_left = TRY_REQUEST_NUMBER
    while True:
        try:
            conn['backblaze'].delete(
                self.backblaze_info['bucket_name'], sysmeta)
            return
        except BackblazeException as b2e:
            if attempts_left == 0:
                raise OioException('backblaze delete error: %s' % str(b2e))
            # Backoff doubles with each failed attempt: 1s, 2s, 4s, ...
            eventlet.sleep(pow(2, TRY_REQUEST_NUMBER - attempts_left))
            attempts_left -= 1
def rebuilder_pass(self, num, queue, **kwargs):
    """Consume items from *queue* forever and rebuild each one.

    Failures are stringified and reported through
    ``update_processed``; this method never returns.
    """
    while True:
        job = queue.get()
        outcome, failure = None, None
        try:
            outcome = self._rebuild_one(job, **kwargs)
        except Exception as exc:
            failure = str(exc)
        queue.task_done()
        self.update_processed(job, outcome, error=failure, **kwargs)
        self.log_report(**kwargs)
        # Throttle to the configured item rate.
        self.items_run_time = ratelimit(
            self.items_run_time, self.max_items_per_second)
        if self.random_wait:
            # Small random jitter (microseconds) to desynchronize workers.
            eventlet.sleep(random.randint(0, self.random_wait) / 1.0e6)
def handle(self, beanstalk):
    """Consume jobs from *beanstalk* until a ``StopServe`` is received.

    Each reserved job is decoded and dispatched to ``process_event``.
    Malformed or failing jobs are buried; jobs interrupted by shutdown
    are released back to the queue. Connection errors trigger a
    sleep-and-retry loop (logged only on state transitions).
    """
    conn_error = False
    try:
        if self.tube:
            # Both produce to and consume from the configured tube.
            beanstalk.use(self.tube)
            beanstalk.watch(self.tube)
        while True:
            try:
                job_id, data = beanstalk.reserve()
                if conn_error:
                    # We were in the error state and a reserve succeeded.
                    self.logger.warn("beanstalk reconnected")
                    conn_error = False
            except ConnectionError:
                # Log only on the first failure, not on every retry.
                if not conn_error:
                    self.logger.warn("beanstalk connection error")
                    conn_error = True
                eventlet.sleep(BEANSTALK_RECONNECTION)
                continue
            event = self.safe_decode_job(job_id, data)
            if not event:
                # Undecodable payload: bury so it can be inspected later.
                self.logger.warn("Burying event %s: %s", job_id, "malformed")
                beanstalk.bury(job_id)
            else:
                try:
                    self.process_event(job_id, event, beanstalk)
                except (ClientException, OioNetworkException) as exc:
                    self.logger.warn("Burying event %s (%s): %s",
                                     job_id, event.get('event'), exc)
                    beanstalk.bury(job_id)
                except ExplicitBury:
                    # The handler itself asked for the job to be buried.
                    self.logger.info("Burying event %s (%s)",
                                     job_id, event.get('event'))
                    beanstalk.bury(job_id)
                except StopServe:
                    # Shutting down: give the job back to the queue.
                    self.logger.info("Releasing event %s (%s): stopping",
                                     job_id, event.get('event'))
                    beanstalk.release(job_id)
                except Exception:
                    # Unexpected failure: keep the job around for debugging.
                    self.logger.exception("Burying event %s: %s",
                                          job_id, event)
                    beanstalk.bury(job_id)
    except StopServe:
        # Raised while blocked in reserve(): normal shutdown path.
        pass
def _make_stream(self, source):
    """Download the object's data from Backblaze.

    Iterates over ``self.chunks`` (presumably replicas of the same
    content — confirm against callers), downloading each with
    exponential-backoff retries, and returns the last non-empty
    download, or None. Raises ``OioException`` when a chunk cannot be
    downloaded after TRY_REQUEST_NUMBER attempts.
    """
    result = None
    data = None
    for chunk in self.chunks:
        self.meta['name'] = _get_name(chunk)
        try_number = TRY_REQUEST_NUMBER
        while True:
            try:
                data = source.download(self.backblaze_info['bucket_name'],
                                       self.meta, self.headers)
                break
            except BackblazeException as b2e:
                if try_number == 0:
                    raise OioException('backblaze download error: %s' %
                                       str(b2e))
                else:
                    # Backoff doubles with each failed attempt.
                    eventlet.sleep(pow(2, TRY_REQUEST_NUMBER - try_number))
                    try_number -= 1
        # Keep the most recent non-empty payload.
        if data:
            result = data
    return result
def _upload_chunks(self, conn, size, sha1, md5, temp):
    """Upload the buffered data in *temp* as a single Backblaze file.

    Retries up to TRY_REQUEST_NUMBER times, honoring the server's
    Retry-After header when present, with exponential backoff as the
    fallback. On success, records *md5* as the chunk hash and returns
    ``(size, meta_chunk)``. Raises ``OioException`` when all attempts
    are exhausted.
    """
    try_number = TRY_REQUEST_NUMBER
    while True:
        self.meta_chunk[0]['size'] = size
        try:
            conn['backblaze'].upload(self.backblaze_info['bucket_name'],
                                     self.sysmeta, temp, sha1)
            break
        except BackblazeException as b2e:
            # Rewind so the next attempt re-reads the buffer from the start.
            temp.seek(0)
            if try_number == 0:
                logger.debug('headers sent: %s' % str(b2e.headers_send))
                raise OioException('backblaze upload error: %s' % str(b2e))
            else:
                sleep_time_default = pow(2, TRY_REQUEST_NUMBER - try_number)
                # BUGFIX: the Retry-After header value arrives as a
                # string; passing it straight to eventlet.sleep() would
                # raise. Coerce to float, falling back to the default
                # backoff on a malformed value.
                retry_after = b2e.headers_received.get(
                    "Retry-After", sleep_time_default)
                try:
                    delay = float(retry_after)
                except (TypeError, ValueError):
                    delay = sleep_time_default
                eventlet.sleep(delay)
                try_number -= 1
    self.meta_chunk[0]['hash'] = md5
    return self.meta_chunk[0]["size"], self.meta_chunk
def run(self):
    """Master process main loop (arbiter-style signal dispatcher).

    After ``start()``, repeatedly pops queued signal numbers and
    dispatches each to its ``handle_<signame>`` method, topping up the
    worker pool once per idle second. Exits via ``halt()`` on
    stop/interrupt, and with status -1 on any unhandled exception.
    """
    self.start()
    try:
        self.manage_workers()
        while True:
            # Signals are enqueued by the (elsewhere-defined) signal
            # handlers; None means nothing pending.
            sig = self.sig_queue.pop(0) if self.sig_queue else None
            if sig is None:
                eventlet.sleep(1)
                # Periodic housekeeping: respawn missing workers.
                self.manage_workers()
                continue
            if sig not in self.SIG_NAMES:
                self.logger.info('Ignoring unknown signal: %s', sig)
                continue
            signame = self.SIG_NAMES.get(sig)
            # Dispatch by naming convention: SIGTERM -> handle_term, etc.
            handler = getattr(self, "handle_%s" % signame, None)
            if not handler:
                self.logger.error("Unhandled signal: %s", signame)
                continue
            self.logger.info("Handling signal: %s", signame)
            handler()
    except StopIteration:
        # NOTE(review): presumably raised by a handler to request a
        # clean shutdown — confirm against the handle_* methods.
        self.halt()
    except KeyboardInterrupt:
        self.halt()
    except HaltServer as h:
        self.halt(reason=h.reason, exit_status=h.exit_status)
    except SystemExit:
        # Let deliberate exits propagate untouched.
        raise
    except Exception:
        self.logger.info("Unhandled exception in main loop", exc_info=True)
        self.stop(False)
        sys.exit(-1)
def spawn_workers(self):
    """Fork workers until ``self.num_workers`` are alive.

    A short random pause between forks staggers worker start-up.
    """
    missing = self.num_workers - len(self.workers)
    for _ in range(missing):
        self.spawn_worker()
        eventlet.sleep(0.1 * random.random())
def handle_quit(self, sig, frame):
    """Signal handler: stop this worker and exit with status 0.

    Clears the run flag, yields briefly so other greenthreads can
    observe it, then exits the process.
    """
    self.alive = False
    # Brief cooperative yield before exiting.
    eventlet.sleep(0.1)
    sys.exit(0)
def _stream_big_chunks(self, source, conn, temp):
    """Upload a large object to Backblaze using the multi-part API.

    Reads *source* into *temp* one part at a time (parts of at most
    BACKBLAZE_MAX_CHUNK_SIZE bytes), uploading each part with retries.
    Falls back to a single-shot upload when the data fits in one part.
    Returns ``(bytes_read, meta_chunk)``; raises ``OioException`` when
    any phase exhausts its retries.
    """
    max_chunk_size = conn['backblaze'].BACKBLAZE_MAX_CHUNK_SIZE
    sha1_array = list()
    res = None
    size, sha1, md5 = _read_to_temp(max_chunk_size, source, self.checksum,
                                    temp)
    if size <= 0:
        # Nothing to upload.
        return 0, list()
    # Probe one extra byte: if the file is exactly max_chunk_size,
    # Backblaze would reject a multi-part upload because a large file
    # must contain at least 2 parts. An empty probe means the whole
    # object fits in one part, so use the simple upload path.
    first_byte = source.read(1)
    if not first_byte:
        return self._upload_chunks(conn, size, sha1, md5, temp)
    # Phase 1: start the large-file upload (with backoff retries).
    tries = TRY_REQUEST_NUMBER
    while True:
        try:
            res = conn['backblaze'].upload_part_begin(
                self.backblaze_info['bucket_name'], self.sysmeta)
            break
        except BackblazeException as b2e:
            tries -= 1
            if tries == 0:
                logger.debug('headers sent: %s' % str(b2e.headers_send))
                raise OioException('Error at the beginning of upload: %s' %
                                   str(b2e))
            else:
                eventlet.sleep(pow(2, TRY_REQUEST_NUMBER - tries))
    file_id = res['fileId']
    part_num = 1
    # Account for the probe byte already consumed from *source*.
    bytes_read = size + 1
    # Phase 2: upload parts until the source is exhausted.
    # NOTE(review): `tries` is reset once here and shared across all
    # parts, not per part — confirm that is intended.
    tries = TRY_REQUEST_NUMBER
    while True:
        while True:
            # Size the next read so we never go past the expected total
            # (self.meta_chunk[0]['size']).
            if bytes_read + max_chunk_size > self.meta_chunk[0]['size']:
                to_read = self.meta_chunk[0]['size'] - bytes_read
            else:
                to_read = max_chunk_size
            try:
                res, sha1 = conn['backblaze'].upload_part(
                    file_id, temp, part_num, sha1)
                break
            except BackblazeException as b2e:
                # Rewind the buffer so the retry re-sends the same part.
                temp.seek(0)
                tries = tries - 1
                if tries == 0:
                    logger.debug("headers sent: %s" % str(b2e.headers_send))
                    raise OioException('Error during upload: %s' % str(b2e))
                else:
                    val_tmp = pow(2, TRY_REQUEST_NUMBER - tries)
                    # Honor the server's Retry-After if present,
                    # otherwise use exponential backoff.
                    eventlet.sleep(
                        b2e.headers_received.get('Retry-After', val_tmp))
        part_num += 1
        sha1_array.append(sha1)
        # Reuse the temp buffer for the next part.
        temp.seek(0)
        temp.truncate(0)
        # The probe byte is prepended to the first refill only.
        size, sha1, md5 = _read_to_temp(to_read, source, self.checksum,
                                        temp, first_byte)
        first_byte = None
        bytes_read = bytes_read + size
        if size == 0:
            # Source exhausted: all parts sent.
            break
    # Phase 3: finish the large file (with backoff retries).
    tries = TRY_REQUEST_NUMBER
    while True:
        try:
            res = conn['backblaze'].upload_part_end(file_id, sha1_array)
            break
        except BackblazeException as b2e:
            tries = tries - 1
            if tries == 0:
                logger.warn('headers send: %s' %
                            str(b2e.headers_send))
                raise OioException('Error at the end of upload: %s' %
                                   str(b2e))
            else:
                eventlet.sleep(pow(2, TRY_REQUEST_NUMBER - tries))
    self.meta_chunk[0]['hash'] = md5
    return bytes_read, self.meta_chunk