def service_status(self):
    """Run the check and update `last_result` accordingly.

    Implements rise/fall hysteresis: the status flips to down only after
    `fall` consecutive failures, and back to up only after `rise`
    consecutive successes.

    :returns: the current boolean status (`last_result`).
    """
    result = False
    try:
        with Timeout(self.timeout):
            result = self._check()
    except Timeout as err:
        # logger.warn() is a deprecated alias of logger.warning()
        self.logger.warning('check timed out (%s)', err)
    except Exception as err:
        # Exception.message does not exist on Python 3: use str(err),
        # which works on both major versions.
        self.logger.warning('check failed: %s', str(err))

    if self.last_result is None:
        # Very first check: pre-fill the whole result window so the
        # rise/fall logic below sees a full, consistent history.
        self.last_result = result
        for _ in range(0, self.results.size):
            self.results.append(result)
        self.logger.info('%s first check returned %s', self.name, result)

    self.results.append(result)
    if not any(self.results[-self.fall:]):
        if self.last_result:
            self.logger.info('%s status is now down after %d failures',
                             self.name, self.fall)
            self.last_result = False
    if all(self.results[-self.rise:]):
        if not self.last_result:
            self.logger.info('%s status is now up after %d successes',
                             self.name, self.rise)
            self.last_result = True
    return self.last_result
def getresponse(self):
    """Read the HTTP response from the connection."""
    # The server may buffer data before committing it to non-volatile
    # storage, so we cannot tell whether the wait happens while sending
    # the request or while reading the response; the same timeout
    # therefore covers both.
    with Timeout(self.write_timeout):
        response = self.conn.getresponse()
        return response
def run(self):
    """Main worker loop.

    Spawns `concurrency` handler greenthreads per beanstalkd endpoint,
    then periodically notifies the parent until asked to stop, finally
    shutting the handlers down (gracefully, then forcefully).
    """
    coros = []
    queue_url = self.conf.get('queue_url', 'beanstalk://127.0.0.1:11300')
    concurrency = int_value(self.conf.get('concurrency'), 10)

    server_gt = greenthread.getcurrent()

    # One pool of `concurrency` handlers per beanstalkd endpoint.
    for url in queue_url.split(';'):
        for _ in range(concurrency):
            beanstalk = Beanstalk.from_url(url)
            gt = eventlet.spawn(self.handle, beanstalk)
            gt.link(_eventlet_stop, server_gt, beanstalk)
            coros.append(gt)
            # Drop local references so the greenthread owns them.
            beanstalk, gt = None, None

    while self.alive:
        self.notify()
        try:
            eventlet.sleep(1.0)
        except AssertionError:
            # The hub was interrupted: stop serving.
            self.alive = False
            break

    self.notify()
    try:
        with Timeout(self.graceful_timeout) as t:
            # Use plain loops, not list comprehensions, for side effects.
            for coro in coros:
                coro.kill(StopServe())
            for coro in coros:
                coro.wait()
    except Timeout as te:
        if te != t:
            # Not our graceful-shutdown timeout: propagate it.
            raise
        # Graceful period expired: kill the remaining handlers.
        for coro in coros:
            coro.kill()
def test_rebuild_failure(self):
    """Rebuild must fail when all the other chunks are in error."""
    meta_chunk = self.meta_chunk()
    missing_chunk = meta_chunk.pop(1)
    # Total number of chunks in the metachunk (data + parity).
    # Computed once (the original computed it twice, identically).
    nb = self.storage_method.ec_nb_data + \
        self.storage_method.ec_nb_parity
    # Every remaining chunk answers with some kind of error
    errors = [Timeout(), 404, Exception('failure')]
    responses = [FakeResponse(random.choice(errors), '', {})
                 for _ in range(nb - 1)]

    def get_response(req):
        return responses.pop(0) if responses else FakeResponse(404)

    missing = missing_chunk['num']
    with set_http_requests(get_response) as conn_record:
        handler = ECRebuildHandler(
            meta_chunk, missing, self.storage_method)
        # TODO use specialized exception
        self.assertRaises(exc.OioException, handler.rebuild)
        self.assertEqual(len(conn_record), nb - 1)
def getresponse(self):
    """Read the HTTP response from the connection."""
    # The server may buffer data before writing it to non-volatile
    # storage: the wait can occur while sending as well as while
    # reading, hence the same timeout is applied to both operations.
    with Timeout(self.write_timeout):
        resp = self.conn.getresponse()
        if self.perfdata is not None:
            # Accumulate the per-chunk-URL upload duration.
            rawx_perfdata = self.perfdata.setdefault('rawx', dict())
            chunk_url = self.conn.chunk['url']
            elapsed = monotonic_time() - self.conn.upload_start
            previous = rawx_perfdata.get(chunk_url, 0.0)
            rawx_perfdata[chunk_url] = previous + elapsed
        return resp
def frag_iter():
    """Yield rebuilt fragments until a source fails or is exhausted."""
    pile = GreenPile(len(resps))
    while True:
        for resp in resps:
            pile.spawn(_get_frag, resp)
        try:
            with Timeout(self.read_timeout):
                frag = [frag for frag in pile]
        except Timeout as to:
            logger.error('ERROR while rebuilding: %s', to)
            # Must stop here: without this break, `frag` would be
            # unbound (first iteration) or stale on the check below.
            break
        except Exception:
            logger.exception('ERROR while rebuilding')
            break
        if not all(frag):
            break
        rebuilt_frag = self._reconstruct(frag)
        yield rebuilt_frag
def _get_response(self, chunk, headers):
    """Send a GET request for `chunk`.

    :returns: the HTTP response object, or None if the connection
        failed, timed out, or the server answered a non-200 status.
    """
    response = None
    parsed = urlparse(chunk.get('real_url', chunk['url']))
    try:
        with green.ConnectionTimeout(self.connection_timeout):
            conn = io.http_connect(
                parsed.netloc, 'GET', parsed.path, headers)
        with Timeout(self.read_timeout):
            response = conn.getresponse()
        if response.status != 200:
            logger.warning('Invalid GET response from %s: %s %s',
                           chunk, response.status, response.reason)
            response = None
    except (SocketError, Timeout) as err:
        logger.error('ERROR fetching %s: %s', chunk, err)
    except Exception:
        logger.exception('ERROR fetching %s', chunk)
    return response
def test_rebuild_parity_errors(self):
    """Rebuilding a parity chunk must succeed even when one of the
    remaining chunks answers with an error."""
    test_data = (b'1234' * self.storage_method.ec_segment_size)[:-777]
    ec_chunks = self._make_ec_chunks(test_data)
    # break one parity chunk
    missing_chunk_body = ec_chunks.pop(-1)
    meta_chunk = self.meta_chunk()
    missing_chunk = meta_chunk.pop(-1)
    # add also error on another chunk
    for error in (Timeout(), 404, Exception('failure')):
        headers = {}
        responses = [FakeResponse(200, chunk_body, headers)
                     for chunk_body in ec_chunks]
        # One randomly chosen chunk answers with the error
        faulty_idx = random.randint(0, len(responses) - 1)
        responses[faulty_idx] = FakeResponse(error, b'', {})

        def get_response(req):
            return responses.pop(0) if responses else FakeResponse(404)

        missing = missing_chunk['num']
        nb = self.storage_method.ec_nb_data + \
            self.storage_method.ec_nb_parity
        with set_http_requests(get_response) as conn_record:
            handler = ECRebuildHandler(
                meta_chunk, missing, self.storage_method)
            expected_chunk_size, stream = handler.rebuild()
            if expected_chunk_size is not None:
                self.assertEqual(
                    expected_chunk_size, len(missing_chunk_body))
            result = b''.join(stream)
            self.assertEqual(len(result), len(missing_chunk_body))
            self.assertEqual(
                self.checksum(result).hexdigest(),
                self.checksum(missing_chunk_body).hexdigest())
            self.assertEqual(len(conn_record), nb - 1)
def test_write_timeout(self):
    """A write timeout on one chunk must not fail the whole upload."""
    checksum = self.checksum()
    source = empty_stream()
    size = CHUNK_SIZE
    meta_chunk = self.meta_chunk()
    # All chunks succeed except the last one, which times out
    resps = [201] * (len(meta_chunk) - 1)
    resps.append(Timeout(1.0))
    with set_http_connect(*resps):
        handler = ReplicatedMetachunkWriter(
            self.sysmeta, meta_chunk, checksum, self.storage_method)
        bytes_transferred, checksum, chunks = handler.stream(source, size)

    self.assertEqual(len(chunks), len(meta_chunk) - 1)
    for i in range(len(meta_chunk) - 1):
        self.assertEqual(chunks[i].get('error'), None)
    # # JFS: starting at branche 3.x, it has been preferred to save only
    # # the chunks that succeeded.
    # self.assertEqual(
    #     chunks[len(meta_chunk) - 1].get('error'), '1.0 second')
    self.assertEqual(bytes_transferred, 0)
    self.assertEqual(checksum, EMPTY_MD5)
def read(self, size):
    # Test double: always simulate a read timeout, whatever `size` is
    # (presumably eventlet's Timeout — TODO confirm against the import).
    raise Timeout(1.0)