def do_lzop_get(creds, uri, path, decrypt, do_retry=True):
    """
    Get and decompress a Swift URL

    This streams the content directly to lzop; the compressed version
    is never stored on disk.

    Returns True on success.  Returns False when the object is absent
    (HTTP 404), which callers treat as a normal, non-fatal miss during
    timeline detection.
    """
    assert uri.endswith('.lzo'), 'Expect an lzop-compressed file'

    def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
        # Retry callback: log each failed fetch attempt.
        # exc_processor_cxt is the attempt count supplied by the retry
        # machinery.
        def standard_detail_message(prefix=''):
            return (prefix + ' There have been {n} attempts to fetch wal '
                    'file {uri} so far.'.format(n=exc_processor_cxt, uri=uri))
        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            # Python 2 socket.error values may be (errno, message) tuples.
            socketmsg = value[1] if isinstance(value, tuple) else value

            logger.info(
                msg='Retrying fetch because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'.".format(socketmsg)))
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying WAL file fetch from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'.format(
                        etype=typ, evalue=value,
                        etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb

    def download():
        # DeleteOnError removes the partially-written file at `path` if
        # anything below raises.
        with files.DeleteOnError(path) as decomp_out:
            with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
                conn = calling_format.connect(creds)

                # Feed the compressed object into the pipeline's stdin
                # from a separate greenlet; write_and_return_error
                # returns (rather than raises) any exception it hits.
                g = gevent.spawn(write_and_return_error, uri, conn, pl.stdin)

                # Raise any exceptions from write_and_return_error
                try:
                    exc = g.get()
                    if exc is not None:
                        raise exc
                except ClientException as e:
                    if e.http_status == 404:
                        # Do not retry if the key not present, this
                        # can happen under normal situations.
                        pl.abort()
                        logger.warning(
                            msg=('could no longer locate object while '
                                 'performing wal restore'),
                            detail=('The absolute URI that could not be '
                                    'located is {uri}.'.format(uri=uri)),
                            hint=('This can be normal when Postgres is trying '
                                  'to detect what timelines are available '
                                  'during restoration.'))
                        # Discard the (empty/partial) output file even
                        # though no exception escapes.
                        decomp_out.remove_regardless = True
                        return False
                    else:
                        raise

            logger.info(
                msg='completed download and decompression',
                detail='Downloaded and decompressed "{uri}" to "{path}"'.
                format(uri=uri, path=path))
        return True

    if do_retry:
        # Wrap download in retry-with-attempt-counting, logging each
        # failure via the callback above.
        download = retry(
            retry_with_count(log_wal_fetch_failures_on_error))(download)

    return download()
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
    """
    Get and decompress a S3 URL

    This streams the content directly to lzop; the compressed version
    is never stored on disk.

    Returns True on success.  Returns False when the key is absent
    (HTTP 404), which callers treat as a normal, non-fatal miss during
    timeline detection.
    """
    assert url.endswith('.lzo'), 'Expect an lzop-compressed file'

    def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
        # Retry callback: log each failed fetch attempt.
        # exc_processor_cxt is the attempt count supplied by the retry
        # machinery.
        def standard_detail_message(prefix=''):
            return (prefix + ' There have been {n} attempts to fetch wal '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=url))
        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            # Python 2 socket.error values may be (errno, message) tuples.
            socketmsg = value[1] if isinstance(value, tuple) else value

            logger.info(
                msg='Retrying fetch because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'."
                    .format(socketmsg)))
        elif (issubclass(typ, boto.exception.S3ResponseError) and
              value.error_code == 'RequestTimeTooSkewed'):
            # Clock-skew rejections from S3 are transient; retry.
            logger.info(msg='Retrying fetch because of a Request Skew time',
                        detail=standard_detail_message())
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying WAL file fetch from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'
                    .format(etype=typ, evalue=value,
                            etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb

    def download():
        # DeleteOnError removes the partially-written file at `path` if
        # anything below raises.
        with files.DeleteOnError(path) as decomp_out:
            key = _uri_to_key(creds, url)
            with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
                # Feed the compressed object into the pipeline's stdin
                # from a separate greenlet; write_and_return_error
                # returns (rather than raises) any exception it hits.
                g = gevent.spawn(write_and_return_error, key, pl.stdin)

                try:
                    # Raise any exceptions from write_and_return_error
                    exc = g.get()
                    if exc is not None:
                        raise exc
                except boto.exception.S3ResponseError as e:
                    if e.status == 404:
                        # Do not retry if the key not present, this
                        # can happen under normal situations.
                        pl.abort()
                        logger.info(
                            msg=('could no longer locate object while '
                                 'performing wal restore'),
                            detail=('The absolute URI that could not be '
                                    'located is {url}.'.format(url=url)),
                            hint=('This can be normal when Postgres is trying '
                                  'to detect what timelines are available '
                                  'during restoration.'))
                        # Discard the (empty/partial) output file even
                        # though no exception escapes.
                        decomp_out.remove_regardless = True
                        return False
                    else:
                        raise

            logger.info(
                msg='completed download and decompression',
                detail='Downloaded and decompressed "{url}" to "{path}"'
                .format(url=url, path=path))
        return True

    if do_retry:
        # Wrap download in retry-with-attempt-counting, logging each
        # failure via the callback above.
        download = retry(
            retry_with_count(log_wal_fetch_failures_on_error))(download)

    return download()
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
    """
    Get and decompress a WABS URL

    This streams the content directly to lzop; the compressed version
    is never stored on disk.

    Returns True on success.  Returns False when the blob is absent
    (AzureMissingResourceHttpError), which callers treat as a normal,
    non-fatal miss during timeline detection.
    """
    assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
    assert url.startswith('wabs://')

    # Either account_key or sas_token authenticates, depending on the
    # credentials supplied.
    conn = BlobService(
        creds.account_name, creds.account_key, sas_token=creds.access_token,
        protocol='https')

    def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
        # Retry callback: log each failed fetch attempt.
        # exc_processor_cxt is the attempt count supplied by the retry
        # machinery.
        def standard_detail_message(prefix=''):
            return (prefix + ' There have been {n} attempts to fetch wal '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=url))
        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            # Python 2 socket.error values may be (errno, message) tuples.
            socketmsg = value[1] if isinstance(value, tuple) else value

            logger.info(
                msg='Retrying fetch because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'."
                    .format(socketmsg)))
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying WAL file fetch from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'
                    .format(etype=typ, evalue=value,
                            etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb

    def download():
        # DeleteOnError removes the partially-written file at `path` if
        # anything below raises.
        with files.DeleteOnError(path) as decomp_out:
            with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
                # Feed the compressed blob into the pipeline's stdin
                # from a separate greenlet; write_and_return_error
                # returns (rather than raises) any exception it hits.
                g = gevent.spawn(write_and_return_error, url, conn, pl.stdin)

                try:
                    # Raise any exceptions guarded by
                    # write_and_return_error.
                    exc = g.get()
                    if exc is not None:
                        raise exc
                except AzureMissingResourceHttpError:
                    # Short circuit any re-try attempts under certain race
                    # conditions.
                    pl.abort()
                    logger.warning(
                        msg=('could no longer locate object while '
                             'performing wal restore'),
                        detail=('The absolute URI that could not be '
                                'located is {url}.'.format(url=url)),
                        hint=('This can be normal when Postgres is trying '
                              'to detect what timelines are available '
                              'during restoration.'))
                    # Discard the (empty/partial) output file even
                    # though no exception escapes.
                    decomp_out.remove_regardless = True
                    return False

            logger.info(
                msg='completed download and decompression',
                detail='Downloaded and decompressed "{url}" to "{path}"'
                .format(url=url, path=path))
        return True

    if do_retry:
        # Wrap download in retry-with-attempt-counting, logging each
        # failure via the callback above.
        download = retry(
            retry_with_count(log_wal_fetch_failures_on_error))(download)

    return download()
def do_lzop_get(creds, url, path, decrypt, do_retry=True):
    """
    Get and decompress a WABS URL

    This streams the content directly to lzop; the compressed version
    is never stored on disk.

    Returns True on success.  Returns False when the blob is absent
    (AzureMissingResourceHttpError), which callers treat as a normal,
    non-fatal miss during timeline detection.
    """
    assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
    assert url.startswith('wabs://')

    # Either account_key or sas_token authenticates, depending on the
    # credentials supplied.
    conn = BlockBlobService(
        creds.account_name, creds.account_key, sas_token=creds.access_token,
        protocol='https')

    def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
        # Retry callback: log each failed fetch attempt.
        # exc_processor_cxt is the attempt count supplied by the retry
        # machinery.
        def standard_detail_message(prefix=''):
            return (prefix + ' There have been {n} attempts to fetch wal '
                    'file {url} so far.'.format(n=exc_processor_cxt, url=url))
        typ, value, tb = exc_tup
        del exc_tup

        # Screen for certain kinds of known-errors to retry from
        if issubclass(typ, socket.error):
            # Python 2 socket.error values may be (errno, message) tuples.
            socketmsg = value[1] if isinstance(value, tuple) else value

            logger.info(
                msg='Retrying fetch because of a socket error',
                detail=standard_detail_message(
                    "The socket error's message is '{0}'."
                    .format(socketmsg)))
        else:
            # For all otherwise untreated exceptions, report them as a
            # warning and retry anyway -- all exceptions that can be
            # justified should be treated and have error messages
            # listed.
            logger.warning(
                msg='retrying WAL file fetch from unexpected exception',
                detail=standard_detail_message(
                    'The exception type is {etype} and its value is '
                    '{evalue} and its traceback is {etraceback}'
                    .format(etype=typ, evalue=value,
                            etraceback=''.join(traceback.format_tb(tb)))))

        # Help Python GC by resolving possible cycles
        del tb

    def download():
        # DeleteOnError removes the partially-written file at `path` if
        # anything below raises.
        with files.DeleteOnError(path) as decomp_out:
            with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
                # Feed the compressed blob into the pipeline's stdin
                # from a separate greenlet; write_and_return_error
                # returns (rather than raises) any exception it hits.
                g = gevent.spawn(write_and_return_error, url, conn, pl.stdin)

                try:
                    # Raise any exceptions guarded by
                    # write_and_return_error.
                    exc = g.get()
                    if exc is not None:
                        raise exc
                except AzureMissingResourceHttpError:
                    # Short circuit any re-try attempts under certain race
                    # conditions.
                    pl.abort()
                    logger.warning(
                        msg=('could no longer locate object while '
                             'performing wal restore'),
                        detail=('The absolute URI that could not be '
                                'located is {url}.'.format(url=url)),
                        hint=('This can be normal when Postgres is trying '
                              'to detect what timelines are available '
                              'during restoration.'))
                    # Discard the (empty/partial) output file even
                    # though no exception escapes.
                    decomp_out.remove_regardless = True
                    return False

            logger.info(
                msg='completed download and decompression',
                detail='Downloaded and decompressed "{url}" to "{path}"'
                .format(url=url, path=path))
        return True

    if do_retry:
        # Wrap download in retry-with-attempt-counting, logging each
        # failure via the callback above.
        download = retry(
            retry_with_count(log_wal_fetch_failures_on_error))(download)

    return download()
hint=('This can be normal when Postgres is trying ' 'to detect what timelines are available ' 'during restoration.')) decomp_out.remove_regardless = True return False else: raise logger.info( msg='completed download and decompression', detail='Downloaded and decompressed "{url}" to "{path}"'. format(url=url, path=path)) return True if do_retry: download = retry( retry_with_count(log_wal_fetch_failures_on_error))(download) return download() def sigv4_check_apply(): # Insist that one of AWS_REGION or WALE_S3_ENDPOINT is defined. # The former is for authenticating correctly with AWS SigV4. # # The latter is for alternate implementations that are # S3-interface compatible. Many, or most, of these do not support # AWS SigV4 at all and none are known to require SigV4 (and # instead use the non-region-demanding SigV2), so simplify by # relaxing the AWS_REGION requirement in that case. region = os.getenv('AWS_REGION') endpoint = os.getenv('WALE_S3_ENDPOINT')
detail=('The absolute URI that could not be ' 'located is {url}.'.format(url=url)), hint=('This can be normal when Postgres is trying ' 'to detect what timelines are available ' 'during restoration.')) return False else: raise logger.info( msg='completed download and decompression', detail='Downloaded and decompressed "{url}" to "{path}"' .format(url=url, path=path)) return True if do_retry: download = retry( retry_with_count(log_wal_fetch_failures_on_error))(download) return download() def write_and_return_error(key, stream): try: key.get_contents_to_file(stream) stream.flush() except Exception, e: return e finally: stream.close()