def test_linear_retry_interval(self):
    # Arrange
    context_stub = RetryContext()

    for i in range(10):
        # Act
        retry_policy = LinearRetry(backoff=1, random_jitter_range=3)
        backoff = retry_policy._backoff(context_stub)

        # Assert backoff interval is within +/- 3 of 1
        self.assertTrue(0 <= backoff <= 4)

        # Act
        retry_policy = LinearRetry(backoff=5, random_jitter_range=3)
        backoff = retry_policy._backoff(context_stub)

        # Assert backoff interval is within +/- 3 of 5
        self.assertTrue(2 <= backoff <= 8)

        # Act
        retry_policy = LinearRetry(backoff=15, random_jitter_range=3)
        backoff = retry_policy._backoff(context_stub)

        # Assert backoff interval is within +/- 3 of 15
        self.assertTrue(12 <= backoff <= 18)
async def ExcelToCsv(From_Blob, From_Folder, To_Blob, To_Folder,
                     remove_file_in_to_blob=False, strSheetList="all"):
    try:
        output = ""
        logging.info("Process Started")

        STORAGEACCOUNTNAME = os.environ['BLOB_ACCOUNT_NAME']
        STORAGEACCOUNTKEY = os.environ['BLOB_FUNCTION_KEY']
        CONTAINERNAME = From_Blob
        FOLDERNAME = From_Folder
        UPLOADCONTAINERNAME = To_Blob
        TARGETFOLDERNAME = To_Folder
        TEMPPATH = str(tempfile.gettempdir())
        EXCELINPUTTEMPPATH = TEMPPATH + "/excelinput/" + FOLDERNAME
        CSVOUTPUTTEMPPATH = TEMPPATH + "/csvoutput/" + FOLDERNAME

        # download from blob
        t1 = time.time()
        blob_service = BlockBlobService(account_name=STORAGEACCOUNTNAME, account_key=STORAGEACCOUNTKEY)
        blob_service.retry = LinearRetry().retry

        # Remove the files in To_Blob before processing starts
        if remove_file_in_to_blob and TARGETFOLDERNAME != "":
            logging.info('Remove File in To Blob, variable set as True and Target folder name is not empty')
            delete_azure_files_in_container(blob_service, UPLOADCONTAINERNAME, TARGETFOLDERNAME)

        logging.info('Connected to blob')
        generator = blob_service.list_blobs(CONTAINERNAME, prefix=FOLDERNAME)
        blobcount = 0

        # create the Excel input folder in the temp path
        shutil.rmtree(EXCELINPUTTEMPPATH, ignore_errors=True)
        os.makedirs(EXCELINPUTTEMPPATH)

        # create the CSV output folder in the temp path
        shutil.rmtree(CSVOUTPUTTEMPPATH, ignore_errors=True)
        os.makedirs(CSVOUTPUTTEMPPATH)
        logging.info('csvoutput temp folder created: {}'.format(CSVOUTPUTTEMPPATH))

        for blob in generator:
            BLOBNAME = blob.name
            strFileNameInTemp = re.sub(r'.*/', '', BLOBNAME)
            logging.info("File Name is {}".format(strFileNameInTemp))

            # Skip the folder placeholder blob; if this is not the file we are
            # looking for, move on to the next one.
            if "placeholder.txt" in BLOBNAME:
                continue
            # check that the file is an Excel (xls/xlsx) file
            elif "xls" not in strFileNameInTemp.lower():
                logging.info(strFileNameInTemp + " not xlsx")
                continue

            await process_excel_in_blob(blob_service, CONTAINERNAME, BLOBNAME, strFileNameInTemp,
                                        EXCELINPUTTEMPPATH, CSVOUTPUTTEMPPATH, strSheetList,
                                        TARGETFOLDERNAME, UPLOADCONTAINERNAME)
            blobcount = blobcount + 1

        t2 = time.time()
        if blobcount > 0:
            output = "success. Time Taken- " + str(t2 - t1) + "."
        else:
            # no blobs were processed
            output = "Error - No file being uploaded for Processing"
    except Exception as Ex:
        output = "Error -" + str(Ex)
    return output
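# ExcelToCsv above calls two helpers defined elsewhere in its project:
# delete_azure_files_in_container and process_excel_in_blob. The following is a minimal,
# hypothetical sketch of the delete helper only (not the original code), assuming the
# legacy BlockBlobService API: list the blobs under the target folder prefix and delete
# them one by one.
def delete_azure_files_in_container(blob_service, container_name, folder_name):
    for blob in blob_service.list_blobs(container_name, prefix=folder_name):
        blob_service.delete_blob(container_name, blob.name)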
def setUp(self):
    super(StorageTableBatchTest, self).setUp()

    self.ts = self._create_storage_service(TableService, self.settings)
    self.ts.retry = LinearRetry(backoff=1, max_attempts=2).retry
    self.test_tables = []
    self.table_name = self._get_table_reference()

    if not self.is_playback():
        self.ts.create_table(self.table_name)
def test_queue_service_stats_when_unavailable(self):
    # Arrange
    qs = self._create_storage_service(QueueService, self.settings)
    qs.response_callback = self.override_response_body_with_unavailable_status
    qs.retry = LinearRetry(backoff=1).retry

    # Act
    stats = qs.get_queue_service_stats()

    # Assert
    self._assert_stats_unavailable(stats)
def test_blob_service_stats_when_unavailable(self):
    # Arrange
    bs = self._create_storage_service(BlockBlobService, self.settings)
    bs.response_callback = self.override_response_body_with_unavailable_status
    bs.retry = LinearRetry(backoff=1).retry

    # Act
    stats = bs.get_blob_service_stats()

    # Assert
    self._assert_stats_unavailable(stats)
def retries(self):
    # By default, retries are performed with an exponential backoff.
    # Any custom retry logic may be used by simply defining a retry function,
    # but several easy pre-written options are available with modifiable settings.
    client = TableService(account_name='<account_name>', account_key='<account_key>')

    # Use an exponential retry, but modify the backoff settings.
    # Here, we increase the initial back off, increase the number of retry attempts
    # and decrease the base of the exponential backoff.
    client.retry = ExponentialRetry(initial_backoff=30, increment_power=2, max_attempts=5).retry

    # Use a default linear retry policy instead
    client.retry = LinearRetry().retry

    # Turn off retries
    client.retry = no_retry
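# A minimal sketch (illustration only, not part of the sample above) of the "custom retry
# function" mentioned in retries(): the client's retry hook is a callable that receives a
# RetryContext and returns the number of seconds to wait before the next attempt, or None
# to stop retrying.
def custom_retry(context):
    # Retry up to three times with a flat two-second wait; context.count is the number
    # of attempts made so far.
    return 2 if context.count < 3 else None

# It is assigned the same way as the built-in policies, e.g. client.retry = custom_retry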
def test_linear_retry(self):
    # Arrange
    container_name = self.get_resource_name()
    service = self._create_storage_service(BlockBlobService, self.settings)
    service.retry = LinearRetry(backoff=1).retry

    # Force the create call to 'timeout' with a 408
    service.response_callback = ResponseCallback(status=201, new_status=408).override_status

    # Act
    try:
        created = service.create_container(container_name)
    finally:
        service.delete_container(container_name)

    # Assert
    # The initial create will return 201, but we overwrite it and retry.
    # The retry will then get a 409 and return false.
    self.assertFalse(created)
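# Rough sketch of the ResponseCallback helper used by the retry tests above (the real
# helper lives in the shared test-case module of the storage SDK tests; treat this as an
# approximation). It is installed as service.response_callback and rewrites a matching
# HTTP status code so the retry policy sees a retryable error such as 408.
class ResponseCallback(object):
    def __init__(self, status=None, new_status=None):
        self.status = status
        self.new_status = new_status

    def override_status(self, response):
        # Replace the real status (e.g. the 201 from a successful create) with the
        # configured one (e.g. 408) before retry evaluation runs.
        if response.status == self.status:
            response.status = self.new_status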
def upload_normalized(scene, bands, product):
    from azure.storage.common.retry import (
        # ExponentialRetry,
        LinearRetry,
        # no_retry
    )
    connection_string = open(
        '/home/amit/telluric/secrets/azure-storage-connection-string').read().rstrip()
    svc = BlockBlobService(connection_string=connection_string)
    svc.retry = LinearRetry(max_attempts=20).retry
    container = 'calval'
    blob_prefix = scene.sceneinfo.blob_prefix(product, scene.timestamp)
    fname_prefix = scene.sceneinfo.fname_prefix(product, scene.timestamp)

    # Upload one normalized GeoTIFF per band, with a progress bar per blob.
    for band in bands:
        local_path = scene.get_normalized_path(band, product=product)
        filelen = os.stat(local_path).st_size
        blob_name = '{}/{}_{}.tif'.format(blob_prefix, fname_prefix, band)
        with TqdmUpTo(unit='B', unit_scale=True, miniters=1, total=filelen, desc=blob_name) as t:
            svc.create_blob_from_path(container, blob_name, local_path,
                                      progress_callback=t.update_to)

    # Upload the scene metadata alongside the band files.
    local_path = scene.get_metadata_path(product)
    filelen = os.stat(local_path).st_size
    blob_name = '{}/{}_metadata.json'.format(blob_prefix, fname_prefix)
    with TqdmUpTo(unit='B', unit_scale=True, miniters=1, total=filelen, desc=blob_name) as t:
        svc.create_blob_from_path(container, blob_name, local_path,
                                  progress_callback=t.update_to)
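# TqdmUpTo above is assumed to be the usual tqdm adapter class (a hypothetical sketch,
# not necessarily this project's own definition): it turns the legacy Azure SDK's
# progress_callback(current, total) calls into tqdm progress-bar updates.
from tqdm import tqdm

class TqdmUpTo(tqdm):
    def update_to(self, current, total=None):
        # current/total are bytes transferred so far and total bytes, as reported by
        # create_blob_from_path's progress_callback.
        if total is not None:
            self.total = total
        self.update(current - self.n)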
def test_retry_callback_and_retry_context(self):
    # Arrange
    container_name = self.get_resource_name()
    service = self._create_storage_service(BlockBlobService, self.settings)
    service.retry = LinearRetry(backoff=1).retry

    # Force the create call to 'timeout' with a 408
    service.response_callback = ResponseCallback(status=201, new_status=408).override_status

    def assert_exception_is_present_on_retry_context(retry_context):
        self.assertIsNotNone(retry_context.exception)

    service.retry_callback = assert_exception_is_present_on_retry_context

    # Act
    try:
        service.create_container(container_name)
    finally:
        service.response_callback = None
        service.delete_container(container_name)
def test_retry_on_socket_timeout(self):
    # Arrange
    container_name = self.get_resource_name()
    service = self._create_storage_service(BlockBlobService, self.settings)
    service.retry = LinearRetry(backoff=1).retry

    # make the connect timeout reasonable, but packet timeout truly small,
    # to make sure the request always times out
    service.socket_timeout = (11, 0.000000000001)

    # Act
    try:
        service.create_container(container_name)
    except AzureException as e:
        # Assert
        # This call should succeed on the server side, but fail on the client side due to socket timeout
        self.assertTrue('read timeout' in str(e),
                        'Expected socket timeout but got different exception.')
        pass
    finally:
        # we must make the timeout normal again to let the delete operation succeed
        service.socket_timeout = (11, 11)
        service.delete_container(container_name)
def run_circuit_breaker():
    # Name of image to use for testing.
    image_to_upload = "HelloWorld.png"

    global blob_client
    global container_name
    try:
        # Create a reference to the blob client and container using the storage account name and key
        blob_client = BlockBlobService(account_name, account_key)

        # Make the container unique by using a UUID in the name.
        container_name = "democontainer" + str(uuid.uuid4())
        blob_client.create_container(container_name)
    except Exception as ex:
        print("Please make sure you have put the correct storage account name and key.")
        print(ex)

    # Define a reference to the actual blob and upload the block_blob to the newly created container
    full_path_to_file = os.path.join(os.path.dirname(__file__), image_to_upload)
    blob_client.create_blob_from_path(container_name, image_to_upload, full_path_to_file)

    # Set the location mode to secondary, so you can check just the secondary data center.
    blob_client.location_mode = LocationMode.SECONDARY
    blob_client.retry = LinearRetry(backoff=0).retry

    # Before proceeding, wait until the blob has been replicated to the secondary data center.
    # Loop and check for the presence of the blob once a second until it hits 60 seconds
    # or until it finds it.
    counter = 0
    while counter < 60:
        counter += 1
        sys.stdout.write(
            "\nAttempt {0} to see if the blob has replicated to the secondary storage yet.".format(counter))
        sys.stdout.flush()
        if blob_client.exists(container_name, image_to_upload):
            break

        # Wait a second, then loop around and try again.
        # When it's finished replicating to the secondary, continue.
        time.sleep(1)

    # Set the starting LocationMode to Primary, then Secondary.
    # Here we use the linear retry by default, but allow it to retry to secondary if
    # the initial request to primary fails.
    # Note that the default is Primary. You must have RA-GRS enabled to use this.
    blob_client.location_mode = LocationMode.PRIMARY
    blob_client.retry = LinearRetry(max_attempts=retry_threshold, backoff=1).retry

    '''
    ************INSTRUCTIONS**************
    To perform the test, first replace the 'accountname' and 'accountkey' with your storage account name and key.
    Every time it calls get_blob_to_path it will hit the response_callback function.

    Next, run this app. While this loop is running, pause the program by pressing any key,
    and put the intercept code in Fiddler (that will intercept and return a 503).

    For instructions on modifying Fiddler, look at the Fiddler_script.text file in this project.
    There are also full instructions in the ReadMe_Instructions.txt file included in this project.

    After adding the custom script to Fiddler, calls to primary storage will fail with a retryable
    error which will trigger the Retrying event (above).
    Then it will switch over and read the secondary. It will do that 20 times, then try to
    switch back to the primary.
    After seeing that happen, pause this again and remove the intercepting Fiddler code.
    Then you'll see it return to the primary and finish.
    '''

    print("\n\nThe application will pause at 200 unit intervals")

    for i in range(0, 1000):
        if blob_client.location_mode == LocationMode.SECONDARY:
            sys.stdout.write("S{0} ".format(str(i)))
        else:
            sys.stdout.write("P{0} ".format(str(i)))
        sys.stdout.flush()

        try:
            # This function is called immediately after retry evaluation is performed.
            # It is used to trigger the change from primary to secondary and back.
            blob_client.retry_callback = retry_callback

            # Download the file
            blob_client.get_blob_to_path(container_name, image_to_upload,
                                         str.replace(full_path_to_file, ".png", "Copy.png"))

            # Set the application to pause at 200 unit intervals to implement simulated failures
            if i == 200 or i == 400 or i == 600 or i == 800:
                sys.stdout.write("\nPress the Enter key to resume")
                sys.stdout.flush()
                if sys.version_info[0] < 3:
                    raw_input()
                else:
                    input()
        except Exception as ex:
            print(ex)
        finally:
            # Force an exists call to succeed by resetting the status
            blob_client.response_callback = response_callback

    # Clean up resources
    blob_client.delete_container(container_name)
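# Hypothetical sketches of the two module-level callbacks referenced by
# run_circuit_breaker (retry_callback and response_callback). The real circuit-breaker
# sample defines its own versions along with the retry_threshold constant, so these are
# illustrations of the intent only, not the original code.
secondary_read_count = 0

def retry_callback(retry_context):
    # Called after every retry evaluation; once reads against the primary keep failing,
    # flip the client over to the secondary endpoint.
    global secondary_read_count
    if blob_client.location_mode == LocationMode.PRIMARY and \
            retry_context.count >= retry_threshold:
        blob_client.location_mode = LocationMode.SECONDARY
        secondary_read_count = 0

def response_callback(response):
    # Called with every response; after enough successful secondary reads, point the
    # client back at the primary endpoint and let it try again.
    global secondary_read_count
    if blob_client.location_mode == LocationMode.SECONDARY:
        secondary_read_count += 1
        if secondary_read_count >= 20:
            blob_client.location_mode = LocationMode.PRIMARY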