# Command-line interface for the TBA match fetcher.
parser = argparse.ArgumentParser(description='Fetch match data from the blue alliance.')
parser.add_argument('--year', type=int, default=2020, help='Season to fetch')
parser.add_argument('--reset', help='Force re-fetch of the entire season', action='store_true')
parser.add_argument('--events', help='events to pull', default="")
args = parser.parse_args()

# Configure API key authorization: apiKey
# NOTE(review): this API key is hard-coded in source control — consider
# loading it from an environment variable instead.
configuration = v3client.Configuration()
configuration.api_key['X-TBA-Auth-Key'] = 'H5BU1gIXB57bFxNXNGQswd4E59Gs4rLuSooiPWYuu0c0zh8tBVuLQrwBJepUgXUQ'
# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed
# net.thefletcher.tbaapi.v3client.configuration.api_key_prefix['X-TBA-Auth-Key'] = 'Bearer'

# create an instance of the API class
#api_instance = v3client.TBAApi()
api_instance = v3client.EventApi(v3client.ApiClient(configuration))

#team_key = 'frc492' # str | TBA Team Key, eg `frc254`
#if_modified_since = 'if_modified_since_example' # str | Value of the `Last-Modified` header in the most recently cached response by the client. (optional)


def fetch_all_matches(year, eventsToPull="", reset=False):
    # Fetch all matches for `year`, optionally restricted to a comma-separated
    # list of event keys, resuming from a pickled cache file on disk.
    if_modified_since = ''
    result = {}
    eventsFilter = None
    if eventsToPull != "":
        eventsFilter = eventsToPull.split(',')
    outfile = 'matches_{}.pkl'.format(year)
    if os.path.exists(outfile):
        with open(outfile, 'rb') as inresult:
            try:
                result = pickle.load(inresult)
                # NOTE(review): snippet is truncated here — the remainder of
                # this function is not visible in this chunk.
from collections import OrderedDict
from dotted.collection import DottedDict
import swagger_client as fec_client
from swagger_client.rest import ApiException

import config

# Build an authenticated client for the OpenFEC disbursements endpoint.
api_key = config.api_key
configuration = fec_client.Configuration()
configuration.api_key['api_key'] = api_key
configuration.host = 'https://api.open.fec.gov/v1'
disbursements_api = fec_client.DisbursementsApi(fec_client.ApiClient(configuration))

# Identity mapping from dotted result keys to output column names; kept
# explicit so columns can be renamed or reordered in one place.
# NOTE(review): the literal is truncated at the end of this visible chunk.
dotted_result_keys_to_column_names = OrderedDict({
    'amendment_indicator': 'amendment_indicator',
    'amendment_indicator_desc': 'amendment_indicator_desc',
    'back_reference_schedule_id': 'back_reference_schedule_id',
    'back_reference_transaction_id': 'back_reference_transaction_id',
    'beneficiary_committee_name': 'beneficiary_committee_name',
    'candidate_first_name': 'candidate_first_name',
    'candidate_id': 'candidate_id',
    'candidate_last_name': 'candidate_last_name',
    'candidate_middle_name': 'candidate_middle_name',
    'candidate_name': 'candidate_name',
    'candidate_office': 'candidate_office',
    'candidate_office_description': 'candidate_office_description',
    'candidate_office_district': 'candidate_office_district',
from dotted.collection import DottedDict
import swagger_client as fec_client
from swagger_client.rest import ApiException

import config

# Build an authenticated client for the OpenFEC disbursements endpoint.
api_key = config.api_key
configuration = fec_client.Configuration()
configuration.api_key['api_key'] = api_key
configuration.host = 'https://api.open.fec.gov/v1'
disbursements_api = fec_client.DisbursementsApi(
    fec_client.ApiClient(configuration))

# Identity mapping of dotted result keys to output column names.
# NOTE(review): OrderedDict is not imported in this visible chunk —
# presumably `from collections import OrderedDict` appears earlier in the
# file; confirm. The literal itself is truncated at the chunk boundary.
dotted_result_keys_to_column_names = OrderedDict({
    'amendment_indicator': 'amendment_indicator',
    'amendment_indicator_desc': 'amendment_indicator_desc',
    'back_reference_schedule_id': 'back_reference_schedule_id',
    'back_reference_transaction_id': 'back_reference_transaction_id',
    'beneficiary_committee_name': 'beneficiary_committee_name',
    'candidate_first_name': 'candidate_first_name',
    'candidate_id':
def __init__(self,
             config_file_path="../config/api_config.json",
             client_name="r7ivm3_python_client",
             disable_insecure_request_warnings=True):
    """Build a Rapid7 InsightVM API facade from a JSON config file.

    Args:
        config_file_path: Path to a JSON file with credentials and the API
            URL under the "rapid7" key.
        client_name: Name passed to the swagger Configuration.
        disable_insecure_request_warnings: Silence urllib3's
            InsecureRequestWarning (SSL verification is disabled below).
    """
    # Idiomatic truthiness test (was `== True`).
    if disable_insecure_request_warnings:
        import urllib3
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    with open(config_file_path, 'r') as infile:
        cfg_file = json.load(infile)

    # Instantiate an instance of the r7_ivm_swag module's Configuration class
    self.config = swagger_client.Configuration(name=client_name)
    self.config.username = cfg_file["rapid7"]["credentials"]["username"]
    self.config.password = cfg_file["rapid7"]["credentials"]["password"]
    self.config.host = cfg_file["rapid7"]["api_url"]
    # NOTE(review): SSL verification is deliberately disabled — acceptable
    # for lab use only.
    self.config.verify_ssl = False
    self.config.assert_hostname = False
    self.config.proxy = None
    self.config.ssl_ca_cert = None
    self.config.connection_pool_maxsize = None
    self.config.cert_file = None
    self.config.key_file = None
    self.config.safe_chars_for_path_param = ''

    # Pre-compute the HTTP Basic Authorization header from the credentials.
    self.auth = "%s:%s" % (self.config.username, self.config.password)
    self.auth = base64.b64encode(self.auth.encode('ascii')).decode()
    self.api_client = swagger_client.ApiClient(configuration=self.config)
    self.api_client.default_headers['Authorization'] = "Basic %s" % self.auth

    # Create API resources, all sharing the one authenticated client.
    self.administration_api = swagger_client.AdministrationApi(self.api_client)
    self.asset_api = swagger_client.AssetApi(self.api_client)
    self.asset_discovery_api = swagger_client.AssetDiscoveryApi(self.api_client)
    self.asset_group_api = swagger_client.AssetGroupApi(self.api_client)
    self.credential_api = swagger_client.CredentialApi(self.api_client)
    self.policy_api = swagger_client.PolicyApi(self.api_client)
    self.policy_override_api = swagger_client.PolicyOverrideApi(self.api_client)
    self.remediation_api = swagger_client.RemediationApi(self.api_client)
    self.report_api = swagger_client.ReportApi(self.api_client)
    self.root_api = swagger_client.RootApi(self.api_client)
    self.scan_api = swagger_client.ScanApi(self.api_client)
    self.scan_engine_api = swagger_client.ScanEngineApi(self.api_client)
    self.scan_template_api = swagger_client.ScanTemplateApi(self.api_client)
    self.site_api = swagger_client.SiteApi(self.api_client)
    self.tag_api = swagger_client.TagApi(self.api_client)
    self.user_api = swagger_client.UserApi(self.api_client)
    self.vulnerability_api = swagger_client.VulnerabilityApi(self.api_client)
    self.vulnerability_check_api = swagger_client.VulnerabilityCheckApi(self.api_client)
    self.vulnerability_exception_api = swagger_client.VulnerabilityExceptionApi(self.api_client)
    self.vulnerability_result_api = swagger_client.VulnerabilityResultApi(self.api_client)
# Parameters for the timeseries query.
data_point_id = ''  # string - A datapoint ID can be fetched by using the graphical interface at https://api.aedifion.io/ui/#!/Project/get_project_datapoints
start = ''  # string - datetime e.g. '2018-06-02 16:00:00'
end = ''  # string - datetime e.g. '2018-06-02 18:00:00'

# Configure HTTP basic authorization: basicAuth
configuration = swagger_client.Configuration()
# Configure HTTP basic authorization: basicAuth
# BUGFIX(review): the original source was redacted here ("******"), which
# left invalid syntax; prompting interactively is the most plausible
# reconstruction — confirm against the upstream example.
configuration.username = input("Username: ")
configuration.password = input("Password: ")
""" Example 1 (GET Request): Retrieve, parse and print timeseries data details. """
# create an API instance
# NOTE(review): `project_id` is expected to be defined earlier in the file.
api_instance = swagger_client.ApiClient(configuration=configuration)
DatapointApi = swagger_client.DatapointApi(api_client=api_instance)
UserApi = swagger_client.UserApi(api_client=api_instance)
try:
    # Get a timeseries.
    timeseries = DatapointApi.get_datapoint_timeseries(
        project_id=project_id,
        data_point_id=data_point_id,
        start=start,
        end=end)
    timeseries_dict = timeseries.to_dict()
    print('Timeseries data in dict data structur:')
    print(timeseries_dict)
except ApiException as e:
    # BUGFIX(review): the original chunk was truncated mid-call; completed
    # with the conventional swagger example message.
    print("Exception when calling DatapointApi->get_datapoint_timeseries: %s\n" % e)
def swg_query_client(swg_config):
    """Yield a QueryApi client built from the given swagger configuration."""
    client = swagger_client.ApiClient(swg_config)
    yield swagger_client.QueryApi(client)
def action_marketpay_wallet(self, order):
    # Transfer funds between MarketPay wallets for the given sale order:
    # look up the product's target wallet, find the configured wallet
    # acquirer, obtain an OAuth client-credentials token, then issue a
    # wallet-to-wallet transfer through the swagger TransfersApi.

    # -------- Get the Wallet of each Product --------
    print(order.order_line[0].product_id.project_wallet)
    credited_wallet_id = order.order_line[0].product_id.project_wallet

    # -------- Get the MarketPay acquirer --------
    acquirer = self.env['payment.acquirer'].sudo().search(
        [('is_wallet_acquirer', '=', True)], limit=1)
    if not acquirer:
        raise UserError(
            _('No acquirer configured. Please create wallet acquirer.'))

    # Client configuration: request an OAuth token (sandbox endpoint).
    encoded = acquirer.x_marketpay_key + ":" + acquirer.x_marketpay_secret
    token_url = 'https://api-sandbox.marketpay.io/v2.01/oauth/token'
    key = 'Basic %s' % base64.b64encode(
        encoded.encode('ascii')).decode('ascii')
    data = {'grant_type': 'client_credentials'}
    headers = {
        'Authorization': key,
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    r = requests.post(token_url, data=data, headers=headers)
    rs = r.content.decode()
    response = json.loads(rs)
    token = response['access_token']

    # Default Swagger configuration for subsequent API calls.
    config = swagger_client.Configuration()
    config.host = acquirer.x_marketpay_domain
    config.access_token = token
    client = swagger_client.ApiClient(configuration=config)
    api_instance = swagger_client.Configuration.set_default(config)

    # -------- Partner values --------
    # walletid = marketpaydata.x_marketpaywallet_id
    # userid = marketpaydata.x_marketpayuser_id
    # NOTE(review): wallet/user ids below are hard-coded sandbox values.
    walletid = "9347379"
    userid = "9347382"
    currency = "EUR"
    # Amount in cents, derived from the ordered quantity of the first line.
    amount = str(int(round(order.order_line[0].product_uom_qty * 100)))
    print(amount)
    amountfee = acquirer.x_marketpay_fee

    # create an instance of the API class
    api_instance = swagger_client.TransfersApi()
    fees = swagger_client.Money(amount=amountfee, currency=currency)
    debited_founds = swagger_client.Money(amount=amount, currency=currency)
    credited_user_id = "9347382"
    debited_wallet_id = "9347379"
    transfer = swagger_client.TransferPost(
        credited_user_id=credited_user_id,
        debited_funds=debited_founds,
        credited_wallet_id=credited_wallet_id,
        debited_wallet_id=debited_wallet_id,
        fees=fees)
    try:
        api_response = api_instance.transfers_post(transfer=transfer)
        print(api_response)
    except ApiException as e:
        print("Exception when calling UsersApi->users_post: %s\n" % e)
    return True
configuration.host = 'http://127.0.0.1:8081/v1' # engine_url='tcp://160.85.2.17:2376' engine_url = 'tcp://160.85.2.17:2376' ca_cert_file = '../../../client/secure-docker-socket/ca.pem' cert_file = '../../../client/secure-docker-socket/cert.pem' cert_key_file = '../../../client/secure-docker-socket/key.pem' network_name = 'network_name2' ca_cert = read_file(ca_cert_file) cert = read_file(cert_file) cert_key = read_file(cert_key_file) # create an instance of the API class api_instance = swagger_client.NetworkApi( swagger_client.ApiClient(configuration)) network = swagger_client.Network( engine_url=engine_url, ca_cert=ca_cert, cert=cert, cert_key=cert_key, name=network_name) # Network | Definition of network to be created try: # Create a network with the given name api_instance.create_network(network) print('Test network creation finished...') api_instance.delete_network(network) print('Test network deletion finished...')
#endregion #region Test Swagger_Client Web API if run_swagger_client_api: # default get start code from nsw yaml # from __future__ import print_function import sys # for using print('function: {}'.format(sys._getframe().f_code.co_name)) import time import swagger_client from swagger_client.rest import ApiException from swagger_client import configuration from pprint import pprint # create an instance of the API class # api_instance = swagger_client.DefaultApi(swagger_client.ApiClient(configuration)) api_instance = swagger_client.DefaultApi(swagger_client.ApiClient()) game_id = 'game_id_example' # str | The ID of the game to return try: # Delete the given game api_instance.games_game_id_delete(game_id) except ApiException as e: print("Exception when calling DefaultApi->games_game_id_delete: %s\n" % e) #endregion #region Graphic Drawing if run_graphics: from generaltools import * from MyDialog import MyDialog_JoinGameDlg, MyDialog_StartGameDlg
def get_strava(last_date=False):
    """Pull recent Strava activities and write them to the Google sheet.

    Args:
        last_date: Optional "YYYY-MM-DD" string; when falsy, the most recent
            start date already present in the global `strav` frame is used.

    Side effects:
        Writes the combined activity DataFrame into worksheet `wks_1`.
    """
    ## Getting log in info
    config = configparser.ConfigParser()
    config.read("/home/irarickman/strava.ini")
    params = {
        'client_id': config['strava']['client_id'],
        'client_secret': config['strava']['client_secret'],
        'code': config['strava']['code']
    }
    auth_url = config['strava']['auth_url']
    ref_url = config['strava']['ref_url']
    athlete_id = config['strava']['athlete_id']

    if not last_date:
        ## Getting the most recent date already stored in the sheet
        last_date = datetime.datetime.strptime(
            strav.start_date.max().split(' ')[0], "%Y-%m-%d")
    else:
        last_date = datetime.datetime.strptime(last_date, "%Y-%m-%d")
    timestamp = last_date.timestamp()
    delta = datetime.datetime.now() - last_date
    # Pad by a few days so overlapping activities are re-fetched and deduped.
    date_diff = delta.days + 5

    # Exchange the authorization code for an access token.
    r_auth = requests.post(auth_url, data=params)
    response = r_auth.json()
    configuration = swagger_client.Configuration()
    configuration.access_token = response['access_token']
    # create an instance of the API class
    api_instance = swagger_client.ActivitiesApi(
        swagger_client.ApiClient(configuration))

    if date_diff < 200:
        try:
            # Get Authenticated Athlete activities in a single page.
            api_response = api_instance.get_logged_in_athlete_activities(
                after=timestamp, per_page=date_diff)
        except ApiException as e:
            print(
                "Exception when calling AthletesApi->get_logged_in_athlete: {}\n"
                .format(e))
    else:
        # Page through in chunks of 200 (the API's per-page maximum).
        num_rounds = math.ceil(date_diff / 200)
        api_response = []
        for n in range(num_rounds):
            # BUGFIX: the original tested `n == num_rounds`, which is never
            # true inside range(num_rounds), so the final partial page always
            # requested 200 items. The last round now asks for the remainder.
            if n == num_rounds - 1:
                page_num = date_diff - 200 * n
            else:
                page_num = 200
            try:
                # Get Authenticated Athlete activities, one page per round.
                activities = api_instance.get_logged_in_athlete_activities(
                    after=timestamp, page=n + 1, per_page=page_num)
            except ApiException as e:
                print(
                    "Exception when calling AthletesApi->get_logged_in_athlete: {}\n"
                    .format(e))
            api_response = api_response + activities

    # Keep only scalar columns (drop nested/structured fields).
    example = list(api_response[len(api_response) - 1].to_dict().keys())
    example = [
        x for x in example
        if x not in ['map', 'athlete', 'start_latlng', 'end_latlng']
    ]
    dicts = {}
    for n in range(len(api_response)):
        d = api_response[n].to_dict()
        dicts[n] = {variable: d[variable] for variable in example}
    index = list(dicts.keys())
    strava = pd.DataFrame([dicts[key] for key in index], index=index)

    # Derived columns.
    mult_mile = 0.000621371  # meters -> miles
    strava['miles'] = strava.distance * mult_mile
    # Workout types 1.0/11.0 are flagged as races.
    strava['race'] = strava.workout_type.apply(
        lambda x: 1 if x in [1.0, 11.0] else 0)
    strava['date_string'] = strava.start_date_local.astype(str).apply(
        lambda x: x[:10])
    strava['moving_minutes'] = strava.moving_time / 60
    strava['elapsed_minutes'] = strava.elapsed_time / 60
    strava['rest'] = strava.elapsed_minutes - strava.moving_minutes
    ## average speed is in meters/second - 2.237 to multiply to mph
    strava['avg_mph'] = strava.average_speed * 2.237
    strava.start_date = pd.to_datetime(strava.start_date_local)
    strava.sort_values('start_date', inplace=True)
    strava['time_since_last_act'] = (
        pd.to_datetime(strava.start_date) -
        pd.to_datetime(strava.start_date.shift(1))).astype('timedelta64[h]')
    strava['order'] = strava.groupby('date_string').start_date.rank()

    # Merge with what is already in the sheet, deduplicating on activity id.
    if len(strav) == 0:
        wks_1.set_dataframe(strava, (1, 1))
    else:
        all_acts = pd.concat([strava, strav])
        all_acts.drop_duplicates(['id'], keep='first', inplace=True)
        wks_1.set_dataframe(all_acts, (1, 1))
# Tail of a try-block whose `try:` appears earlier in the file (not visible).
    print(api_response)
except ApiException as e:
    print("Exception when calling ClubsApi->getClubActivitiesById: %s\n" % e)

# Block below is kept as a bare string literal: a disabled "club members"
# example retained for reference.
"""
import time
import swagger_client
from swagger_client.rest import ApiException
from pprint import pprint

# Configure OAuth2 access token for authorization: strava_oauth
configuration = swagger_client.Configuration()
configuration.access_token = '2453e1a6f852283b1ac37c10b8ac3301e95ff0a6'

# create an instance of the API class
api_instance = swagger_client.ClubsApi(swagger_client.ApiClient(configuration))
id = 494747 # int | The identifier of the club.
page = 1 # int | Page number. (optional)
per_page = 5 # int | Number of items per page. Defaults to 30. (optional) (default to 30)

try:
    # List Club Members
    api_response = api_instance.get_club_members_by_id(id, page=page, per_page=per_page)
    pprint(api_response)
except ApiException as e:
    print("Exception when calling ClubsApi->get_club_members_by_id: %s\n" % e)
"""
try:
    # List Club Activities
TODO status: deserialization problem
'''
# The line above closes a module docstring opened earlier (not visible here).

from __future__ import print_function
from pprint import pprint

import swagger_client
from swagger_client.rest import ApiException

# import ase_back_api_product
# from ase_back_api_product.rest import ApiException

# Configure OAuth2 access token for authorization: OAuth2
# ase_back_api_product.configuration.access_token = 'YOUR_ACCESS_TOKEN'

# create an instance of the API class
api_client = swagger_client.ApiClient(
    host="http://sms-back-test.base.wolfspool.at/api")
api_instance = swagger_client.DefaultApi(api_client)

size = 1  # float | Size of array (optional) (default to 1)

try:
    # Fetch and pretty-print the product list.
    api_response = api_instance.products_get()
    pprint(api_response)
except ApiException as e:
    print("Exception when calling DefaultApi->..._get: %s\n" % e)
def transcribe(): logging.info("Starting transcription client...") # configure API key authorization: subscription_key configuration = cris_client.Configuration() configuration.api_key['Ocp-Apim-Subscription-Key'] = SUBSCRIPTION_KEY # create the client object and authenticate client = cris_client.ApiClient(configuration) # create an instance of the transcription api class transcription_api = cris_client.CustomSpeechTranscriptionsApi( api_client=client) # get all transcriptions for the subscription transcriptions: List[ cris_client.Transcription] = transcription_api.get_transcriptions() logging.info("Deleting all existing completed transcriptions.") # delete all pre-existing completed transcriptions # if transcriptions are still running or not started, they will not be deleted for transcription in transcriptions: transcription_api.delete_transcription(transcription.id) logging.info("Creating transcriptions.") # Use base models for transcription. Comment this block if you are using a custom model. # Note: you can specify additional transcription properties by passing a # dictionary in the properties parameter. See # https://docs.microsoft.com/azure/cognitive-services/speech-service/batch-transcription # for supported parameters. transcription_definition = cris_client.TranscriptionDefinition( name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=RECORDINGS_BLOB_URI) # Uncomment this block to use custom models for transcription. # Model information (ADAPTED_ACOUSTIC_ID and ADAPTED_LANGUAGE_ID) must be set above. 
# if ADAPTED_ACOUSTIC_ID is None or ADAPTED_LANGUAGE_ID is None: # logging.info("Custom model ids must be set to when using custom models") # transcription_definition = cris_client.TranscriptionDefinition( # name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=RECORDINGS_BLOB_URI, # models=[cris_client.ModelIdentity(ADAPTED_ACOUSTIC_ID), cris_client.ModelIdentity(ADAPTED_LANGUAGE_ID)] # ) data, status, headers = transcription_api.create_transcription_with_http_info( transcription_definition) # extract transcription location from the headers transcription_location: str = headers["location"] # get the transcription Id from the location URI created_transcription: str = transcription_location.split('/')[-1] logging.info("Checking status.") completed = False while not completed: running, not_started = 0, 0 # get all transcriptions for the user transcriptions: List[ cris_client.Transcription] = transcription_api.get_transcriptions( ) # for each transcription in the list we check the status for transcription in transcriptions: if transcription.status in ("Failed", "Succeeded"): # we check to see if it was one of the transcriptions we created from this client if created_transcription != transcription.id: continue completed = True if transcription.status == "Succeeded": results_uri = transcription.results_urls["channel_0"] results = requests.get(results_uri) logging.info("Transcription succeeded. Results: ") logging.info(results.content.decode("utf-8")) else: logging.info("Transcription failed :{}.".format( transcription.status_message)) elif transcription.status == "Running": running += 1 elif transcription.status == "NotStarted": not_started += 1 logging.info( "Transcriptions status: " "completed (this transcription): {}, {} running, {} not started yet" .format(completed, running, not_started)) # wait for 5 seconds time.sleep(5) input("Press any key...")
import time
import swagger_client
from swagger_client.rest import ApiException
from pprint import pprint

# Exercise a locally running Grafeas server: create a note, then list notes.
client = swagger_client.ApiClient(host="http://localhost:8080")
grafeas_api = swagger_client.GrafeasApi(client)

try:
    projects_id = 'projects_id_example'  # str | Part of `parent`. This field contains the projectId for example: \"project/{project_id}
    note_id = 'note_id_example'  # str | The ID to use for this note. (optional)
    # occurrence = swagger_client.Occurrence(name='projects/project_one/occurrences/occurrence_one', note_name='note_one')
    # api_response = api_instance.create_occurrence('project_one', occurrence=occurrence)
    # Create a note in this format
    # format: = fmt.Sprintf("%s/{project_id}/%s/{entity_id}", projectKeyword, resourceKeyword)
    new_note = swagger_client.Note(
        name='projects/project_one/notes/note_one'
    )  # Note | The Note to be inserted (optional)
    grafeas_api.create_note('project_one', note_id='note_one', note=new_note)
    pprint(grafeas_api.list_notes('project_one'))
except ApiException as e:
    print("Exception when calling GrafeasApi-> : %s\n" % e)
def swg_alias_client(swg_config):
    """Yield an AliasApi client built from the given swagger configuration."""
    client = swagger_client.ApiClient(swg_config)
    yield swagger_client.AliasApi(client)
def main():
    # BattleShips client entry point: optionally start/join a game through
    # dialogs, then loop making moves via the swagger API and updating the
    # plot; falls back to pure-graphics test modes.
    if display_players:
        # getting board information from the server
        # from __future__ import print_function
        import sys  # for using print('function: {}'.format(sys._getframe().f_code.co_name))
        import time
        import swagger_client
        from swagger_client.rest import ApiException
        from swagger_client import configuration
        from pprint import pprint
        import swagger_client
        from swagger_client.models import GetGame, Move, Player, StartGame, GetPlayer
        import tkinter.messagebox as messageBox
        import copy

        # global start_new_game
        # global play_with_another_player
        start_new_game = False
        play_with_another_player = False
        # play_as_spacific_player = False
        default_server = '10.44.37.98:9000'
        game_id = ''
        player = ''
        server = ''
        player1 = ''
        player2 = ''
        api_instance_1 = swagger_client.DefaultApi(
            swagger_client.ApiClient())
        games = api_instance_1.games_get(
        )  # 'http://10.44.37.98:9000/games/')
        print('games=\n{}'.format(games))
        if messageBox.askokcancel("BattleShips", "Start a new game?",
                                  icon="question"):
            # NOTE, the order of the params provide has to be in the same order as defined in
            # MyDialog_StartGameDlg.__init__
            start_new_game = True
            game_def = None
            d = MyDialog_StartGameDlg("Start a new game", default_server,
                                      player1, player2, root)
            game_def = d.result
            if not game_def:
                messageBox.showerror(
                    'error',
                    'two player names are needed to start a new game',
                    icon="error")
            else:
                while game_def['start_another_game']:
                    d = MyDialog_StartGameDlg("Start a new game",
                                              default_server, player1,
                                              player2, root)
                    game_def = d.result
                    server = game_def['server']
                    player1 = game_def['player1']
                    player2 = game_def['player2']
                    print(
                        'starting a new game: server={}, player1={}, player2={}\n'
                        .format(server, player1, player2))
                    # NOTE, api_instance_1.games_post returns a game_id object, then id method returns the string
                    game_id = api_instance_1.games_post(
                        body=StartGame(player1, player2)).id
                    print('new game started, game_id={}'.format(game_id))
        if messageBox.askokcancel("BattleShips", "Play with another player?",
                                  icon="question"):
            play_with_another_player = True
        # NOTE, the order of the params provide has to be in the same order as defined in
        # MyDialog_JoinGameDlg.__init__
        if ((start_new_game and play_with_another_player)
                or not start_new_game):
            game_def = None
            # NOTE, if started a new game, game_id has a value, else, empty
            d = MyDialog_JoinGameDlg("Join a game", game_id, '',
                                     play_with_another_player, root)
            game_def = d.result
            # Rule I:
            # either game_id or player value must be provided, cannot be both empty
            # Rule II:
            # If play with another player, player name must be provided:
            # Rule III:
            # if player provided, it should be the self-player, and two scenarios entail:
            # 1. game_id provided, the player only plays with a specific game
            # 2. game_id not provided, the player plays multiple games that request the player's participation
            # Rules IV:
            # if player not provided, game_id must be provided, and the client plays both players
            if not game_def:
                errorMsg = 'current setting: play_with_another_player = {}, ' \
                    ' some information missing'.format(play_with_another_player)
                messageBox.showerror('error', errorMsg, icon="error")
            else:
                game_id = game_def['game_id']
                player = game_def['player']
                if (player == ''):
                    player_str = 'play two players'
                else:
                    player_str = player
                print('\njoined a game: game_id: {}, player: {}\n'.format(
                    game_id, player_str))
        # drawing graphics based on information from the server
        # NOTE, number range from 1 to 11, as last 11 is excluded,
        # chr value range from A to K, as last K is exlcluded
        # and top row and left col are for labelling
        int_list = list(range(1, 11))
        chr_val_list = list(range(ord('A'), ord('K')))
        print('int_list={}, chr_val_list={}'.format(int_list, chr_val_list))
        while (
                True
        ):  # keep the client alive checking if anyone requires a game
            game_ids = list()
            if game_id == '':
                player_info = api_instance_1.players_name_get(player)
                game_ids = player_info.games
            else:
                game_ids.append(game_id)
            print('current player = {}, game_ids = {}'.format(
                player, game_ids))
            for this_game_id in game_ids:
                print('this_game_id = {}, getting game information'.format(
                    this_game_id))
                game = api_instance_1.games_game_id_get(this_game_id)
                print('game=\n{}'.format(game))
                player1_fleet_state = get_initial_fleet_state(game)
                player2_fleet_state = copy.deepcopy(player1_fleet_state)
                # Only move when the game is undecided and it is our turn
                # (or we are playing both sides).
                if ((game.winner == '') and
                        (not play_with_another_player or
                         (play_with_another_player and game.move == player))):
                    if make_random_move:
                        grid_ref = get_random_move_with_player_knowledge(
                            int_list, chr_val_list, game)
                    elif enforce_non_adjacent_ships:
                        grid_ref, player1_fleet_state, player2_fleet_state = \
                            get_stratigic_move_with_player_knowledge_enforce_non_adjacent_ships(
                                int_list, chr_val_list, game,
                                player1_fleet_state, player2_fleet_state)
                    else:
                        grid_ref = get_stratigic_move_with_player_knowledge(
                            int_list, chr_val_list, game)
                    print('this_game_id={}, grid_ref={}. game.move={}'.
                          format(this_game_id, grid_ref, game.move))
                    move_result = api_instance_1.games_game_id_grid_ref_put(
                        this_game_id, grid_ref, body=Move(game.move))
                    print('game.move={}\ngrid_ref={}\nmove_result=\n{}'.
                          format(game.move, grid_ref, move_result))
                    game = api_instance_1.games_game_id_get(this_game_id)
                    print('this_game_id={}\ngame=\n{}'.format(
                        this_game_id, game))
                    update_plot(number_of_cells, font_szie, w,
                                global_total_rows_in_plot, fig, plotCanvas,
                                grid_texts_default, grid_colours_default,
                                game)
    elif display_graphic:
        test_realtime_display_graphics(number_of_cells, font_szie, w,
                                       global_total_rows_in_plot, fig,
                                       plotCanvas, grid_texts_default,
                                       grid_colours_default, sleep_param=0.5)
    else:
        test_title_img_pairs_list = generate_test_title_img_pairs_list()
        print('test_title_img_pairs_list = \n{}'.format(
            test_title_img_pairs_list))
        test_realtime_display_group_of_images(global_total_rows_in_plot,
                                              test_title_img_pairs_list, fig,
                                              plotCanvas)


root.mainloop()
def swg_dos_client(swg_config):
    """Yield a DOSApi client built from the given swagger configuration."""
    client = swagger_client.ApiClient(swg_config)
    yield swagger_client.DOSApi(client)
from __future__ import print_function import time import swagger_client from swagger_client.rest import ApiException from pprint import pprint # create an instance of the API class api_instance = swagger_client.ConfigurationApi(swagger_client.ApiClient()) cfg = swagger_client.NewConfiguration( name="GoogleSettings", value={"list": 3} ) # NewConfiguration | Configuration object to be added. Duplicates are allowed. try: # Adds a new configuration api_response = api_instance.add_configuration(cfg) pprint(api_response) except ApiException as e: print("Exception when calling ConfigurationApi->add_configuration: %s\n" % e)
def swg_bulk_client(swg_config):
    """Yield a BulkApi client built from the given swagger configuration."""
    client = swagger_client.ApiClient(swg_config)
    yield swagger_client.BulkApi(client)
def set_api_instance_for_token():
    """Rebuild the module-level `api_instance` with a fresh configuration."""
    global api_instance
    cfg = swagger_client.Configuration()
    api_instance = swagger_client.DefaultApi(swagger_client.ApiClient(cfg))
def main(argv):
    # Benchmark harness for inter-site L3/L2 network creation across
    # OpenStack regions: discover region endpoints from the keystone
    # catalog, index existing networks by subnet CIDR, then time
    # create/delete cycles against the inter-site API.
    opts, args = getopt.getopt(argv, "s:", ["size="])
    FIRST_REGION_NAME = "RegionOne"
    KEYSTONE_ENDPOINT = "http://{{ ansible_eno1.ipv4.address }}/identity/v3"
    #KEYSTONE_ENDPOINT = "http://192.168.57.6/identity/v3"

    def get_session_object(auth_param):
        # Wrap an auth plugin in a keystone session.
        return session.Session(auth=auth_param)

    def get_auth_object(keystone_endpoint):
        # Build a v3 password auth plugin.
        # NOTE(review): credentials were redacted in this source ("******").
        return v3.Password(
            username="******",
            password="******",
            project_name="demo",
            auth_url=keystone_endpoint,
            user_domain_id="default",
            project_domain_id="default",
            include_catalog=True,
            # Allow fetching a new token if the current one is going to expire
            reauthenticate=True)

    auth = get_auth_object(KEYSTONE_ENDPOINT)
    sess = get_session_object(auth)

    # Authenticate
    auth.get_access(sess)
    auth_ref = auth.auth_ref
    #print("Auth token: %s" %auth_ref.auth_token)

    catalog_endpoints = auth_ref.service_catalog.catalog
    #print("Resource catalog: %s" % catalog_endpoints)

    # Collect neutron endpoints and public keystone endpoints per region.
    regions_list_neu = []
    regions_list_key = []
    regions_list = []
    for obj in catalog_endpoints:
        if obj['name'] == 'neutron':
            for endpoint in obj['endpoints']:
                #print(endpoint)
                new_endpoint_obj = {
                    'region_name': endpoint["region"],
                    'neutron_url': endpoint["url"]
                }
                regions_list_neu.append(new_endpoint_obj)
        if obj['name'] == 'keystone':
            for endpoint in obj['endpoints']:
                if endpoint['interface'] == 'public':
                    new_endpoint_obj = {
                        'region_name': endpoint["region"],
                        'keystone_url': endpoint["url"]
                    }
                    regions_list_key.append(new_endpoint_obj)
                    #print(endpoint)
    print(regions_list_neu)
    print(regions_list_key)

    # Join the neutron and keystone endpoint lists on region name.
    for i in range(len(regions_list_neu)):
        neutron_endpoint = regions_list_neu[i]
        print(neutron_endpoint)
        for j in range(len(regions_list_key)):
            keystone_endpoint = regions_list_key[j]
            print(keystone_endpoint)
            if neutron_endpoint['region_name'] == keystone_endpoint[
                    'region_name']:
                new_end = {
                    'region_name': neutron_endpoint['region_name'],
                    'keystone_url': keystone_endpoint['keystone_url'],
                    'neutron_url': neutron_endpoint['neutron_url']
                }
                regions_list.append(new_end)
    print(regions_list)

    # Map each expected /24 CIDR to the (region, network) pairs using it.
    cidrs_region_network_information = {
        '10.0.0.0/24': [], '10.0.1.0/24': [], '10.0.2.0/24': [],
        '10.0.3.0/24': [], '10.0.4.0/24': [], '10.0.5.0/24': [],
        '10.0.6.0/24': [], '10.0.7.0/24': [], '10.0.8.0/24': [],
        '10.0.9.0/24': [], '10.0.10.0/24': [], '10.0.11.0/24': [],
        '10.0.12.0/24': [], '10.0.13.0/24': [], '10.0.14.0/24': [],
        '10.0.15.0/24': [], '10.0.16.0/24': [], '10.0.17.0/24': [],
        '10.0.18.0/24': [], '10.0.19.0/24': [], '10.0.20.0/24': [],
        '10.0.21.0/24': [], '10.0.22.0/24': [], '10.0.23.0/24': [],
        '10.0.24.0/24': [], '10.0.25.0/24': [], '10.0.26.0/24': [],
        '10.0.27.0/24': [], '10.0.28.0/24': [], '10.0.29.0/24': [],
        '10.0.30.0/24': [], '10.0.31.0/24': [], '10.0.32.0/24': [],
        '10.0.33.0/24': [], '10.0.34.0/24': [], '10.0.35.0/24': [],
        '10.0.36.0/24': [], '10.0.37.0/24': [], '10.0.38.0/24': [],
        '10.0.39.0/24': [], '10.0.40.0/24': [], '10.0.41.0/24': [],
        '10.0.42.0/24': [], '10.0.43.0/24': [], '10.0.44.0/24': [],
        '10.0.45.0/24': [], '10.0.46.0/24': [], '10.0.47.0/24': [],
        '10.0.48.0/24': []
    }
    #cidrs_region_network_information = {'10.0.0.0/24': [], '20.0.0.0/24': []}

    # For every region find the networks created with heat
    for i in range(len(regions_list)):
        region_name, region_auth_endpoint, region_neutron_endpoint = regions_list[
            i]['region_name'], regions_list[i][
                'keystone_url'] + '/v3', regions_list[i]['neutron_url']
        auth = get_auth_object(region_auth_endpoint)
        sess = get_session_object(auth)
        print('Getting information from region ' + str(region_name))
        # Authenticate
        auth.get_access(sess)
        auth_ref = auth.auth_ref
        net_adap = Adapter(
            auth=auth,
            session=sess,
            service_type='network',
            interface='public',
            region_name=region_name)
        per_region_net_list = net_adap.get('/v2.0/networks').json()
        region_network = per_region_net_list['networks']
        # For every network find the cidr of the subnetwork it has
        for index in range(len(region_network)):
            net_ID = region_network[index]['id']
            subnet_ID = region_network[index]['subnets'][0]
            per_net_subnet = net_adap.get('/v2.0/subnets/' +
                                          subnet_ID).json()
            subnet_cidr = per_net_subnet['subnet']['cidr']
            #print(subnet_cidr)
            test_object = {
                'region_name': region_name,
                'net_uuid': net_ID,
            }
            cidrs_region_network_information[subnet_cidr].append(test_object)
    #print(cidrs_region_network_information)

    test_type1 = "L3"
    test_type2 = "L2"
    print('starting tests')
    # Comma-separated list of test sizes from the -s/--size option.
    test_sizes_temps = (opts[0][1])
    test_sizes = test_sizes_temps.split(',')
    test_number = 100
    configuration = Configuration()
    for elem in test_sizes:
        test_size = int(elem)
        if (test_type1 == "L3"):
            # Timed L3 create/delete cycles, one results file per size.
            file_results = open(
                "results/Results_" + test_type1 + "_" + str(test_size) + "_" +
                str(datetime.datetime.now().strftime("%H:%M:%S")), "w+")
            #file_results.write("L3\n")
            #file_results.write(str(test_size)+"\n")
            #file_results.write(str(test_number)+"\n")
            for i in range(test_number):
                # Pick a random region to act as the request host.
                seed(datetime.datetime.now())
                selected_index = randint(1, len(regions_list))
                host = regions_list[selected_index - 1]
                #print(host['region_name'])
                configuration.host = host['neutron_url'][0:-5] + "7575/api"
                api_instance = swagger_client.ResourcesApi(
                    swagger_client.ApiClient(configuration))
                resource = swagger_client.Resource(
                )  # Resource | data for inter-site creation
                resource.type = "L3"
                resource.name = "Inter-site network test " + str(i)
                condition = True
                keys = []
                regions = []
                subresources = []
                # First subresource must live in the host region.
                while (condition):
                    seed(datetime.datetime.now())
                    key = random.choice(
                        list(cidrs_region_network_information))
                    condition1 = True
                    while (condition1):
                        seed(datetime.datetime.now())
                        second_element = random.randint(
                            1, len(cidrs_region_network_information[key]))
                        element = cidrs_region_network_information[key][
                            second_element - 1]
                        if element['region_name'] == host['region_name']:
                            #print(key)
                            #print(element)
                            keys.append(key)
                            regions.append(element['region_name'])
                            subresources.append(element['region_name'] + "," +
                                                element['net_uuid'])
                            condition = False
                            condition1 = False
                            break
                # Remaining subresources: distinct regions and distinct CIDRs.
                for j in range(test_size - 1):
                    #print(j)
                    condition = True
                    condition1 = True
                    while (condition and condition1):
                        seed(datetime.datetime.now())
                        key = random.choice(
                            list(cidrs_region_network_information))
                        seed(datetime.datetime.now())
                        second_element = random.randint(
                            1, len(cidrs_region_network_information[key]))
                        element = cidrs_region_network_information[key][
                            second_element - 1]
                        if element[
                                'region_name'] not in regions and key not in keys:
                            #print(key)
                            #print(element)
                            keys.append(key)
                            regions.append(element['region_name'])
                            subresources.append(element['region_name'] + "," +
                                                element['net_uuid'])
                            condition = False
                            condition1 = False
                            break
                print(i)
                print(subresources)
                print(regions)
                print(keys)
                resource.subresources = subresources
                api_response = ''
                #start = time.clock()
                start = time.time()
                try:
                    # Horizontal request to create an inter-site Resource POST
                    api_response = api_instance.vertical_create_resource(
                        resource)
                    #print(api_response['resource_global'])
                except ApiException as e:
                    print(
                        "Exception when calling VerticalApi->vertical_create_resource: %s\\n"
                        % e)
                #end = time.clock()
                end = time.time()
                print(api_response["resource_global"])
                print(start)
                print(end)
                print(end - start)
                file_results.write(str(end - start) + "\n")
                try:
                    delete_resource = api_instance.vertical_delete_resource(
                        api_response['resource_global'])
                except ApiException as e:
                    print(
                        "Exception when calling VerticalApi->vertical_create_resource: %s\n"
                        % e)
            file_results.close()
        if (test_type2 == "L2"):
            # Timed L2 create/delete cycles, one results file per size.
            file_results = open(
                "results/Results_" + test_type2 + "_" + str(test_size) + "_" +
                str(datetime.datetime.now().strftime("%H:%M:%S")), "w+")
            for i in range(test_number):
                seed(datetime.datetime.now())
                selected_index = randint(1, len(regions_list))
                host = regions_list[selected_index - 1]
                #print(host['region_name'])
                configuration.host = host['neutron_url'][0:-5] + "7575/api"
                api_instance = swagger_client.ResourcesApi(
                    swagger_client.ApiClient(configuration))
                resource = swagger_client.Resource(
                )  # Resource | data for inter-site creation
                resource.type = "L2"
                resource.name = "Inter-site network test " + str(i)
                condition = True
                regions = []
                subresources = []
                # First subresource must live in the host region.
                while (condition):
                    seed(datetime.datetime.now())
                    key = random.choice(
                        list(cidrs_region_network_information))
                    condition1 = True
                    while (condition1):
                        second_element = random.randint(
                            1, len(cidrs_region_network_information[key]))
                        element = cidrs_region_network_information[key][
                            second_element - 1]
                        if element['region_name'] == host['region_name']:
                            regions.append(element['region_name'])
                            subresources.append(element['region_name'] + "," +
                                                element['net_uuid'])
                            condition = False
                            condition1 = False
                            break
                # Remaining subresources: distinct regions, empty net uuid.
                for j in range(test_size - 1):
                    #print(j)
                    condition = True
                    while (condition):
                        seed(datetime.datetime.now())
                        new_index = randint(1, len(regions_list))
                        new_host = regions_list[new_index - 1]
                        #print(host['region_name'])
                        if new_host['region_name'] not in regions:
                            regions.append(new_host['region_name'])
                            subresources.append(new_host['region_name'] + ",")
                            condition = False
                            break
                print(i)
                print(subresources)
                print(regions)
                resource.subresources = subresources
                api_response = ""
                start = time.time()
                try:
                    # Horizontal request to create an inter-site Resource POST
                    api_response = api_instance.vertical_create_resource(
                        resource)
                    #print(api_response['resource_global'])
                except ApiException as e:
                    print(
                        "Exception when calling VerticalApi->vertical_create_resource: %s\n"
                        % e)
                end = time.time()
                print(api_response["resource_global"])
                print(end - start)
                file_results.write(str(end - start) + "\n")
                try:
                    delete_resource = api_instance.vertical_delete_resource(
                        api_response['resource_global'])
                except ApiException as e:
                    print(
                        "Exception when calling VerticalApi->vertical_create_resource: %s\n"
                        % e)
            file_results.close()
def transcribe(url):
    """Batch-transcribe the recording at *url* with the cris.ai speech service.

    Deletes all completed transcriptions on the subscription first, submits a
    new transcription for *url*, then polls every 5 seconds until it finishes.

    Args:
        url: recordings URL passed to the transcription definition.

    Returns:
        The UTF-8 decoded transcription result on success; ``None`` if the
        transcription failed (the failure is only logged).
    """
    logging.info("Starting transcription client...")

    # Your subscription key and region for the speech service
    # NOTE(review): SUBSCRIPTION_KEY is hard-coded empty here — requests will
    # be unauthenticated unless it is filled in.
    SUBSCRIPTION_KEY = ""
    SERVICE_REGION = "southcentralus"

    NAME = "Simple transcription"
    DESCRIPTION = "Simple transcription description"
    LOCALE = "en-US"

    # Set subscription information when doing transcription with custom models
    ADAPTED_ACOUSTIC_ID = None  # guid of a custom acoustic model
    ADAPTED_LANGUAGE_ID = None  # guid of a custom language model

    # configure API key authorization: subscription_key
    configuration = cris_client.Configuration()
    configuration.api_key['Ocp-Apim-Subscription-Key'] = SUBSCRIPTION_KEY
    configuration.host = "https://{}.cris.ai".format(SERVICE_REGION)

    # create the client object and authenticate
    client = cris_client.ApiClient(configuration)

    # create an instance of the transcription api class
    transcription_api = cris_client.CustomSpeechTranscriptionsApi(api_client=client)

    # get all transcriptions for the subscription
    transcriptions: List[cris_client.Transcription] = transcription_api.get_transcriptions()

    logging.info("Deleting all existing completed transcriptions.")

    # delete all pre-existing completed transcriptions
    # if transcriptions are still running or not started, they will not be deleted
    for transcription in transcriptions:
        try:
            transcription_api.delete_transcription(transcription.id)
        except ValueError:
            # ignore swagger error on empty response message body: https://github.com/swagger-api/swagger-core/issues/2446
            pass

    # Use base models for transcription. Comment this block if you are using a custom model.
    # Note: you can specify additional transcription properties by passing a
    # dictionary in the properties parameter. See
    # https://docs.microsoft.com/azure/cognitive-services/speech-service/batch-transcription
    # for supported parameters.
    logging.info("Printing URL ::: {}".format(url))
    transcription_definition = cris_client.TranscriptionDefinition(
        name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=url
    )

    # Uncomment this block to use custom models for transcription.
    # Model information (ADAPTED_ACOUSTIC_ID and ADAPTED_LANGUAGE_ID) must be set above.
    # if ADAPTED_ACOUSTIC_ID is None or ADAPTED_LANGUAGE_ID is None:
    #     logging.info("Custom model ids must be set to when using custom models")
    # transcription_definition = cris_client.TranscriptionDefinition(
    #     name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=RECORDINGS_BLOB_URI,
    #     models=[cris_client.ModelIdentity(ADAPTED_ACOUSTIC_ID), cris_client.ModelIdentity(ADAPTED_LANGUAGE_ID)]
    # )

    data, status, headers = transcription_api.create_transcription_with_http_info(transcription_definition)

    # extract transcription location from the headers
    transcription_location: str = headers["location"]

    # get the transcription Id from the location URI
    created_transcription: str = transcription_location.split('/')[-1]

    logging.info("Created new transcription with id {}".format(created_transcription))

    logging.info("Checking status.")

    completed = False

    # Poll until our transcription reaches a terminal state.
    while not completed:
        running, not_started = 0, 0

        # get all transcriptions for the user
        transcriptions: List[cris_client.Transcription] = transcription_api.get_transcriptions()

        # for each transcription in the list we check the status
        for transcription in transcriptions:
            if transcription.status in ("Failed", "Succeeded"):
                # we check to see if it was the transcription we created from this client
                if created_transcription != transcription.id:
                    continue

                completed = True

                if transcription.status == "Succeeded":
                    # Fetch the channel-0 result file and return its text.
                    results_uri = transcription.results_urls["channel_0"]
                    results = requests.get(results_uri)
                    logging.info("Transcription succeeded. Results: ")
                    logging.info(results.content.decode("utf-8"))
                    return results.content.decode("utf-8")
                else:
                    # Failure is logged only; the function falls through and
                    # implicitly returns None.
                    logging.info("Transcription failed :{}.".format(transcription.status_message))
                break
            elif transcription.status == "Running":
                running += 1
            elif transcription.status == "NotStarted":
                not_started += 1

        logging.info("Transcriptions status: "
                     "completed (this transcription): {}, {} running, {} not started yet".format(
                         completed, running, not_started))

        # wait for 5 seconds
        time.sleep(5)
# 89 - PRECIPITACIÓN # ### [1.2.0] - Codegen + API # In[ ]: configuration = swagger_client.Configuration() configuration.api_key['api_key'] = AEMET_API_KEY # In[ ]: #api_instance = swagger_client.AvisosCapApi(swagger_client.ApiClient(configuration)) api_observacion = swagger_client.ObservacionConvencionalApi(swagger_client.ApiClient(configuration)) # ### [1.2.1] - [FUNCION] - Formateo datos # In[ ]: def data_to_sparkdf(data): #Encoding "ISO-8859" data_v = data.decode(encoding ='ISO-8859-15') data_v0 = data_v # Clean the data # Step 0 for i in range(20):
# preliminaries import swagger_client base_url = 'http://localhost:8200/api' client_id = '' client_secret = '' # instantiate Auth API unauthenticated_client = swagger_client.ApiClient(base_url) #unauthenticated_authApi = api.ApiAuthApi(unauthenticated_client) # authenticate client #token = unauthenticated_authApi.login(client_id=client_id, client_secret=client_secret) #client = api.ApiClient(base_url, 'Authorization', 'token ' + token.access_token) client = swagger_client.ApiClient(base_url) # instantiate Look API api = swagger_client.DefaultApi(client) model = swagger_client
def _create_client(server, credential, debug, api_type="products"):
    """Create one Harbor API client for *api_type*.

    Args:
        server: object exposing ``endpoint`` and ``verify_ssl``.
        credential: object exposing ``username`` and ``password``; when both
            are ``None`` the client is patched for anonymous access.
        debug: forwarded to the swagger ``Configuration.debug`` flag.
        api_type: which API surface to build ("products", "chart", or one of
            the v2 keys below).

    Returns:
        The requested API client instance, or the string
        ``'Error: Wrong API type'`` for an unknown *api_type* (odd contract,
        preserved for backward compatibility).
    """
    # api_type -> v2 swagger API class.  Membership in this table also
    # decides which Configuration flavor to use, replacing the duplicated
    # hand-written tuple the original checked against.
    v2_apis = {
        'projectv2': v2_swagger_client.ProjectApi,
        'artifact': v2_swagger_client.ArtifactApi,
        'repository': v2_swagger_client.RepositoryApi,
        'scanner': v2_swagger_client.ScannerApi,
        'scan': v2_swagger_client.ScanApi,
        'scanall': v2_swagger_client.ScanAllApi,
        'preheat': v2_swagger_client.PreheatApi,
        'quota': v2_swagger_client.QuotaApi,
        'replication': v2_swagger_client.ReplicationApi,
        'registry': v2_swagger_client.RegistryApi,
        'robot': v2_swagger_client.RobotApi,
        'gc': v2_swagger_client.GcApi,
        'retention': v2_swagger_client.RetentionApi,
        'immutable': v2_swagger_client.ImmutableApi,
        'system_cve_allowlist': v2_swagger_client.SystemCVEAllowlistApi,
        'configure': v2_swagger_client.ConfigureApi,
        'user': v2_swagger_client.UserApi,
        'member': v2_swagger_client.MemberApi,
        'health': v2_swagger_client.HealthApi,
        'label': v2_swagger_client.LabelApi,
        'webhook': v2_swagger_client.WebhookApi,
    }

    if api_type in v2_apis:
        cfg = v2_swagger_client.Configuration()
    else:
        cfg = swagger_client.Configuration()
    cfg.host = server.endpoint
    cfg.verify_ssl = server.verify_ssl
    # support basic auth only for now
    cfg.username = credential.username
    cfg.password = credential.password
    cfg.debug = debug

    # Honor http(s) proxy settings from the environment.
    proxies = getproxies()
    proxy = proxies.get('http', proxies.get('all', None))
    if proxy:
        cfg.proxy = proxy

    if cfg.username is None and cfg.password is None:
        # returns {} for auth_settings for anonymous access
        import types
        cfg.auth_settings = types.MethodType(lambda self: {}, cfg)

    # Build only the requested client.  The original constructed every API
    # client eagerly in a dict literal just to .get() one of them out.
    if api_type == "chart":
        return client.ChartRepositoryApi(client.ApiClient(cfg))
    if api_type == "products":
        return swagger_client.ProductsApi(swagger_client.ApiClient(cfg))
    api_cls = v2_apis.get(api_type)
    if api_cls is None:
        return 'Error: Wrong API type'
    return api_cls(v2_swagger_client.ApiClient(cfg))
def swg_index_client(swg_config):
    """Yield an ``IndexApi`` backed by a client built from *swg_config*."""
    yield swagger_client.IndexApi(swagger_client.ApiClient(swg_config))
def transcribe(): logging.info("Starting transcription client...") # configure API key authorization: subscription_key configuration = cris_client.Configuration() configuration.api_key['Ocp-Apim-Subscription-Key'] = SUBSCRIPTION_KEY configuration.host = "https://{}.cris.ai".format(SERVICE_REGION) # create the client object and authenticate client = cris_client.ApiClient(configuration) # create an instance of the transcription api class transcription_api = cris_client.CustomSpeechTranscriptionsApi(api_client=client) # get all transcriptions for the subscription transcriptions: List[cris_client.Transcription] = transcription_api.get_transcriptions() logging.info("Deleting all existing completed transcriptions.") # delete all pre-existing completed transcriptions # if transcriptions are still running or not started, they will not be deleted for transcription in transcriptions: try: transcription_api.delete_transcription(transcription.id) except ValueError: # ignore swagger error on empty response message body: https://github.com/swagger-api/swagger-core/issues/2446 pass # Specify transcription properties by passing a dict to the properties parameter. See # https://docs.microsoft.com/azure/cognitive-services/speech-service/batch-transcription#configuration-properties # for supported parameters. properties = { # 'PunctuationMode': 'DictatedAndAutomatic', # 'ProfanityFilterMode': 'Masked', # 'AddWordLevelTimestamps': 'False', # 'AddDiarization': 'False', # 'AddSentiment': False, # 'TranscriptionResultsContainerUrl': "<results container>" } # Use base models for transcription. Comment this block if you are using a custom model. transcription_definition = cris_client.TranscriptionDefinition( name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=RECORDINGS_BLOB_URI, properties=properties ) # Uncomment this block to use custom models for transcription. # Model information (ADAPTED_ACOUSTIC_ID and ADAPTED_LANGUAGE_ID) must be set above. 
# if ADAPTED_ACOUSTIC_ID is None or ADAPTED_LANGUAGE_ID is None: # logging.info("Custom model ids must be set to when using custom models") # transcription_definition = cris_client.TranscriptionDefinition( # name=NAME, description=DESCRIPTION, locale=LOCALE, recordings_url=RECORDINGS_BLOB_URI, # models=[cris_client.ModelIdentity(ADAPTED_ACOUSTIC_ID), cris_client.ModelIdentity(ADAPTED_LANGUAGE_ID)], # properties=properties # ) data, status, headers = transcription_api.create_transcription_with_http_info(transcription_definition) # extract transcription location from the headers transcription_location: str = headers["location"] # get the transcription Id from the location URI created_transcription: str = transcription_location.split('/')[-1] logging.info("Created new transcription with id {}".format(created_transcription)) logging.info("Checking status.") completed = False while not completed: running, not_started = 0, 0 # get all transcriptions for the user transcriptions: List[cris_client.Transcription] = transcription_api.get_transcriptions() # for each transcription in the list we check the status for transcription in transcriptions: if transcription.status in ("Failed", "Succeeded"): # we check to see if it was the transcription we created from this client if created_transcription != transcription.id: continue completed = True if transcription.status == "Succeeded": results_uri = transcription.results_urls["channel_0"] results = requests.get(results_uri) logging.info("Transcription succeeded. Results: ") logging.info(results.content.decode("utf-8")) else: logging.info("Transcription failed :{}.".format(transcription.status_message)) break elif transcription.status == "Running": running += 1 elif transcription.status == "NotStarted": not_started += 1 logging.info("Transcriptions status: " "completed (this transcription): {}, {} running, {} not started yet".format( completed, running, not_started)) # wait for 5 seconds time.sleep(5) input("Press any key...")
def swg_global_client(swg_config):
    """Yield a ``GlobalApi`` backed by a client built from *swg_config*."""
    api_client = swagger_client.ApiClient(swg_config)
    yield swagger_client.GlobalApi(api_client)
from swagger_client.models.guard_agent import GuardAgent
import threading
from swagger_client.GuardAgentSecurityContext import GuardAgentSecurityContext
from swagger_client.models.security_properties import SecurityProperties
from swagger_client.models.security_properties_value import SecurityPropertiesValue
from swagger_client.models.configuration_properties import ConfigurationProperties
from swagger_client.models.configuration_properties_value import ConfigurationPropertiesValue
from swagger_client.models.data_schema import DataSchema
from swagger_client.api.default_api import DefaultApi
from swagger_client.api.orion_api_client import OrionApi
from typing import List

# Module-level API client built from a default (unconfigured) Configuration.
# NOTE(review): the bare name `swagger_client` is not bound by the `from`
# imports above — this relies on an `import swagger_client` elsewhere in the
# file; confirm.
configuration = swagger_client.Configuration()
api_instance = DefaultApi(swagger_client.ApiClient(configuration))
# api_instance = OrionApi(swagger_client.ApiClient(configuration))

# Agent identifier used by the calls below.
# NOTE(review): this shadows the builtin `id`; renaming would require
# touching every reference in the rest of the file.
id = "urn:guard:agent:openstack:00000001"


def server_thread():
    """Placeholder server thread: the connexion app setup is fully disabled."""
    import connexion
    # from swagger_server import encoder

    #app = connexion.App(__name__, specification_dir='./swagger_server/swagger/')
    #app.app.json_encoder = encoder.JSONEncoder
    #app.add_api('swagger.yaml', arguments={'title': 'First Approach to an GUARD Programmability API'})
    #app.run(port=3036)
    pass
def transcribe():
    """Batch-transcribe RECORDINGS_BLOB_URI via the Speech-to-Text v3.0 API.

    Submits a transcription built by ``transcribe_from_single_blob``, polls
    every 5 seconds until it reaches a terminal state, and logs the content
    of each result file (or the failure message).  Returns ``None``.
    """
    logging.info("Starting transcription client...")

    # configure API key authorization: subscription_key
    configuration = cris_client.Configuration()
    configuration.api_key["Ocp-Apim-Subscription-Key"] = SUBSCRIPTION_KEY
    configuration.host = f"https://{SERVICE_REGION}.api.cognitive.microsoft.com/speechtotext/v3.0"

    # create the client object and authenticate
    client = cris_client.ApiClient(configuration)

    # create an instance of the transcription api class
    api = cris_client.DefaultApi(api_client=client)

    # Specify transcription properties by passing a dict to the properties parameter. See
    # https://docs.microsoft.com/azure/cognitive-services/speech-service/batch-transcription#configuration-properties
    # for supported parameters.
    properties = {
        # "punctuationMode": "DictatedAndAutomatic",
        # "profanityFilterMode": "Masked",
        # "wordLevelTimestampsEnabled": True,
        # "diarizationEnabled": True,
        # "destinationContainerUrl": "<SAS Uri with at least write (w) permissions for an Azure Storage blob container that results should be written to>",
        # "timeToLive": "PT1H"
    }

    # Use base models for transcription. Comment this block if you are using a custom model.
    transcription_definition = transcribe_from_single_blob(RECORDINGS_BLOB_URI, properties)

    # Uncomment this block to use custom models for transcription.
    # transcription_definition = transcribe_with_custom_model(api, RECORDINGS_BLOB_URI, properties)

    # Uncomment this block to transcribe all files from a container.
    # transcription_definition = transcribe_from_container(RECORDINGS_CONTAINER_URI, properties)

    created_transcription, status, headers = api.create_transcription_with_http_info(transcription=transcription_definition)

    # get the transcription Id from the location URI
    transcription_id = headers["location"].split("/")[-1]

    # Log information about the created transcription. If you should ask for support, please
    # include this information.
    logging.info(f"Created new transcription with id '{transcription_id}' in region {SERVICE_REGION}")

    logging.info("Checking status.")

    completed = False

    while not completed:
        # wait for 5 seconds before refreshing the transcription status
        time.sleep(5)

        transcription = api.get_transcription(transcription_id)
        logging.info(f"Transcriptions status: {transcription.status}")

        # "Failed" and "Succeeded" are the terminal states that end polling.
        if transcription.status in ("Failed", "Succeeded"):
            completed = True

        if transcription.status == "Succeeded":
            # Fetch and log every result file of kind "Transcription".
            pag_files = api.get_transcription_files(transcription_id)
            for file_data in _paginate(api, pag_files):
                if file_data.kind != "Transcription":
                    continue

                audiofilename = file_data.name
                results_url = file_data.links.content_url
                results = requests.get(results_url)
                logging.info(f"Results for {audiofilename}:\n{results.content.decode('utf-8')}")
        elif transcription.status == "Failed":
            logging.info(f"Transcription failed: {transcription.properties.error.message}")