def diffLatest(self):
    if self.latest_df is None:
        raise IsitfitCliError(
            "Internal dev error: Call TagsPush::pullLatest before TagsPush::diffLatest",
            self.ctx)

    if self.csv_df is None:
        raise IsitfitCliError(
            "Internal dev error: Call TagsPush::read_csv before TagsPush::diffLatest",
            self.ctx)

    # diff columns
    from .tagsCsvDiff import TagsCsvDiff
    td = TagsCsvDiff(self.latest_df, self.csv_df)

    td.noChanges()
    td.noNewInstances()
    td.getDiffCols()
    td.renamedTags()
    td.newTags()
    td.droppedTags()
    # print(td.migrations, td.old_minus_new, td.new_minus_old)
    td.anyRemaining()

    # get migrations
    import pandas as pd
    self.mig_df = pd.DataFrame(td.migrations, columns=['action', 'old', 'new'])

    logger.debug("")
    logger.debug("Tag migrations")
    if self.mig_df.shape[0] == 0:
        logger.debug("None")
    else:
        logger.debug(self.mig_df)

    logger.debug("")
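
# A minimal usage sketch of the intended call order, reconstructed from the
# error messages above (constructor arguments are assumptions for illustration):
#
#   tp = TagsPush('tags.csv', ctx)
#   tp.read_csv()          # load the edited csv into self.csv_df
#   tp.validateTagsFile()  # schema-check the csv
#   tp.pullLatest()        # fetch current tags into self.latest_df
#   tp.diffLatest()        # compute self.mig_df, raising on conflicts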
def anyRemaining(self):
    if len(self.new_minus_old) > 0:
        msg_1 = "Aborting `tags push` due to new tags in pushed csv file, and user indicated they shouldn't be created"
        msg_2 = ", ".join(sorted(list(self.new_minus_old)))
        msg_3 = "%s: %s" % (msg_1, msg_2)
        raise IsitfitCliError(msg_3)

    if len(self.old_minus_new) > 0:
        msg_1 = "Aborting `tags push` due to missing tags in pushed csv file, and user indicated they shouldn't be deleted"
        msg_2 = ", ".join(sorted(list(self.old_minus_new)))
        msg_3 = "%s: %s" % (msg_1, msg_2)
        raise IsitfitCliError(msg_3)
def migrate_all(self):
    if self.df_mig.shape[0] == 0:
        if self.quiet:
            return

        raise IsitfitCliError("No migrations to execute")

    for i in range(self.df_mig.shape[0]):
        self._migrate_single(i)
def map_aws_datadog(self):
    # Build a map from AWS ID to Datadog hostname
    # FIXME Should probably paginate in pages of 100 hosts using combinations of start and count.
    # Leaving for later until this proves to be a problem for isitfit memory consumption during execution.
    # https://docs.datadoghq.com/api/?lang=python#search-hosts
    # https://docs.datadoghq.com/agent/faq/how-datadog-agent-determines-the-hostname/?tab=agentv6v7#potential-host-names
    MAX_COUNT = 1000  # safety net; will cause problems for larger infra
    h_rev = datadog.api.Hosts.search(count=MAX_COUNT)

    if 'status' in h_rev:
        if h_rev['status'] == 'error':
            msg = "Datadog API/APP keys configured wrong?"
            if 'errors' in h_rev:
                msg += " Got error: %s" % ", ".join(h_rev['errors'])

            from isitfit.cli.click_descendents import IsitfitCliError
            raise IsitfitCliError(msg)

    # alternatively, can use host_name here.
    # Note the similar field used in self.hosts_search below.
    # If this field is changed from name to host_name, remember to change it below also
    h_rev = {
        x['aws_id']: x['name']
        for x in h_rev['host_list']
        if 'aws_id' in x and 'name' in x
    }
    return h_rev
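
# For illustration, the returned map looks like the following
# (IDs and hostnames are made up):
#
#   {'i-0abc123def456': 'web-1.example.com',
#    'i-0fed654cba321': 'worker-2.example.com'}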
def fetch(self):
    logger.debug("TagsSuggestBasic::fetch")
    logger.info("Counting EC2 instances")
    n_ec2_total = len(list(self.ec2_resource.instances.all()))
    msg_total = "Found a total of %i EC2 instances" % n_ec2_total
    if n_ec2_total == 0:
        from isitfit.cli.click_descendents import IsitfitCliError
        raise IsitfitCliError(msg_total, self.ctx)

    logger.warning(msg_total)

    self.tags_list = []
    from tqdm import tqdm
    desc = "Scanning EC2 instances"
    ec2_all = self.ec2_resource.instances.all()
    for ec2_obj in tqdm(ec2_all, total=n_ec2_total, desc=desc, initial=1):
        if ec2_obj.tags is None:
            tags_dict = {}
        else:
            tags_dict = self.tags_to_dict(ec2_obj)

        tags_dict['instance_id'] = ec2_obj.instance_id
        self.tags_list.append(tags_dict)

    # convert to pandas dataframe when done
    self.tags_df = self._list_to_df()
def add_listener(self, event, listener):
    if event not in self.listeners:
        from isitfit.cli.click_descendents import IsitfitCliError
        err_msg = "Internal dev error: Event %s is not supported for listeners. Use: %s" % (
            event, ",".join(self.listeners.keys()))
        raise IsitfitCliError(err_msg, self.ctx)

    self.listeners[event].append(listener)
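
# A minimal usage sketch, assuming the owning class pre-populates
# self.listeners with its supported event names (the 'ec2' event name
# and the handler below are assumptions for illustration):
#
#   def on_ec2(context):
#       return context
#
#   mainManager.add_listener('ec2', on_ec2)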
def run_iterator(man2_i):
    try:
        r_i = list(man2_i.iterate_event())
    except botocore.exceptions.ClientError as e:
        # display error message without the frightening traceback
        from isitfit.cli.click_descendents import IsitfitCliError
        raise IsitfitCliError(str(e))

    return r_i
def validateTagsFile(self):
    if self.csv_df is None:
        raise IsitfitCliError(
            "Internal dev error: Call TagsPush::read_csv before TagsPush::validateTagsFile",
            self.ctx)

    csv_dict = self.csv_df.to_dict(orient='records')
    from schema import Schema, Optional, SchemaError
    csv_schema = Schema([{
        'instance_id': str,
        'Name': str,
        Optional(str): str
    }])
    try:
        csv_schema.validate(csv_dict)
    except SchemaError as e:
        raise IsitfitCliError("CSV is not a tags file: %s" % str(e), self.ctx)
def test_isitfitCliError():
    from isitfit.cli.click_descendents import IsitfitCliError

    class MockContext:
        obj = {'bar': 1}
        command = None

    ctx = MockContext()
    with pytest.raises(IsitfitCliError) as e:
        raise IsitfitCliError("foo", ctx)
def read_csv(self):
    import pandas as pd
    try:
        # read all fields as string
        self.csv_df = pd.read_csv(self.csv_fn, dtype=str)
    except pd.errors.EmptyDataError as e_info:
        raise IsitfitCliError("Error reading csv: %s" % str(e_info), self.ctx)

    if self.csv_df.shape[0] == 0:
        raise IsitfitCliError("Tags csv file is empty", self.ctx)

    if 'instance_id' not in self.csv_df.columns:
        raise IsitfitCliError("Missing column instance_id", self.ctx)

    # sort by instance ID
    self.csv_df = self.csv_df.sort_values('instance_id', ascending=True)

    # fill na with ''
    self.csv_df = self.csv_df.fillna(value='')
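
# Example of a csv that passes read_csv and validateTagsFile
# (values are made up; per the schema in validateTagsFile, 'instance_id'
# and 'Name' are required, and any other string column is a free-form tag):
#
#   instance_id,Name,Environment
#   i-0abc123def456,web-1,production
#   i-0fed654cba321,worker-2,staging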
def noNewInstances(self):
    inst_old = set(self.df_old.instance_id)
    inst_new = set(self.df_new.instance_id)
    inst_created = inst_new - inst_old
    inst_created = sorted(list(inst_created))
    if len(inst_created) > 0:
        msg_1 = "Found new instance IDs: %s%s"
        msg_2 = ", ".join(inst_created[:5])
        msg_3 = "..." if len(inst_created) > 5 else ""
        msg_4 = msg_1 % (msg_2, msg_3)
        raise IsitfitCliError(msg_4)
def noChanges(self):
    """
    Fast way to identify that there are no changes
    """
    to_json = lambda x: x.sort_values('instance_id', ascending=True)[
        sorted(list(x.columns))].to_json(orient='records')
    json_old = to_json(self.df_old)
    json_new = to_json(self.df_new)
    if json_old == json_new:
        raise IsitfitCliError("Aborting `tags push` due to no changes detected.")
def handle_pre(self, context_pre):
    from isitfit.utils import ping_matomo

    # set up caching if requested
    self.fetch_envvars()
    if self.isSetup():
        self.connect()
        ping_matomo("/cost/setting?redis.is_configured=True")
        return context_pre

    # 0th pass to count
    n_ec2_total = context_pre['n_ec2_total']

    ping_matomo("/cost/setting?redis.is_configured=False")

    # if more than 10 servers, recommend caching with redis
    cond_prompt = n_ec2_total > 10 and not self.isSetup()
    if cond_prompt:
        from termcolor import colored
        logger.warning(colored(
            """Since the number of EC2 instances is %i, it is recommended to use redis for caching of downloaded CPU/memory metrics.

To do so
- install redis

    [sudo] apt-get install redis-server

- export environment variables

    export ISITFIT_REDIS_HOST=localhost
    export ISITFIT_REDIS_PORT=6379
    export ISITFIT_REDIS_DB=0

where ISITFIT_REDIS_DB is the ID of an unused database in redis.

And finally re-run isitfit as usual.
""" % n_ec2_total, "yellow"))

        import click
        # not using abort=True so that I can send a custom message in the abort
        continue_wo_redis = click.confirm(
            colored('Would you like to continue without redis caching? ', 'cyan'),
            abort=False,
            default=True)
        if not continue_wo_redis:
            from isitfit.cli.click_descendents import IsitfitCliError
            raise IsitfitCliError("Aborting to set up redis.", context_pre['click_ctx'])

    # done
    return context_pre
def get(self, key):
    try:
        v1 = self.redis_client.get(key)
    except redis.exceptions.ResponseError as e:
        msg = 'Redis error: {e.__class__.__module__}.{e.__class__.__name__}: {e}'.format(e=e)
        # eg, 'redis.exceptions.ResponseError: invalid DB index'
        if 'invalid DB index' in str(e):
            import os
            msg += "\nHint: Check that environment variable ISITFIT_REDIS_DB=%s is a valid redis database index" % (
                os.getenv("ISITFIT_REDIS_DB", None))
            msg += "\nFor more info: https://stackoverflow.com/questions/13386053/how-do-i-change-between-redis-database"

        from isitfit.cli.click_descendents import IsitfitCliError
        raise IsitfitCliError(msg)

    if not v1:
        return v1

    v2 = self.pyarrow_context.deserialize(v1)
    return v2
def set(self, key, df):
    # Note that in case data was not found, eg in mainManager._cloudwatch_metrics_core,
    # an empty dataframe is returned (and thus passed in here)
    pybytes = self.pyarrow_context.serialize(df).to_buffer().to_pybytes()

    # if dataframe with shape[0]==0, raise (no longer supported)
    # Update 2019-12-17 Actually Cloudtrail needs to store an empty dataframe if there are no events in the last 90 days
    if not callable(df):
        from isitfit.cli.click_descendents import IsitfitCliError
        if type(df) != pd.DataFrame:
            raise IsitfitCliError(
                "Internal dev error: Only caching of callables or pandas dataframes supported as of isitfit 0.19")

        # Check comment above about Cloudtrail needing to store empty dataframes
        #if df.shape[0]==0:
        #  raise IsitfitCliError("Internal dev error: caching empty dataframes is no longer supported as of isitfit 0.19")

    # set expiration of key-value pair to be 1 day if data was found, 10 minutes otherwise
    ex = SECONDS_IN_10MINS if callable(df) else SECONDS_IN_ONE_DAY

    # https://redis-py.readthedocs.io/en/latest/#redis.Redis.set
    self.redis_client.set(name=key, value=pybytes, ex=ex)
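
# A minimal round-trip sketch for the get/set pair above, assuming the
# object is already connected to redis with a pyarrow serialization
# context (variable names are assumptions):
#
#   import pandas as pd
#   df_in = pd.DataFrame({'instance_id': ['i-0abc123def456'], 'cpu_max': [42]})
#   cache.set('metrics:i-0abc123def456', df_in)
#   df_out = cache.get('metrics:i-0abc123def456')  # dataframe, or None on cache miss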
def after_all(self, context_all):
    """
    concatenate the self.dfbin_d dict to pandas dataframe
    """
    dfbin_l = [v.reset_index() for k, v in self.dfbin_d.items()]
    if len(dfbin_l) == 0:
        from isitfit.cli.click_descendents import IsitfitCliError
        raise IsitfitCliError("No data found")

    import pandas as pd
    self.dfbin_p = pd.concat(dfbin_l, axis=0)

    # rename the weird "Timestamp" column to "Field"
    self.dfbin_p.rename(columns={'index': 'Field'}, inplace=True)

    # set index
    self.dfbin_p.set_index(['Service', 'Field'], inplace=True)

    # save for later processing
    context_all['dfbin_p'] = self.dfbin_p

    # done
    return context_all
def iterate_core(self, display_tqdm=False):
    fx_l = [
        'service_name', 'service_description', 'paginator_name',
        'paginator_entryJmespath', 'paginator_exception', 'entry_keyId',
        'entry_keyCreated'
    ]
    for fx_i in fx_l:
        # https://stackoverflow.com/a/9058315/4126114
        if fx_i not in self.__class__.__dict__.keys():
            raise Exception("Derived class should set %s" % fx_i)

    # iterate on regions
    import botocore
    import boto3
    import jmespath
    redshift_regions_full = boto3.Session().get_available_regions(self.service_name)
    import copy
    redshift_regions_sub = copy.deepcopy(redshift_regions_full)
    # redshift_regions_sub = ['us-west-2'] # FIXME

    if self.filter_region is not None:
        if self.filter_region not in redshift_regions_sub:
            msg_err = "Invalid region specified: %s. Supported values: %s"
            msg_err = msg_err % (self.filter_region, ", ".join(redshift_regions_sub))
            raise IsitfitCliError(msg_err, None)  # passing None for click context

        # over-ride
        redshift_regions_sub = [self.filter_region]

    # Before iterating, display a message that skipping some regions due to load from cache.
    # The following condition = region_include was loaded from cache
    if self.regionInclude_ready and len(redshift_regions_sub) != len(self.region_include) and not self.displayed_willskip:
        msg1 = "%s: Will skip %i out of %i regions which were either empty or inaccessible. To re-check, delete the local cache file %s"
        msg1 = msg1 % (self.service_description,
                       len(redshift_regions_sub) - len(self.region_include),
                       len(redshift_regions_sub),
                       self.simpleCacheMan.filename)
        import click
        click.echo(colored(msg1, "yellow"))
        self.displayed_willskip = True

    # iterate
    region_iterator = redshift_regions_sub
    if display_tqdm:
        # add some spaces for aligning the progress bars
        desc = "%s, counting in all regions " % self.service_description
        desc = "%-50s" % desc
        region_iterator = self.tqdmman(region_iterator, total=len(redshift_regions_sub), desc=desc)

    for region_name in region_iterator:
        if self.regionInclude_ready and self.filter_region is None:
            if region_name not in self.region_include:
                # skip since already failed to use it
                continue

        logger.debug("Region %s" % region_name)
        boto3.setup_default_session(region_name=region_name)

        # boto3 clients
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift.html#Redshift.Client.describe_logging_status
        # Update 2019-12-09
        #   Unfolding the iterator can cause a rate limiting error for accounts with more than 200 EC2,
        #   as reported by u/moofishies on 2019-11-12.
        #   Similar to: https://github.com/boto/botocore/pull/891#issuecomment-303526763
        #   The max_attempts config here is increased from the default 4 to decrease the rate limiting chances
        #   https://github.com/boto/botocore/pull/1260
        #   Note that with each extra retry, an exponential backoff is already implemented inside botocore
        #   More: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
        from botocore.config import Config
        service_client = boto3.client(
            self.service_name,
            config=Config(retries={'max_attempts': 10}))

        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudwatch.html#metric
        self.cloudwatch_resource = boto3.resource('cloudwatch')

        # iterate on service resources, eg ec2 instances, redshift clusters
        paginator = service_client.get_paginator(self.paginator_name)
        rc_iterator = paginator.paginate()
        try:
            region_anyClusterFound = False
            for rc_describe_page in rc_iterator:
                rc_describe_entries = jmespath.search(self.paginator_entryJmespath, rc_describe_page)
                for rc_describe_entry in rc_describe_entries:
                    region_anyClusterFound = True
                    # add field for region
                    rc_describe_entry['Region'] = region_name
                    # yield
                    yield rc_describe_entry

            if not self.regionInclude_ready and self.filter_region is None:
                if region_anyClusterFound:
                    # only include if found clusters in this region
                    self.region_include.append(region_name)

        except botocore.exceptions.ClientError as e:
            # Exception that means "no access to region"
            if e.response['Error']['Code'] == self.paginator_exception:
                continue

            # eg if user doesn't have access arn:aws:redshift:ap-northeast-1:974668457921:cluster:*
            # it could be because of specific access to region, or general access to the full redshift service.
            # Note: capturing this exception means that the region is no longer included in the iterator,
            # but it will still iterate over other regions
            if e.response['Error']['Code'] == 'AccessDenied':
                self.region_accessdenied.append(e)
                continue

            # Handle error:
            # botocore.exceptions.ClientError: An error occurred (InvalidClientTokenId) when calling the AssumeRole operation: The security token included in the request is invalid.
            # Not sure what this means, but maybe that a role is not allowed to assume into a region?
            # This error can be raised for example with using my local AWS profile "afc_external_readCur".
            # Here is an excerpt from my ~/.aws/credentials file
            #
            #   # Role created in Autofitcloud giving access to shadiakiki1986 to read CUR S3
            #   [afc_external_readCur]
            #   role_arn = arn:aws:iam::123456789:role/external-read-athena-role-ExternalReadCURRole-abcdef
            #   source_profile = a_user_profile_not_a_role
            #   region = us-east-1
            if e.response['Error']['Code'] == 'InvalidClientTokenId':
                continue

            # after setting up the InvalidClientTokenId filter above on the profile afc_external_readCur,
            # faced error: botocore.exceptions.ClientError: An error occurred (UnauthorizedOperation) when calling the DescribeInstances operation: You are not authorized to perform this operation.
            if e.response['Error']['Code'] == 'UnauthorizedOperation':
                continue

            # all other exceptions raised
            raise e

    # before exiting, check if a count just completed, and mark region_include as usable
    if not self.regionInclude_ready and self.filter_region is None:
        self.regionInclude_ready = True

        # save to cache
        self.simpleCacheMan.save_key(key='region_include', value=self.region_include)
        self.simpleCacheMan.save_key(key='region_accessdenied', value=self.region_accessdenied)

    # before exiting, if got some AccessDenied errors, display to user.
    # Note 1: originally, I wanted to break the iterator on the 1st AccessDenied error,
    # thinking that it's because the user doesn't have permission to the service as a whole.
    # Later, I figured out that maybe the user has permission to a subset of regions,
    # in which case getting an error on region R1 is normal,
    # and the iterator should still proceed to the next region R2.
    if not self.displayed_accessdenied and len(self.region_accessdenied) > 0:
        # 1st part goes to stdout
        msgx = "AWS returned AccessDenied errors on %i out of %i regions. Use `isitfit --verbose ...` and re-run the command for more details"
        msgx = msgx % (len(self.region_accessdenied), len(redshift_regions_sub))
        import click
        click.echo(colored(msgx, "yellow"))

        # 2nd part is too long, send it to --verbose
        msg2 = "\n".join(["- %s" % str(e) for e in self.region_accessdenied])
        msgx = "Here are the full error messages:\n%s"
        msgx = msgx % (msg2)
        logger.info(colored(msgx, "yellow"))

        self.displayed_accessdenied = True
def request(self, method, relative_url, payload_json, authenticated_user_path=True, anonymous_user_path=False):
    """
    Wrapper to the URL request
    method - post
    relative_url - eg ./tags/suggest
    payload_json - "json" field for request call
    authenticated_user_path - False if can use the current local user
                              True if need to use the isitfit-api-provided role
                              Flag for self.register which can disable this as it doesn't have an account/user prefix in the URL
    anonymous_user_path - False if endpoint needs to get the AWS user information (requires execute-api permissions)
                          True if endpoint can be done by requests library without any AWS info

    anonymous_user_path was introduced for the /share/email endpoint
    - which is different than the older /account/user/share/email endpoint
    - because some users used the root account
    - Also some users don't have the execute-api permission

    The combination anonymous_user_path=True and authenticated_user_path=True is not allowed
    - the other combinations make sense
    """
    logger.debug("ApiMan::request")

    # relative URL to absolute
    # https://stackoverflow.com/a/8223955/4126114
    if authenticated_user_path:
        suffix_url = './%s/%s/%s' % (self.r_sts['Account'], self.r_sts['UserId'], relative_url)
    else:
        suffix_url = relative_url

    import urllib.parse
    absolute_url = urllib.parse.urljoin(BASE_URL, suffix_url)

    import json
    logger.debug("%s %s %s" % (method, absolute_url, json.dumps(payload_json)))

    # mark timestamp right before request (used in listen_sqs for dropping stale messages)
    import datetime as dt
    dt_now = dt.datetime.utcnow()  # .strftime('%s')

    # make actual request
    import requests
    auth = self._get_auth(authenticated_user_path, anonymous_user_path)
    r1 = requests.request(method, absolute_url, json=payload_json, auth=auth)
    # logger.debug("python requests http request header:")
    # logger.debug(r1.request.headers)

    # https://stackoverflow.com/questions/18810777/how-do-i-read-a-response-from-python-requests
    r2 = json.loads(r1.text)

    # check AWS-generated errors (lambda?)
    if 'message' in r2:
        if r2['message'] == 'Internal server error':
            raise IsitfitCliError('Internal server error', self.ctx)
        else:
            # print(r2)
            raise IsitfitCliError('Serverside error #2: %s' % r2['message'], self.ctx)

    # AWS API Gateway error
    if 'Message' in r2:
        raise IsitfitCliError("Serverside error #3: %s" % r2['Message'], self.ctx)

    # validate schema of response
    register_schema_2 = Schema({
        'isitfitapi_status': {
            'code': str,
            'description': str,
        },
        'isitfitapi_body': {
            Optional(str): object
        }
    })

    try:
        register_schema_2.validate(r2)
    except SchemaError as e:
        logger.error("Received response: %s" % json.dumps(r2))
        raise IsitfitCliError("Does not match expected schema: %s" % str(e), self.ctx)

    # check for isitfit errors (in schema)
    if r2['isitfitapi_status']['code'] == 'error in schema':
        # print(r2)
        logger.debug("Detailed schema error message")
        logger.debug(r2['isitfitapi_status']['description'])
        raise IsitfitCliError("The data sent to the server by isitfit does not match the expected format.", self.ctx)

    # check for isitfit errors (general)
    elif r2['isitfitapi_status']['code'] == 'error':
        # print(r2)
        raise IsitfitCliError('Serverside error #1: %s' % r2['isitfitapi_status']['description'], self.ctx)

    elif r2['isitfitapi_status']['code'] == 'Registration in progress':
        # do nothing. This will be handled by self.register
        # Also, note that from isitfit-api, only the /register can lead to this code
        pass

    elif r2['isitfitapi_status']['code'] == 'Email verification in progress':
        # do nothing. This will be handled by EmailMan
        # Also, note that from isitfit-api, only the /share/email can lead to this code
        pass

    # check for isitfit unknown codes (i.e. maybe CLI is too old)
    elif r2['isitfitapi_status']['code'] != 'ok':
        # print(r2)
        msg = 'Unknown status code: %s. Description: %s' % (
            r2['isitfitapi_status']['code'], r2['isitfitapi_status']['description'])
        raise IsitfitCliError(msg, self.ctx)

    # if ok
    return r2, dt_now
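
# A minimal call sketch, assuming self.r_sts was already populated by
# self.register() (the payload contents are assumptions; the relative_url
# example comes from the docstring above):
#
#   r2, dt_sent = api_man.request(
#       method='post',
#       relative_url='./tags/suggest',
#       payload_json={'instances': []})
#   body = r2['isitfitapi_body']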
def send(self, share_email):
    if share_email is None:
        return

    if len(share_email) == 0:
        return

    logger.info("Sending email")

    # pre-process spaces if any
    share_email = [x.strip() for x in share_email]

    # get resources available
    # Update 2019-12-13 no need to register with API after isitfit-api /share/email became anonymous
    # self.api_man.register()

    # submit POST http request
    response_json, dt_now = self._send_core(share_email)

    # check response status
    # Update 2019-12-12 Instead of raising an exception and aborting,
    # show the user a prompt to check his/her email
    # and give the program a chance to re-send the email
    import click
    while ((response_json['isitfitapi_status']['code'] == 'Email verification in progress')
           and (self.try_again > 0)):
        # https://click.palletsprojects.com/en/7.x/api/?highlight=click%20confirm#click.pause
        click.pause(
            info='A verification link was emailed to you now. Please click the link, then press any key here to continue...',
            err=False)
        self.try_again -= 1
        response_json, dt_now = self._send_core(share_email)

    if self.try_again == 0:
        raise IsitfitCliError(response_json['isitfitapi_status']['description'], self.ctx)

    # Update 2019-12-12 This code will get handled by apiMan and will never arrive here, so commenting it out
    # if response_json['isitfitapi_status']['code'] == 'error':
    #     raise IsitfitCliError(response_json['isitfitapi_status']['description'], self.ctx)

    if response_json['isitfitapi_status']['code'] != 'ok':
        response_str = json.dumps(response_json)
        raise IsitfitCliError("Unsupported response from server: %s" % response_str, self.ctx)

    # Save the 1st entry as "last-used email".
    # Make sure to save this *after* the verification steps above are done so as to maintain the last *working* email
    self.last_email.set(share_email[0])

    # validate schema
    from schema import SchemaError, Schema, Optional
    register_schema_2 = Schema({'from': str, Optional(str): object})
    try:
        register_schema_2.validate(response_json['isitfitapi_body'])
    except SchemaError as e:
        responseBody_str = json.dumps(response_json['isitfitapi_body'])
        err_msg = "Received response body: %s. Schema error: %s" % (responseBody_str, str(e))
        raise IsitfitCliError(err_msg, self.ctx)

    # otherwise proceed
    emailFrom = response_json['isitfitapi_body']['from']
    click.echo("Email sent from %s to: %s" % (emailFrom, ", ".join(share_email)))
    return
def register(self):
    logger.debug("ApiMan::register")

    if self.r_register is not None:
        if self.r_register['isitfitapi_status']['code'] == 'ok':
            # already registered
            self.r_body = self.r_register['isitfitapi_body']
            return

    # counter
    self.call_n += 1

    # display head
    if self.call_n == 1:
        logger.debug("Logging into server")
    else:
        logger.debug("Registration attempt # %i." % (self.call_n))

    import boto3
    sts_client = boto3.client('sts')
    self.r_sts = sts_client.get_caller_identity()
    del self.r_sts['ResponseMetadata']
    # eg {'UserId': 'AIDA6F3WEM7AXY6Y4VWDC', 'Account': '974668457921', 'Arn': 'arn:aws:iam::974668457921:user/shadi'}

    # actual request
    self.r_register, dt_now = self.request(
        method='post',
        relative_url='./register',
        payload_json=self.r_sts,
        authenticated_user_path=False  # since /register is the absolute path (without account/user)
    )

    # set shortcut to body only
    self.r_body = self.r_register['isitfitapi_body']

    # handle registration in progress
    if self.r_register['isitfitapi_status']['code'] == 'Registration in progress':
        # deal with "registration in progress"
        if self.call_n == 1:
            # just continue and will check again later
            logger.debug("Registration in progress")
        elif self.call_n >= self.n_maxCalls:
            raise IsitfitCliError(
                "Registration is still not ready. Please try again in a few minutes, or file an issue at https://github.com/autofitcloud/isitfit/issues",
                self.ctx)
        else:
            logger.debug("Registration not ready yet.")

        if self.call_n >= self.tryAgainIn:
            logger.debug("Will check again in %i seconds" % (self.nsecs_wait))
            import time
            # from tqdm import tqdm as tqdm_obj
            from isitfit.tqdmman import TqdmL2Verbose
            tqdm_obj = TqdmL2Verbose(self.ctx)
            tqdm_iter = tqdm_obj(
                range(self.nsecs_wait),
                desc="First-time service access (isitfit.io) takes ~ 1 minute")
            for i in tqdm_iter:
                time.sleep(1)

            self.register()
            return

        # stop here
        return

    # at this stage, registration was ok, so proceed
    if self.call_n == 1:
        logger.debug("Registration already done earlier")
    else:
        logger.debug("Registration complete")

    # check schema
    register_schema_1 = Schema({
        's3_arn': str,
        's3_bucketName': str,
        's3_keyPrefix': str,
        'sqs_url': str,
        'role_arn': str,
        Optional(str): object
    })
    try:
        register_schema_1.validate(self.r_body)
    except SchemaError as e:
        import json
        logger.error("Received response: %s" % json.dumps(self.r_body))
        raise IsitfitCliError("Does not match expected schema: %s" % str(e), self.ctx)

    # show resources granted
    # print(self.r_register)
    logger.debug("AutofitCloud granted you access to the following AWS resources:")
    from tabulate import tabulate
    logger.debug(tabulate([(k, self.r_body[k]) for k in self.r_body.keys()]))
    logger.debug("Note that account number 974668457921 is AutofitCloud, the company behind isitfit.")
    logger.debug("For more info, visit https://autofitcloud.com/privacy")

    # get boto3 session using the assumed role
    # for further use of aws resources from AutofitCloud
    # eg API Gateway, S3, SQS
    sts_connection = boto3.client('sts')
    acct_b = sts_connection.assume_role(
        RoleArn=self.r_body['role_arn'],
        # TODO set external ID?
        # ExternalId=None,
        # this is a generic session name that shows up in the User ID field
        # on the server-side. If this gets modified, make sure to modify the
        # counter-part in isitfit-api
        RoleSessionName="cross_acct_isitfit")

    self.boto3_session = boto3.session.Session(
        aws_access_key_id=acct_b['Credentials']['AccessKeyId'],
        aws_secret_access_key=acct_b['Credentials']['SecretAccessKey'],
        aws_session_token=acct_b['Credentials']['SessionToken'],
        # region matches with the serverless.yml region
        region_name='us-east-1')

    # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sqs.html#SQS.Queue.receive_messages
    sqs_res = self.boto3_session.resource('sqs')  # no need for region since already in boto session
    self.sqs_q = sqs_res.Queue(self.r_body['sqs_url'])